Diffstat (limited to 'drivers')
-rw-r--r-- drivers/acpi/acpica/Makefile | 1
-rw-r--r-- drivers/acpi/acpica/achware.h | 3
-rw-r--r-- drivers/acpi/acpica/aclocal.h | 23
-rw-r--r-- drivers/acpi/acpica/acmacros.h | 29
-rw-r--r-- drivers/acpi/acpica/dswload.c | 14
-rw-r--r-- drivers/acpi/acpica/dswload2.c | 14
-rw-r--r-- drivers/acpi/acpica/evgpe.c | 24
-rw-r--r-- drivers/acpi/acpica/evxfgpe.c | 3
-rw-r--r-- drivers/acpi/acpica/hwgpe.c | 15
-rw-r--r-- drivers/acpi/acpica/hwxfsleep.c | 1
-rw-r--r-- drivers/acpi/acpica/nsdump.c | 2
-rw-r--r-- drivers/acpi/acpica/tbinstal.c | 20
-rw-r--r-- drivers/acpi/acpica/tbxface.c | 41
-rw-r--r-- drivers/acpi/acpica/utosi.c | 1
-rw-r--r-- drivers/acpi/acpica/utxface.c | 354
-rw-r--r-- drivers/acpi/acpica/utxfinit.c | 317
-rw-r--r-- drivers/acpi/bus.c | 8
-rw-r--r-- drivers/acpi/button.c | 13
-rw-r--r-- drivers/acpi/fan.c | 22
-rw-r--r-- drivers/acpi/glue.c | 135
-rw-r--r-- drivers/acpi/hed.c | 20
-rw-r--r-- drivers/acpi/proc.c | 57
-rw-r--r-- drivers/acpi/sbshc.c | 18
-rw-r--r-- drivers/acpi/scan.c | 56
-rw-r--r-- drivers/acpi/tables.c | 18
-rw-r--r-- drivers/acpi/utils.c | 11
-rw-r--r-- drivers/base/memory.c | 40
-rw-r--r-- drivers/block/rbd.c | 1784
-rw-r--r-- drivers/block/rbd_types.h | 27
-rw-r--r-- drivers/block/virtio_blk.c | 306
-rw-r--r-- drivers/char/hw_random/omap-rng.c | 121
-rw-r--r-- drivers/char/mbcs.c | 2
-rw-r--r-- drivers/char/mem.c | 2
-rw-r--r-- drivers/char/mspec.c | 2
-rw-r--r-- drivers/char/virtio_console.c | 198
-rw-r--r-- drivers/crypto/mv_cesa.c | 17
-rw-r--r-- drivers/dma/Kconfig | 17
-rw-r--r-- drivers/dma/Makefile | 2
-rw-r--r-- drivers/dma/amba-pl08x.c | 2
-rw-r--r-- drivers/dma/at_hdmac.c | 3
-rw-r--r-- drivers/dma/dw_dmac.c | 258
-rw-r--r-- drivers/dma/dw_dmac_regs.h | 48
-rw-r--r-- drivers/dma/edma.c | 671
-rw-r--r-- drivers/dma/ep93xx_dma.c | 4
-rw-r--r-- drivers/dma/imx-dma.c | 2
-rw-r--r-- drivers/dma/imx-sdma.c | 2
-rw-r--r-- drivers/dma/ioat/dma_v2.c | 3
-rw-r--r-- drivers/dma/ioat/pci.c | 22
-rw-r--r-- drivers/dma/mmp_pdma.c | 875
-rw-r--r-- drivers/dma/mmp_tdma.c | 53
-rw-r--r-- drivers/dma/mxs-dma.c | 16
-rw-r--r-- drivers/dma/omap-dma.c | 45
-rw-r--r-- drivers/dma/pl330.c | 80
-rw-r--r-- drivers/dma/sa11x0-dma.c | 2
-rw-r--r-- drivers/dma/sirf-dma.c | 25
-rw-r--r-- drivers/dma/ste_dma40.c | 17
-rw-r--r-- drivers/dma/tegra20-apb-dma.c | 14
-rw-r--r-- drivers/gpio/Kconfig | 6
-rw-r--r-- drivers/gpio/Makefile | 1
-rw-r--r-- drivers/gpio/gpio-mvebu.c | 679
-rw-r--r-- drivers/gpio/gpio-stp-xway.c | 2
-rw-r--r-- drivers/gpu/drm/drm_gem.c | 2
-rw-r--r-- drivers/gpu/drm/drm_vm.c | 10
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_gem.c | 2
-rw-r--r-- drivers/gpu/drm/gma500/framebuffer.c | 3
-rw-r--r-- drivers/gpu/drm/ttm/ttm_bo_vm.c | 4
-rw-r--r-- drivers/gpu/drm/udl/udl_fb.c | 2
-rw-r--r-- drivers/hwmon/gpio-fan.c | 120
-rw-r--r-- drivers/i2c/Kconfig | 17
-rw-r--r-- drivers/i2c/busses/Kconfig | 18
-rw-r--r-- drivers/i2c/busses/i2c-designware-core.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-i801.c | 185
-rw-r--r-- drivers/i2c/busses/i2c-parport.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-piix4.c | 1
-rw-r--r-- drivers/i2c/busses/i2c-scmi.c | 14
-rw-r--r-- drivers/i2c/busses/i2c-viapro.c | 3
-rw-r--r-- drivers/i2c/busses/scx200_acb.c | 24
-rw-r--r-- drivers/i2c/busses/scx200_i2c.c | 15
-rw-r--r-- drivers/i2c/i2c-core.c | 16
-rw-r--r-- drivers/i2c/i2c-mux.c | 22
-rw-r--r-- drivers/i2c/i2c-smbus.c | 11
-rw-r--r-- drivers/i2c/muxes/i2c-mux-gpio.c | 56
-rw-r--r-- drivers/i2c/muxes/i2c-mux-pca9541.c | 2
-rw-r--r-- drivers/i2c/muxes/i2c-mux-pca954x.c | 10
-rw-r--r-- drivers/i2c/muxes/i2c-mux-pinctrl.c | 2
-rw-r--r-- drivers/idle/intel_idle.c | 1
-rw-r--r-- drivers/infiniband/core/cma.c | 3
-rw-r--r-- drivers/infiniband/core/netlink.c | 1
-rw-r--r-- drivers/infiniband/hw/ehca/ehca_uverbs.c | 4
-rw-r--r-- drivers/infiniband/hw/ipath/ipath_file_ops.c | 2
-rw-r--r-- drivers/infiniband/hw/qib/qib_file_ops.c | 2
-rw-r--r-- drivers/input/misc/atlas_btns.c | 17
-rw-r--r-- drivers/iommu/Kconfig | 2
-rw-r--r-- drivers/iommu/amd_iommu.c | 514
-rw-r--r-- drivers/iommu/amd_iommu_init.c | 253
-rw-r--r-- drivers/iommu/amd_iommu_proto.h | 8
-rw-r--r-- drivers/iommu/amd_iommu_types.h | 59
-rw-r--r-- drivers/iommu/exynos-iommu.c | 3
-rw-r--r-- drivers/iommu/intel-iommu.c | 4
-rw-r--r-- drivers/iommu/irq_remapping.c | 5
-rw-r--r-- drivers/iommu/irq_remapping.h | 6
-rw-r--r-- drivers/iommu/tegra-smmu.c | 261
-rw-r--r-- drivers/isdn/hisax/Kconfig | 10
-rw-r--r-- drivers/lguest/lguest_device.c | 5
-rw-r--r-- drivers/media/pci/meye/meye.c | 2
-rw-r--r-- drivers/media/platform/omap/omap_vout.c | 2
-rw-r--r-- drivers/media/platform/vino.c | 2
-rw-r--r-- drivers/media/usb/sn9c102/sn9c102_core.c | 3
-rw-r--r-- drivers/media/usb/usbvision/usbvision-video.c | 3
-rw-r--r-- drivers/media/v4l2-core/videobuf-dma-sg.c | 2
-rw-r--r-- drivers/media/v4l2-core/videobuf-vmalloc.c | 2
-rw-r--r-- drivers/media/v4l2-core/videobuf2-memops.c | 2
-rw-r--r-- drivers/misc/carma/carma-fpga.c | 2
-rw-r--r-- drivers/misc/sgi-gru/grufile.c | 5
-rw-r--r-- drivers/mmc/core/core.c | 240
-rw-r--r-- drivers/mmc/core/debugfs.c | 2
-rw-r--r-- drivers/mmc/core/mmc.c | 57
-rw-r--r-- drivers/mmc/core/mmc_ops.c | 84
-rw-r--r-- drivers/mmc/core/sdio_bus.c | 7
-rw-r--r-- drivers/mmc/core/slot-gpio.c | 8
-rw-r--r-- drivers/mmc/host/Kconfig | 9
-rw-r--r-- drivers/mmc/host/Makefile | 1
-rw-r--r-- drivers/mmc/host/atmel-mci-regs.h | 7
-rw-r--r-- drivers/mmc/host/atmel-mci.c | 113
-rw-r--r-- drivers/mmc/host/bfin_sdh.c | 210
-rw-r--r-- drivers/mmc/host/davinci_mmc.c | 271
-rw-r--r-- drivers/mmc/host/dw_mmc-exynos.c | 253
-rw-r--r-- drivers/mmc/host/dw_mmc-pci.c | 15
-rw-r--r-- drivers/mmc/host/dw_mmc-pltfm.c | 62
-rw-r--r-- drivers/mmc/host/dw_mmc-pltfm.h | 20
-rw-r--r-- drivers/mmc/host/dw_mmc.c | 326
-rw-r--r-- drivers/mmc/host/dw_mmc.h | 24
-rw-r--r-- drivers/mmc/host/mmc_spi.c | 15
-rw-r--r-- drivers/mmc/host/mmci.c | 13
-rw-r--r-- drivers/mmc/host/mxcmmc.c | 76
-rw-r--r-- drivers/mmc/host/omap.c | 37
-rw-r--r-- drivers/mmc/host/omap_hsmmc.c | 125
-rw-r--r-- drivers/mmc/host/pxamci.c | 52
-rw-r--r-- drivers/mmc/host/sdhci-dove.c | 8
-rw-r--r-- drivers/mmc/host/sdhci-of-esdhc.c | 36
-rw-r--r-- drivers/mmc/host/sdhci-pci.c | 19
-rw-r--r-- drivers/mmc/host/sdhci-pltfm.c | 3
-rw-r--r-- drivers/mmc/host/sdhci-pxav2.c | 6
-rw-r--r-- drivers/mmc/host/sdhci-pxav3.c | 35
-rw-r--r-- drivers/mmc/host/sdhci-s3c.c | 216
-rw-r--r-- drivers/mmc/host/sdhci-spear.c | 67
-rw-r--r-- drivers/mmc/host/sdhci-tegra.c | 7
-rw-r--r-- drivers/mmc/host/sdhci.c | 205
-rw-r--r-- drivers/mmc/host/sh_mmcif.c | 8
-rw-r--r-- drivers/mmc/host/via-sdmmc.c | 16
-rw-r--r-- drivers/mmc/host/vub300.c | 4
-rw-r--r-- drivers/mtd/Kconfig | 7
-rw-r--r-- drivers/mtd/Makefile | 1
-rw-r--r-- drivers/mtd/bcm47xxpart.c | 202
-rw-r--r-- drivers/mtd/chips/Kconfig | 11
-rw-r--r-- drivers/mtd/chips/cfi_cmdset_0001.c | 14
-rw-r--r-- drivers/mtd/chips/cfi_cmdset_0002.c | 67
-rw-r--r-- drivers/mtd/cmdlinepart.c | 183
-rw-r--r-- drivers/mtd/devices/Kconfig | 10
-rw-r--r-- drivers/mtd/devices/Makefile | 1
-rw-r--r-- drivers/mtd/devices/bcm47xxsflash.c | 105
-rw-r--r-- drivers/mtd/devices/doc2001plus.c | 14
-rw-r--r-- drivers/mtd/devices/docg3.c | 12
-rw-r--r-- drivers/mtd/devices/m25p80.c | 24
-rw-r--r-- drivers/mtd/devices/spear_smi.c | 141
-rw-r--r-- drivers/mtd/maps/Kconfig | 16
-rw-r--r-- drivers/mtd/maps/Makefile | 1
-rw-r--r-- drivers/mtd/maps/autcpu12-nvram.c | 153
-rw-r--r-- drivers/mtd/maps/pci.c | 23
-rw-r--r-- drivers/mtd/maps/physmap_of.c | 14
-rw-r--r-- drivers/mtd/maps/rbtx4939-flash.c | 2
-rw-r--r-- drivers/mtd/maps/uclinux.c | 15
-rw-r--r-- drivers/mtd/maps/wr_sbc82xx_flash.c | 174
-rw-r--r-- drivers/mtd/mtdchar.c | 8
-rw-r--r-- drivers/mtd/mtdcore.c | 27
-rw-r--r-- drivers/mtd/mtdoops.c | 14
-rw-r--r-- drivers/mtd/mtdpart.c | 5
-rw-r--r-- drivers/mtd/nand/Kconfig | 63
-rw-r--r-- drivers/mtd/nand/Makefile | 4
-rw-r--r-- drivers/mtd/nand/ams-delta.c | 13
-rw-r--r-- drivers/mtd/nand/atmel_nand.c | 987
-rw-r--r-- drivers/mtd/nand/atmel_nand_ecc.h | 114
-rw-r--r-- drivers/mtd/nand/au1550nd.c | 46
-rw-r--r-- drivers/mtd/nand/bcm_umi_bch.c | 217
-rw-r--r-- drivers/mtd/nand/bcm_umi_nand.c | 555
-rw-r--r-- drivers/mtd/nand/bf5xx_nand.c | 6
-rw-r--r-- drivers/mtd/nand/cafe_nand.c | 20
-rw-r--r-- drivers/mtd/nand/cmx270_nand.c | 13
-rw-r--r-- drivers/mtd/nand/davinci_nand.c | 78
-rw-r--r-- drivers/mtd/nand/denali.c | 12
-rw-r--r-- drivers/mtd/nand/diskonchip.c | 63
-rw-r--r-- drivers/mtd/nand/docg4.c | 43
-rw-r--r-- drivers/mtd/nand/fsl_elbc_nand.c | 51
-rw-r--r-- drivers/mtd/nand/fsl_ifc_nand.c | 52
-rw-r--r-- drivers/mtd/nand/gpio.c | 54
-rw-r--r-- drivers/mtd/nand/gpmi-nand/gpmi-lib.c | 322
-rw-r--r-- drivers/mtd/nand/gpmi-nand/gpmi-nand.c | 152
-rw-r--r-- drivers/mtd/nand/gpmi-nand/gpmi-nand.h | 26
-rw-r--r-- drivers/mtd/nand/gpmi-nand/gpmi-regs.h | 12
-rw-r--r-- drivers/mtd/nand/lpc32xx_mlc.c | 924
-rw-r--r-- drivers/mtd/nand/lpc32xx_slc.c | 1039
-rw-r--r-- drivers/mtd/nand/mpc5121_nfc.c | 22
-rw-r--r-- drivers/mtd/nand/mxc_nand.c | 168
-rw-r--r-- drivers/mtd/nand/nand_base.c | 529
-rw-r--r-- drivers/mtd/nand/nand_bbt.c | 148
-rw-r--r-- drivers/mtd/nand/nand_bcm_umi.c | 149
-rw-r--r-- drivers/mtd/nand/nand_bcm_umi.h | 336
-rw-r--r-- drivers/mtd/nand/nand_ids.c | 7
-rw-r--r-- drivers/mtd/nand/nandsim.c | 17
-rw-r--r-- drivers/mtd/nand/ndfc.c | 13
-rw-r--r-- drivers/mtd/nand/nuc900_nand.c | 17
-rw-r--r-- drivers/mtd/nand/omap2.c | 36
-rw-r--r-- drivers/mtd/nand/orion_nand.c | 1
-rw-r--r-- drivers/mtd/nand/plat_nand.c | 5
-rw-r--r-- drivers/mtd/nand/pxa3xx_nand.c | 12
-rw-r--r-- drivers/mtd/nand/r852.c | 22
-rw-r--r-- drivers/mtd/nand/s3c2410.c | 191
-rw-r--r-- drivers/mtd/nand/sh_flctl.c | 327
-rw-r--r-- drivers/mtd/nand/socrates_nand.c | 19
-rw-r--r-- drivers/mtd/nand/tmio_nand.c | 13
-rw-r--r-- drivers/mtd/nand/txx9ndfmc.c | 13
-rw-r--r-- drivers/mtd/nand/xway_nand.c | 201
-rw-r--r-- drivers/mtd/sm_ftl.c | 1
-rw-r--r-- drivers/mtd/tests/Makefile | 1
-rw-r--r-- drivers/mtd/tests/mtd_nandbiterrs.c | 460
-rw-r--r-- drivers/mtd/tests/mtd_nandecctest.c | 294
-rw-r--r-- drivers/mtd/tests/mtd_speedtest.c | 16
-rw-r--r-- drivers/mtd/tests/mtd_stresstest.c | 39
-rw-r--r-- drivers/mtd/ubi/Kconfig | 21
-rw-r--r-- drivers/mtd/ubi/Makefile | 1
-rw-r--r-- drivers/mtd/ubi/attach.c | 386
-rw-r--r-- drivers/mtd/ubi/build.c | 70
-rw-r--r-- drivers/mtd/ubi/eba.c | 126
-rw-r--r-- drivers/mtd/ubi/fastmap.c | 1537
-rw-r--r-- drivers/mtd/ubi/ubi-media.h | 137
-rw-r--r-- drivers/mtd/ubi/ubi.h | 118
-rw-r--r-- drivers/mtd/ubi/wl.c | 599
-rw-r--r-- drivers/net/ethernet/amd/amd8111e.c | 2
-rw-r--r-- drivers/net/ethernet/amd/au1000_eth.c | 10
-rw-r--r-- drivers/net/ethernet/broadcom/bcm63xx_enet.h | 30
-rw-r--r-- drivers/net/ethernet/calxeda/xgmac.c | 19
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 1
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 54
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 15
-rw-r--r-- drivers/net/ethernet/dec/tulip/dmfe.c | 12
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_main.c | 7
-rw-r--r-- drivers/net/ethernet/freescale/gianfar.c | 27
-rw-r--r-- drivers/net/ethernet/freescale/gianfar.h | 2
-rw-r--r-- drivers/net/ethernet/freescale/ucc_geth.c | 29
-rw-r--r-- drivers/net/ethernet/freescale/ucc_geth.h | 2
-rw-r--r-- drivers/net/ethernet/intel/e1000e/hw.h | 2
-rw-r--r-- drivers/net/ethernet/intel/e1000e/netdev.c | 2
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe.h | 2
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 2
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 7
-rw-r--r-- drivers/net/ethernet/marvell/mv643xx_eth.c | 18
-rw-r--r-- drivers/net/ethernet/marvell/skge.c | 13
-rw-r--r-- drivers/net/ethernet/marvell/sky2.c | 5
-rw-r--r-- drivers/net/ethernet/natsemi/natsemi.c | 4
-rw-r--r-- drivers/net/ethernet/natsemi/xtsonic.c | 1
-rw-r--r-- drivers/net/ethernet/octeon/octeon_mgmt.c | 550
-rw-r--r-- drivers/net/ethernet/oki-semi/pch_gbe/Kconfig | 3
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 3
-rw-r--r-- drivers/net/ethernet/realtek/8139cp.c | 2
-rw-r--r-- drivers/net/ethernet/renesas/sh_eth.c | 1
-rw-r--r-- drivers/net/ethernet/sfc/ptp.c | 9
-rw-r--r-- drivers/net/ethernet/sis/sis900.c | 4
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac.h | 1
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 20
-rw-r--r-- drivers/net/ethernet/sun/niu.c | 1
-rw-r--r-- drivers/net/ethernet/sun/sungem.c | 3
-rw-r--r-- drivers/net/irda/irtty-sir.c | 4
-rw-r--r-- drivers/net/irda/mcs7780.c | 4
-rw-r--r-- drivers/net/irda/pxaficp_ir.c | 4
-rw-r--r-- drivers/net/irda/sa1100_ir.c | 4
-rw-r--r-- drivers/net/irda/sh_irda.c | 4
-rw-r--r-- drivers/net/irda/sh_sir.c | 5
-rw-r--r-- drivers/net/phy/mdio_bus.c | 1
-rw-r--r-- drivers/net/vxlan.c | 5
-rw-r--r-- drivers/net/wan/farsync.c | 2
-rw-r--r-- drivers/oprofile/buffer_sync.c | 17
-rw-r--r-- drivers/parport/Kconfig | 2
-rw-r--r-- drivers/pinctrl/Kconfig | 38
-rw-r--r-- drivers/pinctrl/Makefile | 8
-rw-r--r-- drivers/pinctrl/pinctrl-armada-370.c | 421
-rw-r--r-- drivers/pinctrl/pinctrl-armada-xp.c | 468
-rw-r--r-- drivers/pinctrl/pinctrl-dove.c | 620
-rw-r--r-- drivers/pinctrl/pinctrl-falcon.c | 468
-rw-r--r-- drivers/pinctrl/pinctrl-kirkwood.c | 472
-rw-r--r-- drivers/pinctrl/pinctrl-lantiq.c | 342
-rw-r--r-- drivers/pinctrl/pinctrl-lantiq.h | 194
-rw-r--r-- drivers/pinctrl/pinctrl-mvebu.c | 754
-rw-r--r-- drivers/pinctrl/pinctrl-mvebu.h | 192
-rw-r--r-- drivers/pinctrl/pinctrl-xway.c | 781
-rw-r--r-- drivers/platform/x86/hp_accel.c | 25
-rw-r--r-- drivers/platform/x86/ideapad-laptop.c | 14
-rw-r--r-- drivers/platform/x86/topstar-laptop.c | 22
-rw-r--r-- drivers/platform/x86/toshiba_bluetooth.c | 22
-rw-r--r-- drivers/platform/x86/xo15-ebook.c | 14
-rw-r--r-- drivers/pnp/pnpacpi/core.c | 7
-rw-r--r-- drivers/remoteproc/remoteproc_virtio.c | 5
-rw-r--r-- drivers/rpmsg/Kconfig | 1
-rw-r--r-- drivers/s390/kvm/kvm_virtio.c | 5
-rw-r--r-- drivers/scsi/sg.c | 2
-rw-r--r-- drivers/spi/Kconfig | 8
-rw-r--r-- drivers/spi/Makefile | 1
-rw-r--r-- drivers/spi/spi-davinci.c | 292
-rw-r--r-- drivers/spi/spi-octeon.c | 362
-rw-r--r-- drivers/spi/spi-omap-100k.c | 2
-rw-r--r-- drivers/spi/spi-omap2-mcspi.c | 1
-rw-r--r-- drivers/staging/android/ashmem.c | 1
-rw-r--r-- drivers/staging/omapdrm/omap_gem_dmabuf.c | 2
-rw-r--r-- drivers/staging/tidspbridge/rmgr/drv_interface.c | 2
-rw-r--r-- drivers/uio/uio.c | 4
-rw-r--r-- drivers/usb/core/usb-acpi.c | 7
-rw-r--r-- drivers/usb/mon/mon_bin.c | 2
-rw-r--r-- drivers/usb/musb/musb_io.h | 3
-rw-r--r-- drivers/vfio/pci/vfio_pci.c | 2
-rw-r--r-- drivers/video/68328fb.c | 2
-rw-r--r-- drivers/video/aty/atyfb_base.c | 3
-rw-r--r-- drivers/video/fb-puv3.c | 3
-rw-r--r-- drivers/video/fb_defio.c | 2
-rw-r--r-- drivers/video/fbmem.c | 3
-rw-r--r-- drivers/video/gbefb.c | 2
-rw-r--r-- drivers/video/omap2/omapfb/omapfb-main.c | 2
-rw-r--r-- drivers/video/sbuslib.c | 5
-rw-r--r-- drivers/video/smscufx.c | 1
-rw-r--r-- drivers/video/udlfb.c | 1
-rw-r--r-- drivers/video/vermilion/vermilion.c | 1
-rw-r--r-- drivers/video/vfb.c | 1
-rw-r--r-- drivers/virtio/Kconfig | 17
-rw-r--r-- drivers/virtio/Makefile | 3
-rw-r--r-- drivers/virtio/virtio.c | 2
-rw-r--r-- drivers/virtio/virtio_mmio.c | 29
-rw-r--r-- drivers/virtio/virtio_pci.c | 68
-rw-r--r-- drivers/virtio/virtio_ring.c | 14
-rw-r--r-- drivers/watchdog/m54xx_wdt.c | 21
-rw-r--r-- drivers/xen/gntalloc.c | 2
-rw-r--r-- drivers/xen/gntdev.c | 2
-rw-r--r-- drivers/xen/privcmd.c | 3
340 files changed, 23307 insertions, 6474 deletions
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index 0a1b343..7f1d407 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -158,5 +158,6 @@ acpi-y += \
utresrc.o \
utstate.o \
utxface.o \
+ utxfinit.o \
utxferror.o \
utxfmutex.o
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
index 5de4ec7..d902d31 100644
--- a/drivers/acpi/acpica/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -110,8 +110,7 @@ acpi_status acpi_hw_write_port(acpi_io_address address, u32 value, u32 width);
/*
* hwgpe - GPE support
*/
-u32 acpi_hw_get_gpe_register_bit(struct acpi_gpe_event_info *gpe_event_info,
- struct acpi_gpe_register_info *gpe_register_info);
+u32 acpi_hw_get_gpe_register_bit(struct acpi_gpe_event_info *gpe_event_info);
acpi_status
acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action);
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index cc80fe1..c816ee6 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -707,15 +707,18 @@ union acpi_parse_value {
u8 disasm_opcode; /* Subtype used for disassembly */\
char aml_op_name[16]) /* Op name (debug only) */
-#define ACPI_DASM_BUFFER 0x00
-#define ACPI_DASM_RESOURCE 0x01
-#define ACPI_DASM_STRING 0x02
-#define ACPI_DASM_UNICODE 0x03
-#define ACPI_DASM_EISAID 0x04
-#define ACPI_DASM_MATCHOP 0x05
-#define ACPI_DASM_LNOT_PREFIX 0x06
-#define ACPI_DASM_LNOT_SUFFIX 0x07
-#define ACPI_DASM_IGNORE 0x08
+/* Flags for disasm_flags field above */
+
+#define ACPI_DASM_BUFFER 0x00 /* Buffer is a simple data buffer */
+#define ACPI_DASM_RESOURCE 0x01 /* Buffer is a Resource Descriptor */
+#define ACPI_DASM_STRING 0x02 /* Buffer is an ASCII string */
+#define ACPI_DASM_UNICODE 0x03 /* Buffer is a Unicode string */
+#define ACPI_DASM_PLD_METHOD 0x04 /* Buffer is a _PLD method bit-packed buffer */
+#define ACPI_DASM_EISAID 0x05 /* Integer is an EISAID */
+#define ACPI_DASM_MATCHOP 0x06 /* Parent opcode is a Match() operator */
+#define ACPI_DASM_LNOT_PREFIX 0x07 /* Start of a Lnot_equal (etc.) pair of opcodes */
+#define ACPI_DASM_LNOT_SUFFIX 0x08 /* End of a Lnot_equal (etc.) pair of opcodes */
+#define ACPI_DASM_IGNORE 0x09 /* Not used at this time */
/*
* Generic operation (for example: If, While, Store)
@@ -932,6 +935,7 @@ struct acpi_bit_register_info {
#define ACPI_OSI_WIN_VISTA_SP1 0x09
#define ACPI_OSI_WIN_VISTA_SP2 0x0A
#define ACPI_OSI_WIN_7 0x0B
+#define ACPI_OSI_WIN_8 0x0C
#define ACPI_ALWAYS_ILLEGAL 0x00
@@ -1024,6 +1028,7 @@ struct acpi_port_info {
****************************************************************************/
struct acpi_db_method_info {
+ acpi_handle method;
acpi_handle main_thread_gate;
acpi_handle thread_complete_gate;
acpi_thread_id *threads;
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index 832b619..a7f68c4 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -277,10 +277,33 @@
/* Bitfields within ACPI registers */
-#define ACPI_REGISTER_PREPARE_BITS(val, pos, mask) ((val << pos) & mask)
-#define ACPI_REGISTER_INSERT_VALUE(reg, pos, mask, val) reg = (reg & (~(mask))) | ACPI_REGISTER_PREPARE_BITS(val, pos, mask)
+#define ACPI_REGISTER_PREPARE_BITS(val, pos, mask) \
+ ((val << pos) & mask)
-#define ACPI_INSERT_BITS(target, mask, source) target = ((target & (~(mask))) | (source & mask))
+#define ACPI_REGISTER_INSERT_VALUE(reg, pos, mask, val) \
+ reg = (reg & (~(mask))) | ACPI_REGISTER_PREPARE_BITS(val, pos, mask)
+
+#define ACPI_INSERT_BITS(target, mask, source) \
+ target = ((target & (~(mask))) | (source & mask))
+
+/* Generic bitfield macros and masks */
+
+#define ACPI_GET_BITS(source_ptr, position, mask) \
+ ((*source_ptr >> position) & mask)
+
+#define ACPI_SET_BITS(target_ptr, position, mask, value) \
+ (*target_ptr |= ((value & mask) << position))
+
+#define ACPI_1BIT_MASK 0x00000001
+#define ACPI_2BIT_MASK 0x00000003
+#define ACPI_3BIT_MASK 0x00000007
+#define ACPI_4BIT_MASK 0x0000000F
+#define ACPI_5BIT_MASK 0x0000001F
+#define ACPI_6BIT_MASK 0x0000003F
+#define ACPI_7BIT_MASK 0x0000007F
+#define ACPI_8BIT_MASK 0x000000FF
+#define ACPI_16BIT_MASK 0x0000FFFF
+#define ACPI_24BIT_MASK 0x00FFFFFF
/*
* An object of type struct acpi_namespace_node can appear in some contexts
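As a side note on the new generic bitfield helpers above: a minimal sketch of how ACPI_GET_BITS/ACPI_SET_BITS compose with the width masks (the register values here are hypothetical, not part of the patch):

	u32 reg = 0x00000070;	/* a 3-bit field holding 0x7 at bit position 4 */
	u32 target = 0;
	u32 field;

	field = ACPI_GET_BITS(&reg, 4, ACPI_3BIT_MASK);		/* field == 0x7 */
	ACPI_SET_BITS(&target, 4, ACPI_3BIT_MASK, field);	/* target == 0x70 */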
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
index 552aa3a..5575100 100644
--- a/drivers/acpi/acpica/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -230,6 +230,20 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
walk_state->scope_info->common.value = ACPI_TYPE_ANY;
break;
+ case ACPI_TYPE_METHOD:
+
+ /*
+ * Allow scope change to root during execution of module-level
+ * code. Root is typed METHOD during this time.
+ */
+ if ((node == acpi_gbl_root_node) &&
+ (walk_state->
+ parse_flags & ACPI_PARSE_MODULE_LEVEL)) {
+ break;
+ }
+
+ /*lint -fallthrough */
+
default:
/* All other types are an error */
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index ae71477..89c0114 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -230,6 +230,20 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
walk_state->scope_info->common.value = ACPI_TYPE_ANY;
break;
+ case ACPI_TYPE_METHOD:
+
+ /*
+ * Allow scope change to root during execution of module-level
+ * code. Root is typed METHOD during this time.
+ */
+ if ((node == acpi_gbl_root_node) &&
+ (walk_state->
+ parse_flags & ACPI_PARSE_MODULE_LEVEL)) {
+ break;
+ }
+
+ /*lint -fallthrough */
+
default:
/* All other types are an error */
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index afbd5cb..ef0193d 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -80,8 +80,7 @@ acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info)
return_ACPI_STATUS(AE_NOT_EXIST);
}
- register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info,
- gpe_register_info);
+ register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info);
/* Clear the run bit up front */
@@ -379,6 +378,18 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
*/
if (!(gpe_register_info->enable_for_run |
gpe_register_info->enable_for_wake)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
+ "Ignore disabled registers for GPE%02X-GPE%02X: "
+ "RunEnable=%02X, WakeEnable=%02X\n",
+ gpe_register_info->
+ base_gpe_number,
+ gpe_register_info->
+ base_gpe_number +
+ (ACPI_GPE_REGISTER_WIDTH - 1),
+ gpe_register_info->
+ enable_for_run,
+ gpe_register_info->
+ enable_for_wake));
continue;
}
@@ -401,9 +412,14 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
}
ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
- "Read GPE Register at GPE%02X: Status=%02X, Enable=%02X\n",
+ "Read registers for GPE%02X-GPE%02X: Status=%02X, Enable=%02X, "
+ "RunEnable=%02X, WakeEnable=%02X\n",
gpe_register_info->base_gpe_number,
- status_reg, enable_reg));
+ gpe_register_info->base_gpe_number +
+ (ACPI_GPE_REGISTER_WIDTH - 1),
+ status_reg, enable_reg,
+ gpe_register_info->enable_for_run,
+ gpe_register_info->enable_for_wake));
/* Check if there is anything active at all in this register */
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index 6affbdb..87c5f23 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -357,8 +357,7 @@ acpi_status acpi_set_gpe_wake_mask(acpi_handle gpe_device, u32 gpe_number, u8 ac
goto unlock_and_exit;
}
- register_bit =
- acpi_hw_get_gpe_register_bit(gpe_event_info, gpe_register_info);
+ register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info);
/* Perform the action */
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
index 25bd28c..db40765 100644
--- a/drivers/acpi/acpica/hwgpe.c
+++ b/drivers/acpi/acpica/hwgpe.c
@@ -60,7 +60,6 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
* FUNCTION: acpi_hw_get_gpe_register_bit
*
* PARAMETERS: gpe_event_info - Info block for the GPE
- * gpe_register_info - Info block for the GPE register
*
* RETURN: Register mask with a one in the GPE bit position
*
@@ -69,11 +68,10 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
*
******************************************************************************/
-u32 acpi_hw_get_gpe_register_bit(struct acpi_gpe_event_info *gpe_event_info,
- struct acpi_gpe_register_info *gpe_register_info)
+u32 acpi_hw_get_gpe_register_bit(struct acpi_gpe_event_info *gpe_event_info)
{
return (u32)1 << (gpe_event_info->gpe_number -
- gpe_register_info->base_gpe_number);
+ gpe_event_info->register_info->base_gpe_number);
}
/******************************************************************************
@@ -115,8 +113,7 @@ acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action)
/* Set or clear just the bit that corresponds to this GPE */
- register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info,
- gpe_register_info);
+ register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info);
switch (action) {
case ACPI_GPE_CONDITIONAL_ENABLE:
@@ -178,8 +175,7 @@ acpi_status acpi_hw_clear_gpe(struct acpi_gpe_event_info * gpe_event_info)
* Write a one to the appropriate bit in the status register to
* clear this GPE.
*/
- register_bit =
- acpi_hw_get_gpe_register_bit(gpe_event_info, gpe_register_info);
+ register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info);
status = acpi_hw_write(register_bit,
&gpe_register_info->status_address);
@@ -222,8 +218,7 @@ acpi_hw_get_gpe_status(struct acpi_gpe_event_info * gpe_event_info,
/* Get the register bitmask for this GPE */
- register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info,
- gpe_register_info);
+ register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info);
/* GPE currently enabled? (enabled for runtime?) */
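With the register info now derived from gpe_event_info itself, the helper still returns a one-hot mask; a quick sketch with hypothetical numbers:

	/* Hypothetical: GPE 0x13 in a register block whose base is GPE 0x10 */
	register_bit = (u32)1 << (0x13 - 0x10);	/* == 0x08, i.e. bit 3 of that register */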
diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
index 1f165a7..0ff1ece 100644
--- a/drivers/acpi/acpica/hwxfsleep.c
+++ b/drivers/acpi/acpica/hwxfsleep.c
@@ -381,7 +381,6 @@ ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state)
* FUNCTION: acpi_leave_sleep_state_prep
*
* PARAMETERS: sleep_state - Which sleep state we are exiting
- * flags - ACPI_EXECUTE_BFS to run optional method
*
* RETURN: Status
*
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
index 7ee4e6ae..2526aaf 100644
--- a/drivers/acpi/acpica/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -264,7 +264,7 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
switch (type) {
case ACPI_TYPE_PROCESSOR:
- acpi_os_printf("ID %X Len %.4X Addr %p\n",
+ acpi_os_printf("ID %02X Len %02X Addr %p\n",
obj_desc->processor.proc_id,
obj_desc->processor.length,
ACPI_CAST_PTR(void,
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 74f97d7..70f9d78 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -350,6 +350,7 @@ struct acpi_table_header *acpi_tb_table_override(struct acpi_table_header
acpi_status acpi_tb_resize_root_table_list(void)
{
struct acpi_table_desc *tables;
+ u32 table_count;
ACPI_FUNCTION_TRACE(tb_resize_root_table_list);
@@ -363,8 +364,13 @@ acpi_status acpi_tb_resize_root_table_list(void)
/* Increase the Table Array size */
- tables = ACPI_ALLOCATE_ZEROED(((acpi_size) acpi_gbl_root_table_list.
- max_table_count +
+ if (acpi_gbl_root_table_list.flags & ACPI_ROOT_ORIGIN_ALLOCATED) {
+ table_count = acpi_gbl_root_table_list.max_table_count;
+ } else {
+ table_count = acpi_gbl_root_table_list.current_table_count;
+ }
+
+ tables = ACPI_ALLOCATE_ZEROED(((acpi_size) table_count +
ACPI_ROOT_TABLE_SIZE_INCREMENT) *
sizeof(struct acpi_table_desc));
if (!tables) {
@@ -377,8 +383,8 @@ acpi_status acpi_tb_resize_root_table_list(void)
if (acpi_gbl_root_table_list.tables) {
ACPI_MEMCPY(tables, acpi_gbl_root_table_list.tables,
- (acpi_size) acpi_gbl_root_table_list.
- max_table_count * sizeof(struct acpi_table_desc));
+ (acpi_size) table_count *
+ sizeof(struct acpi_table_desc));
if (acpi_gbl_root_table_list.flags & ACPI_ROOT_ORIGIN_ALLOCATED) {
ACPI_FREE(acpi_gbl_root_table_list.tables);
@@ -386,9 +392,9 @@ acpi_status acpi_tb_resize_root_table_list(void)
}
acpi_gbl_root_table_list.tables = tables;
- acpi_gbl_root_table_list.max_table_count +=
- ACPI_ROOT_TABLE_SIZE_INCREMENT;
- acpi_gbl_root_table_list.flags |= (u8)ACPI_ROOT_ORIGIN_ALLOCATED;
+ acpi_gbl_root_table_list.max_table_count =
+ table_count + ACPI_ROOT_TABLE_SIZE_INCREMENT;
+ acpi_gbl_root_table_list.flags |= ACPI_ROOT_ORIGIN_ALLOCATED;
return_ACPI_STATUS(AE_OK);
}
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index 29e51bc..2110126 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -159,14 +159,12 @@ acpi_initialize_tables(struct acpi_table_desc * initial_table_array,
* DESCRIPTION: Reallocate Root Table List into dynamic memory. Copies the
* root list from the previously provided scratch area. Should
* be called once dynamic memory allocation is available in the
- * kernel
+ * kernel.
*
******************************************************************************/
acpi_status acpi_reallocate_root_table(void)
{
- struct acpi_table_desc *tables;
- acpi_size new_size;
- acpi_size current_size;
+ acpi_status status;
ACPI_FUNCTION_TRACE(acpi_reallocate_root_table);
@@ -178,39 +176,10 @@ acpi_status acpi_reallocate_root_table(void)
return_ACPI_STATUS(AE_SUPPORT);
}
- /*
- * Get the current size of the root table and add the default
- * increment to create the new table size.
- */
- current_size = (acpi_size)
- acpi_gbl_root_table_list.current_table_count *
- sizeof(struct acpi_table_desc);
-
- new_size = current_size +
- (ACPI_ROOT_TABLE_SIZE_INCREMENT * sizeof(struct acpi_table_desc));
-
- /* Create new array and copy the old array */
-
- tables = ACPI_ALLOCATE_ZEROED(new_size);
- if (!tables) {
- return_ACPI_STATUS(AE_NO_MEMORY);
- }
+ acpi_gbl_root_table_list.flags |= ACPI_ROOT_ALLOW_RESIZE;
- ACPI_MEMCPY(tables, acpi_gbl_root_table_list.tables, current_size);
-
- /*
- * Update the root table descriptor. The new size will be the current
- * number of tables plus the increment, independent of the reserved
- * size of the original table list.
- */
- acpi_gbl_root_table_list.tables = tables;
- acpi_gbl_root_table_list.max_table_count =
- acpi_gbl_root_table_list.current_table_count +
- ACPI_ROOT_TABLE_SIZE_INCREMENT;
- acpi_gbl_root_table_list.flags =
- ACPI_ROOT_ORIGIN_ALLOCATED | ACPI_ROOT_ALLOW_RESIZE;
-
- return_ACPI_STATUS(AE_OK);
+ status = acpi_tb_resize_root_table_list();
+ return_ACPI_STATUS(status);
}
/*******************************************************************************
diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c
index 34ef0bd..676285d 100644
--- a/drivers/acpi/acpica/utosi.c
+++ b/drivers/acpi/acpica/utosi.c
@@ -73,6 +73,7 @@ static struct acpi_interface_info acpi_default_supported_interfaces[] = {
{"Windows 2006 SP1", NULL, 0, ACPI_OSI_WIN_VISTA_SP1}, /* Windows Vista SP1 - Added 09/2009 */
{"Windows 2006 SP2", NULL, 0, ACPI_OSI_WIN_VISTA_SP2}, /* Windows Vista SP2 - Added 09/2010 */
{"Windows 2009", NULL, 0, ACPI_OSI_WIN_7}, /* Windows 7 and Server 2008 R2 - Added 09/2009 */
+ {"Windows 2012", NULL, 0, ACPI_OSI_WIN_8}, /* Windows 8 and Server 2012 - Added 08/2012 */
/* Feature Group Strings */
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c
index 534179f..b09632b 100644
--- a/drivers/acpi/acpica/utxface.c
+++ b/drivers/acpi/acpica/utxface.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Module Name: utxface - External interfaces for "global" ACPI functions
+ * Module Name: utxface - External interfaces, miscellaneous utility functions
*
*****************************************************************************/
@@ -53,271 +53,6 @@
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("utxface")
-#ifndef ACPI_ASL_COMPILER
-/*******************************************************************************
- *
- * FUNCTION: acpi_initialize_subsystem
- *
- * PARAMETERS: None
- *
- * RETURN: Status
- *
- * DESCRIPTION: Initializes all global variables. This is the first function
- * called, so any early initialization belongs here.
- *
- ******************************************************************************/
-acpi_status __init acpi_initialize_subsystem(void)
-{
- acpi_status status;
-
- ACPI_FUNCTION_TRACE(acpi_initialize_subsystem);
-
- acpi_gbl_startup_flags = ACPI_SUBSYSTEM_INITIALIZE;
- ACPI_DEBUG_EXEC(acpi_ut_init_stack_ptr_trace());
-
- /* Initialize the OS-Dependent layer */
-
- status = acpi_os_initialize();
- if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status, "During OSL initialization"));
- return_ACPI_STATUS(status);
- }
-
- /* Initialize all globals used by the subsystem */
-
- status = acpi_ut_init_globals();
- if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status,
- "During initialization of globals"));
- return_ACPI_STATUS(status);
- }
-
- /* Create the default mutex objects */
-
- status = acpi_ut_mutex_initialize();
- if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status,
- "During Global Mutex creation"));
- return_ACPI_STATUS(status);
- }
-
- /*
- * Initialize the namespace manager and
- * the root of the namespace tree
- */
- status = acpi_ns_root_initialize();
- if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status,
- "During Namespace initialization"));
- return_ACPI_STATUS(status);
- }
-
- /* Initialize the global OSI interfaces list with the static names */
-
- status = acpi_ut_initialize_interfaces();
- if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status,
- "During OSI interfaces initialization"));
- return_ACPI_STATUS(status);
- }
-
- /* If configured, initialize the AML debugger */
-
- ACPI_DEBUGGER_EXEC(status = acpi_db_initialize());
- return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_enable_subsystem
- *
- * PARAMETERS: flags - Init/enable Options
- *
- * RETURN: Status
- *
- * DESCRIPTION: Completes the subsystem initialization including hardware.
- * Puts system into ACPI mode if it isn't already.
- *
- ******************************************************************************/
-acpi_status acpi_enable_subsystem(u32 flags)
-{
- acpi_status status = AE_OK;
-
- ACPI_FUNCTION_TRACE(acpi_enable_subsystem);
-
-#if (!ACPI_REDUCED_HARDWARE)
-
- /* Enable ACPI mode */
-
- if (!(flags & ACPI_NO_ACPI_ENABLE)) {
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "[Init] Going into ACPI mode\n"));
-
- acpi_gbl_original_mode = acpi_hw_get_mode();
-
- status = acpi_enable();
- if (ACPI_FAILURE(status)) {
- ACPI_WARNING((AE_INFO, "AcpiEnable failed"));
- return_ACPI_STATUS(status);
- }
- }
-
- /*
- * Obtain a permanent mapping for the FACS. This is required for the
- * Global Lock and the Firmware Waking Vector
- */
- status = acpi_tb_initialize_facs();
- if (ACPI_FAILURE(status)) {
- ACPI_WARNING((AE_INFO, "Could not map the FACS table"));
- return_ACPI_STATUS(status);
- }
-#endif /* !ACPI_REDUCED_HARDWARE */
-
- /*
- * Install the default op_region handlers. These are installed unless
- * other handlers have already been installed via the
- * install_address_space_handler interface.
- */
- if (!(flags & ACPI_NO_ADDRESS_SPACE_INIT)) {
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "[Init] Installing default address space handlers\n"));
-
- status = acpi_ev_install_region_handlers();
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
- }
-#if (!ACPI_REDUCED_HARDWARE)
- /*
- * Initialize ACPI Event handling (Fixed and General Purpose)
- *
- * Note1: We must have the hardware and events initialized before we can
- * execute any control methods safely. Any control method can require
- * ACPI hardware support, so the hardware must be fully initialized before
- * any method execution!
- *
- * Note2: Fixed events are initialized and enabled here. GPEs are
- * initialized, but cannot be enabled until after the hardware is
- * completely initialized (SCI and global_lock activated)
- */
- if (!(flags & ACPI_NO_EVENT_INIT)) {
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "[Init] Initializing ACPI events\n"));
-
- status = acpi_ev_initialize_events();
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
- }
-
- /*
- * Install the SCI handler and Global Lock handler. This completes the
- * hardware initialization.
- */
- if (!(flags & ACPI_NO_HANDLER_INIT)) {
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "[Init] Installing SCI/GL handlers\n"));
-
- status = acpi_ev_install_xrupt_handlers();
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
- }
-#endif /* !ACPI_REDUCED_HARDWARE */
-
- return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_enable_subsystem)
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_initialize_objects
- *
- * PARAMETERS: flags - Init/enable Options
- *
- * RETURN: Status
- *
- * DESCRIPTION: Completes namespace initialization by initializing device
- * objects and executing AML code for Regions, buffers, etc.
- *
- ******************************************************************************/
-acpi_status acpi_initialize_objects(u32 flags)
-{
- acpi_status status = AE_OK;
-
- ACPI_FUNCTION_TRACE(acpi_initialize_objects);
-
- /*
- * Run all _REG methods
- *
- * Note: Any objects accessed by the _REG methods will be automatically
- * initialized, even if they contain executable AML (see the call to
- * acpi_ns_initialize_objects below).
- */
- if (!(flags & ACPI_NO_ADDRESS_SPACE_INIT)) {
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "[Init] Executing _REG OpRegion methods\n"));
-
- status = acpi_ev_initialize_op_regions();
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
- }
-
- /*
- * Execute any module-level code that was detected during the table load
- * phase. Although illegal since ACPI 2.0, there are many machines that
- * contain this type of code. Each block of detected executable AML code
- * outside of any control method is wrapped with a temporary control
- * method object and placed on a global list. The methods on this list
- * are executed below.
- */
- acpi_ns_exec_module_code_list();
-
- /*
- * Initialize the objects that remain uninitialized. This runs the
- * executable AML that may be part of the declaration of these objects:
- * operation_regions, buffer_fields, Buffers, and Packages.
- */
- if (!(flags & ACPI_NO_OBJECT_INIT)) {
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "[Init] Completing Initialization of ACPI Objects\n"));
-
- status = acpi_ns_initialize_objects();
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
- }
-
- /*
- * Initialize all device objects in the namespace. This runs the device
- * _STA and _INI methods.
- */
- if (!(flags & ACPI_NO_DEVICE_INIT)) {
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "[Init] Initializing ACPI Devices\n"));
-
- status = acpi_ns_initialize_devices();
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
- }
-
- /*
- * Empty the caches (delete the cached objects) on the assumption that
- * the table load filled them up more than they will be at runtime --
- * thus wasting non-paged memory.
- */
- status = acpi_purge_cached_objects();
-
- acpi_gbl_startup_flags |= ACPI_INITIALIZED_OK;
- return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_initialize_objects)
-
-#endif
/*******************************************************************************
*
* FUNCTION: acpi_terminate
@@ -683,3 +418,90 @@ acpi_check_address_range(acpi_adr_space_type space_id,
ACPI_EXPORT_SYMBOL(acpi_check_address_range)
#endif /* !ACPI_ASL_COMPILER */
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_decode_pld_buffer
+ *
+ * PARAMETERS: in_buffer - Buffer returned by _PLD method
+ * length - Length of the in_buffer
+ * return_buffer - Where the decode buffer is returned
+ *
+ * RETURN: Status and the decoded _PLD buffer. User must deallocate
+ * the buffer via ACPI_FREE.
+ *
+ * DESCRIPTION: Decode the bit-packed buffer returned by the _PLD method into
+ * a local struct that is much more useful to an ACPI driver.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_decode_pld_buffer(u8 *in_buffer,
+ acpi_size length, struct acpi_pld_info ** return_buffer)
+{
+ struct acpi_pld_info *pld_info;
+ u32 *buffer = ACPI_CAST_PTR(u32, in_buffer);
+ u32 dword;
+
+ /* Parameter validation */
+
+ if (!in_buffer || !return_buffer || (length < 16)) {
+ return (AE_BAD_PARAMETER);
+ }
+
+ pld_info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_pld_info));
+ if (!pld_info) {
+ return (AE_NO_MEMORY);
+ }
+
+ /* First 32-bit DWord */
+
+ ACPI_MOVE_32_TO_32(&dword, &buffer[0]);
+ pld_info->revision = ACPI_PLD_GET_REVISION(&dword);
+ pld_info->ignore_color = ACPI_PLD_GET_IGNORE_COLOR(&dword);
+ pld_info->color = ACPI_PLD_GET_COLOR(&dword);
+
+ /* Second 32-bit DWord */
+
+ ACPI_MOVE_32_TO_32(&dword, &buffer[1]);
+ pld_info->width = ACPI_PLD_GET_WIDTH(&dword);
+ pld_info->height = ACPI_PLD_GET_HEIGHT(&dword);
+
+ /* Third 32-bit DWord */
+
+ ACPI_MOVE_32_TO_32(&dword, &buffer[2]);
+ pld_info->user_visible = ACPI_PLD_GET_USER_VISIBLE(&dword);
+ pld_info->dock = ACPI_PLD_GET_DOCK(&dword);
+ pld_info->lid = ACPI_PLD_GET_LID(&dword);
+ pld_info->panel = ACPI_PLD_GET_PANEL(&dword);
+ pld_info->vertical_position = ACPI_PLD_GET_VERTICAL(&dword);
+ pld_info->horizontal_position = ACPI_PLD_GET_HORIZONTAL(&dword);
+ pld_info->shape = ACPI_PLD_GET_SHAPE(&dword);
+ pld_info->group_orientation = ACPI_PLD_GET_ORIENTATION(&dword);
+ pld_info->group_token = ACPI_PLD_GET_TOKEN(&dword);
+ pld_info->group_position = ACPI_PLD_GET_POSITION(&dword);
+ pld_info->bay = ACPI_PLD_GET_BAY(&dword);
+
+ /* Fourth 32-bit DWord */
+
+ ACPI_MOVE_32_TO_32(&dword, &buffer[3]);
+ pld_info->ejectable = ACPI_PLD_GET_EJECTABLE(&dword);
+ pld_info->ospm_eject_required = ACPI_PLD_GET_OSPM_EJECT(&dword);
+ pld_info->cabinet_number = ACPI_PLD_GET_CABINET(&dword);
+ pld_info->card_cage_number = ACPI_PLD_GET_CARD_CAGE(&dword);
+ pld_info->reference = ACPI_PLD_GET_REFERENCE(&dword);
+ pld_info->rotation = ACPI_PLD_GET_ROTATION(&dword);
+ pld_info->order = ACPI_PLD_GET_ORDER(&dword);
+
+ if (length >= ACPI_PLD_BUFFER_SIZE) {
+
+ /* Fifth 32-bit DWord (Revision 2 of _PLD) */
+
+ ACPI_MOVE_32_TO_32(&dword, &buffer[4]);
+ pld_info->vertical_offset = ACPI_PLD_GET_VERT_OFFSET(&dword);
+ pld_info->horizontal_offset = ACPI_PLD_GET_HORIZ_OFFSET(&dword);
+ }
+
+ *return_buffer = pld_info;
+ return (AE_OK);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_decode_pld_buffer)
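A hedged usage sketch for the new interface, assuming pld_obj is a union acpi_object of type ACPI_TYPE_BUFFER that the caller has already obtained by evaluating a device's _PLD (the evaluation step is omitted here):

	struct acpi_pld_info *pld;
	acpi_status status;

	status = acpi_decode_pld_buffer(pld_obj->buffer.pointer,
					pld_obj->buffer.length, &pld);
	if (ACPI_SUCCESS(status)) {
		/* consume the decoded fields, e.g. pld->panel, pld->lid, ... */
		ACPI_FREE(pld);		/* caller owns the decoded struct */
	}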
diff --git a/drivers/acpi/acpica/utxfinit.c b/drivers/acpi/acpica/utxfinit.c
new file mode 100644
index 0000000..14f5236
--- /dev/null
+++ b/drivers/acpi/acpica/utxfinit.c
@@ -0,0 +1,317 @@
+/******************************************************************************
+ *
+ * Module Name: utxfinit - External interfaces for ACPICA initialization
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2012, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <linux/export.h>
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acevents.h"
+#include "acnamesp.h"
+#include "acdebug.h"
+#include "actables.h"
+
+#define _COMPONENT ACPI_UTILITIES
+ACPI_MODULE_NAME("utxfinit")
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_initialize_subsystem
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Initializes all global variables. This is the first function
+ * called, so any early initialization belongs here.
+ *
+ ******************************************************************************/
+acpi_status acpi_initialize_subsystem(void)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_initialize_subsystem);
+
+ acpi_gbl_startup_flags = ACPI_SUBSYSTEM_INITIALIZE;
+ ACPI_DEBUG_EXEC(acpi_ut_init_stack_ptr_trace());
+
+ /* Initialize the OS-Dependent layer */
+
+ status = acpi_os_initialize();
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status, "During OSL initialization"));
+ return_ACPI_STATUS(status);
+ }
+
+ /* Initialize all globals used by the subsystem */
+
+ status = acpi_ut_init_globals();
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "During initialization of globals"));
+ return_ACPI_STATUS(status);
+ }
+
+ /* Create the default mutex objects */
+
+ status = acpi_ut_mutex_initialize();
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "During Global Mutex creation"));
+ return_ACPI_STATUS(status);
+ }
+
+ /*
+ * Initialize the namespace manager and
+ * the root of the namespace tree
+ */
+ status = acpi_ns_root_initialize();
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "During Namespace initialization"));
+ return_ACPI_STATUS(status);
+ }
+
+ /* Initialize the global OSI interfaces list with the static names */
+
+ status = acpi_ut_initialize_interfaces();
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "During OSI interfaces initialization"));
+ return_ACPI_STATUS(status);
+ }
+
+ /* If configured, initialize the AML debugger */
+
+ ACPI_DEBUGGER_EXEC(status = acpi_db_initialize());
+ return_ACPI_STATUS(status);
+}
+ACPI_EXPORT_SYMBOL(acpi_initialize_subsystem)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_enable_subsystem
+ *
+ * PARAMETERS: flags - Init/enable Options
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Completes the subsystem initialization including hardware.
+ * Puts system into ACPI mode if it isn't already.
+ *
+ ******************************************************************************/
+acpi_status acpi_enable_subsystem(u32 flags)
+{
+ acpi_status status = AE_OK;
+
+ ACPI_FUNCTION_TRACE(acpi_enable_subsystem);
+
+#if (!ACPI_REDUCED_HARDWARE)
+
+ /* Enable ACPI mode */
+
+ if (!(flags & ACPI_NO_ACPI_ENABLE)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "[Init] Going into ACPI mode\n"));
+
+ acpi_gbl_original_mode = acpi_hw_get_mode();
+
+ status = acpi_enable();
+ if (ACPI_FAILURE(status)) {
+ ACPI_WARNING((AE_INFO, "AcpiEnable failed"));
+ return_ACPI_STATUS(status);
+ }
+ }
+
+ /*
+ * Obtain a permanent mapping for the FACS. This is required for the
+ * Global Lock and the Firmware Waking Vector
+ */
+ status = acpi_tb_initialize_facs();
+ if (ACPI_FAILURE(status)) {
+ ACPI_WARNING((AE_INFO, "Could not map the FACS table"));
+ return_ACPI_STATUS(status);
+ }
+#endif /* !ACPI_REDUCED_HARDWARE */
+
+ /*
+ * Install the default op_region handlers. These are installed unless
+ * other handlers have already been installed via the
+ * install_address_space_handler interface.
+ */
+ if (!(flags & ACPI_NO_ADDRESS_SPACE_INIT)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "[Init] Installing default address space handlers\n"));
+
+ status = acpi_ev_install_region_handlers();
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ }
+#if (!ACPI_REDUCED_HARDWARE)
+ /*
+ * Initialize ACPI Event handling (Fixed and General Purpose)
+ *
+ * Note1: We must have the hardware and events initialized before we can
+ * execute any control methods safely. Any control method can require
+ * ACPI hardware support, so the hardware must be fully initialized before
+ * any method execution!
+ *
+ * Note2: Fixed events are initialized and enabled here. GPEs are
+ * initialized, but cannot be enabled until after the hardware is
+ * completely initialized (SCI and global_lock activated) and the various
+ * initialization control methods are run (_REG, _STA, _INI) on the
+ * entire namespace.
+ */
+ if (!(flags & ACPI_NO_EVENT_INIT)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "[Init] Initializing ACPI events\n"));
+
+ status = acpi_ev_initialize_events();
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ }
+
+ /*
+ * Install the SCI handler and Global Lock handler. This completes the
+ * hardware initialization.
+ */
+ if (!(flags & ACPI_NO_HANDLER_INIT)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "[Init] Installing SCI/GL handlers\n"));
+
+ status = acpi_ev_install_xrupt_handlers();
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ }
+#endif /* !ACPI_REDUCED_HARDWARE */
+
+ return_ACPI_STATUS(status);
+}
+ACPI_EXPORT_SYMBOL(acpi_enable_subsystem)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_initialize_objects
+ *
+ * PARAMETERS: flags - Init/enable Options
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Completes namespace initialization by initializing device
+ * objects and executing AML code for Regions, buffers, etc.
+ *
+ ******************************************************************************/
+acpi_status acpi_initialize_objects(u32 flags)
+{
+ acpi_status status = AE_OK;
+
+ ACPI_FUNCTION_TRACE(acpi_initialize_objects);
+
+ /*
+ * Run all _REG methods
+ *
+ * Note: Any objects accessed by the _REG methods will be automatically
+ * initialized, even if they contain executable AML (see the call to
+ * acpi_ns_initialize_objects below).
+ */
+ if (!(flags & ACPI_NO_ADDRESS_SPACE_INIT)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "[Init] Executing _REG OpRegion methods\n"));
+
+ status = acpi_ev_initialize_op_regions();
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ }
+
+ /*
+ * Execute any module-level code that was detected during the table load
+ * phase. Although illegal since ACPI 2.0, there are many machines that
+ * contain this type of code. Each block of detected executable AML code
+ * outside of any control method is wrapped with a temporary control
+ * method object and placed on a global list. The methods on this list
+ * are executed below.
+ */
+ acpi_ns_exec_module_code_list();
+
+ /*
+ * Initialize the objects that remain uninitialized. This runs the
+ * executable AML that may be part of the declaration of these objects:
+ * operation_regions, buffer_fields, Buffers, and Packages.
+ */
+ if (!(flags & ACPI_NO_OBJECT_INIT)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "[Init] Completing Initialization of ACPI Objects\n"));
+
+ status = acpi_ns_initialize_objects();
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ }
+
+ /*
+ * Initialize all device objects in the namespace. This runs the device
+ * _STA and _INI methods.
+ */
+ if (!(flags & ACPI_NO_DEVICE_INIT)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "[Init] Initializing ACPI Devices\n"));
+
+ status = acpi_ns_initialize_devices();
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ }
+
+ /*
+ * Empty the caches (delete the cached objects) on the assumption that
+ * the table load filled them up more than they will be at runtime --
+ * thus wasting non-paged memory.
+ */
+ status = acpi_purge_cached_objects();
+
+ acpi_gbl_startup_flags |= ACPI_INITIALIZED_OK;
+ return_ACPI_STATUS(status);
+}
+ACPI_EXPORT_SYMBOL(acpi_initialize_objects)
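For orientation, the three entry points gathered in this new file are called in a fixed order during host bring-up; a condensed sketch of the usual sequence (argument values are illustrative, error handling omitted):

	status = acpi_initialize_subsystem();	/* globals, mutexes, namespace root */
	status = acpi_initialize_tables(NULL, 16, FALSE);	/* root table array */
	status = acpi_load_tables();		/* DSDT/SSDTs into the namespace */
	status = acpi_enable_subsystem(ACPI_FULL_INITIALIZATION);
	status = acpi_initialize_objects(ACPI_FULL_INITIALIZATION);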
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index e059695..d59175e 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -994,8 +994,6 @@ static int __init acpi_bus_init(void)
status = acpi_ec_ecdt_probe();
/* Ignore result. Not having an ECDT is not fatal. */
- acpi_bus_osc_support();
-
status = acpi_initialize_objects(ACPI_FULL_INITIALIZATION);
if (ACPI_FAILURE(status)) {
printk(KERN_ERR PREFIX "Unable to initialize ACPI objects\n");
@@ -1003,6 +1001,12 @@ static int __init acpi_bus_init(void)
}
/*
+ * _OSC method may exist in module level code,
+ * so it must be run after ACPI_FULL_INITIALIZATION
+ */
+ acpi_bus_osc_support();
+
+ /*
* _PDC control method may load dynamic SSDT tables,
* and we need to install the table handler before that.
*/
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 314a3b8..f0d936b 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -450,15 +450,4 @@ static int acpi_button_remove(struct acpi_device *device, int type)
return 0;
}
-static int __init acpi_button_init(void)
-{
- return acpi_bus_register_driver(&acpi_button_driver);
-}
-
-static void __exit acpi_button_exit(void)
-{
- acpi_bus_unregister_driver(&acpi_button_driver);
-}
-
-module_init(acpi_button_init);
-module_exit(acpi_button_exit);
+module_acpi_driver(acpi_button_driver);
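The module_acpi_driver() helper absorbs the deleted boilerplate; assuming it wraps acpi_bus_register_driver()/acpi_bus_unregister_driver() exactly as the removed code did, module_acpi_driver(acpi_button_driver) generates something like:

	static int __init acpi_button_driver_init(void)
	{
		return acpi_bus_register_driver(&acpi_button_driver);
	}
	module_init(acpi_button_driver_init);

	static void __exit acpi_button_driver_exit(void)
	{
		acpi_bus_unregister_driver(&acpi_button_driver);
	}
	module_exit(acpi_button_driver_exit);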
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index bc36a47..3bd6a54 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -212,24 +212,4 @@ static int acpi_fan_resume(struct device *dev)
}
#endif
-static int __init acpi_fan_init(void)
-{
- int result = 0;
-
- result = acpi_bus_register_driver(&acpi_fan_driver);
- if (result < 0)
- return -ENODEV;
-
- return 0;
-}
-
-static void __exit acpi_fan_exit(void)
-{
-
- acpi_bus_unregister_driver(&acpi_fan_driver);
-
- return;
-}
-
-module_init(acpi_fan_init);
-module_exit(acpi_fan_exit);
+module_acpi_driver(acpi_fan_driver);
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index 243ee85..d1a2d74 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -25,6 +25,8 @@
static LIST_HEAD(bus_type_list);
static DECLARE_RWSEM(bus_type_sem);
+#define PHYSICAL_NODE_STRING "physical_node"
+
int register_acpi_bus_type(struct acpi_bus_type *type)
{
if (acpi_disabled)
@@ -124,84 +126,119 @@ acpi_handle acpi_get_child(acpi_handle parent, u64 address)
EXPORT_SYMBOL(acpi_get_child);
-/* Link ACPI devices with physical devices */
-static void acpi_glue_data_handler(acpi_handle handle,
- void *context)
-{
- /* we provide an empty handler */
-}
-
-/* Note: a success call will increase reference count by one */
-struct device *acpi_get_physical_device(acpi_handle handle)
-{
- acpi_status status;
- struct device *dev;
-
- status = acpi_get_data(handle, acpi_glue_data_handler, (void **)&dev);
- if (ACPI_SUCCESS(status))
- return get_device(dev);
- return NULL;
-}
-
-EXPORT_SYMBOL(acpi_get_physical_device);
-
static int acpi_bind_one(struct device *dev, acpi_handle handle)
{
struct acpi_device *acpi_dev;
acpi_status status;
+ struct acpi_device_physical_node *physical_node;
+ char physical_node_name[sizeof(PHYSICAL_NODE_STRING) + 2];
+ int retval = -EINVAL;
if (dev->archdata.acpi_handle) {
dev_warn(dev, "Drivers changed 'acpi_handle'\n");
return -EINVAL;
}
+
get_device(dev);
- status = acpi_attach_data(handle, acpi_glue_data_handler, dev);
- if (ACPI_FAILURE(status)) {
- put_device(dev);
- return -EINVAL;
+ status = acpi_bus_get_device(handle, &acpi_dev);
+ if (ACPI_FAILURE(status))
+ goto err;
+
+ physical_node = kzalloc(sizeof(struct acpi_device_physical_node),
+ GFP_KERNEL);
+ if (!physical_node) {
+ retval = -ENOMEM;
+ goto err;
}
- dev->archdata.acpi_handle = handle;
- status = acpi_bus_get_device(handle, &acpi_dev);
- if (!ACPI_FAILURE(status)) {
- int ret;
-
- ret = sysfs_create_link(&dev->kobj, &acpi_dev->dev.kobj,
- "firmware_node");
- ret = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj,
- "physical_node");
- if (acpi_dev->wakeup.flags.valid)
- device_set_wakeup_capable(dev, true);
+ mutex_lock(&acpi_dev->physical_node_lock);
+ /* allocate physical node id according to physical_node_id_bitmap */
+ physical_node->node_id =
+ find_first_zero_bit(acpi_dev->physical_node_id_bitmap,
+ ACPI_MAX_PHYSICAL_NODE);
+ if (physical_node->node_id >= ACPI_MAX_PHYSICAL_NODE) {
+ retval = -ENOSPC;
+ mutex_unlock(&acpi_dev->physical_node_lock);
+ goto err;
}
+ set_bit(physical_node->node_id, acpi_dev->physical_node_id_bitmap);
+ physical_node->dev = dev;
+ list_add_tail(&physical_node->node, &acpi_dev->physical_node_list);
+ acpi_dev->physical_node_count++;
+ mutex_unlock(&acpi_dev->physical_node_lock);
+
+ dev->archdata.acpi_handle = handle;
+
+ if (!physical_node->node_id)
+ strcpy(physical_node_name, PHYSICAL_NODE_STRING);
+ else
+ sprintf(physical_node_name,
+ "physical_node%d", physical_node->node_id);
+ retval = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj,
+ physical_node_name);
+ retval = sysfs_create_link(&dev->kobj, &acpi_dev->dev.kobj,
+ "firmware_node");
+
+ if (acpi_dev->wakeup.flags.valid)
+ device_set_wakeup_capable(dev, true);
+
return 0;
+
+ err:
+ put_device(dev);
+ return retval;
}
static int acpi_unbind_one(struct device *dev)
{
+ struct acpi_device_physical_node *entry;
+ struct acpi_device *acpi_dev;
+ acpi_status status;
+ struct list_head *node, *next;
+
if (!dev->archdata.acpi_handle)
return 0;
- if (dev == acpi_get_physical_device(dev->archdata.acpi_handle)) {
- struct acpi_device *acpi_dev;
- /* acpi_get_physical_device increase refcnt by one */
- put_device(dev);
+ status = acpi_bus_get_device(dev->archdata.acpi_handle,
+ &acpi_dev);
+ if (ACPI_FAILURE(status))
+ goto err;
- if (!acpi_bus_get_device(dev->archdata.acpi_handle,
- &acpi_dev)) {
- sysfs_remove_link(&dev->kobj, "firmware_node");
- sysfs_remove_link(&acpi_dev->dev.kobj, "physical_node");
- }
+ mutex_lock(&acpi_dev->physical_node_lock);
+ list_for_each_safe(node, next, &acpi_dev->physical_node_list) {
+ char physical_node_name[sizeof(PHYSICAL_NODE_STRING) + 2];
+
+ entry = list_entry(node, struct acpi_device_physical_node,
+ node);
+ if (entry->dev != dev)
+ continue;
+
+ list_del(node);
+ clear_bit(entry->node_id, acpi_dev->physical_node_id_bitmap);
- acpi_detach_data(dev->archdata.acpi_handle,
- acpi_glue_data_handler);
+ acpi_dev->physical_node_count--;
+
+ if (!entry->node_id)
+ strcpy(physical_node_name, PHYSICAL_NODE_STRING);
+ else
+ sprintf(physical_node_name,
+ "physical_node%d", entry->node_id);
+
+ sysfs_remove_link(&acpi_dev->dev.kobj, physical_node_name);
+ sysfs_remove_link(&dev->kobj, "firmware_node");
dev->archdata.acpi_handle = NULL;
		/* acpi_bind_one() increases the refcount by one */
put_device(dev);
- } else {
- dev_err(dev, "Oops, 'acpi_handle' corrupt\n");
+ kfree(entry);
}
+ mutex_unlock(&acpi_dev->physical_node_lock);
+
return 0;
+
+err:
+ dev_err(dev, "Oops, 'acpi_handle' corrupt\n");
+ return -EINVAL;
}
static int acpi_platform_notify(struct device *dev)
diff --git a/drivers/acpi/hed.c b/drivers/acpi/hed.c
index d0c1967..20a0f2c 100644
--- a/drivers/acpi/hed.c
+++ b/drivers/acpi/hed.c
@@ -86,25 +86,7 @@ static struct acpi_driver acpi_hed_driver = {
.notify = acpi_hed_notify,
},
};
-
-static int __init acpi_hed_init(void)
-{
- if (acpi_disabled)
- return -ENODEV;
-
- if (acpi_bus_register_driver(&acpi_hed_driver) < 0)
- return -ENODEV;
-
- return 0;
-}
-
-static void __exit acpi_hed_exit(void)
-{
- acpi_bus_unregister_driver(&acpi_hed_driver);
-}
-
-module_init(acpi_hed_init);
-module_exit(acpi_hed_exit);
+module_acpi_driver(acpi_hed_driver);
ACPI_MODULE_NAME("hed");
MODULE_AUTHOR("Huang Ying");
diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
index 251c7b62..27adb09 100644
--- a/drivers/acpi/proc.c
+++ b/drivers/acpi/proc.c
@@ -302,26 +302,41 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
list_for_each_safe(node, next, &acpi_wakeup_device_list) {
struct acpi_device *dev =
container_of(node, struct acpi_device, wakeup_list);
- struct device *ldev;
+ struct acpi_device_physical_node *entry;
if (!dev->wakeup.flags.valid)
continue;
- ldev = acpi_get_physical_device(dev->handle);
- seq_printf(seq, "%s\t S%d\t%c%-8s ",
+ seq_printf(seq, "%s\t S%d\t",
dev->pnp.bus_id,
- (u32) dev->wakeup.sleep_state,
- dev->wakeup.flags.run_wake ? '*' : ' ',
- (device_may_wakeup(&dev->dev)
- || (ldev && device_may_wakeup(ldev))) ?
- "enabled" : "disabled");
- if (ldev)
- seq_printf(seq, "%s:%s",
- ldev->bus ? ldev->bus->name : "no-bus",
- dev_name(ldev));
- seq_printf(seq, "\n");
- put_device(ldev);
-
+ (u32) dev->wakeup.sleep_state);
+
+ if (!dev->physical_node_count)
+ seq_printf(seq, "%c%-8s\n",
+ dev->wakeup.flags.run_wake ?
+ '*' : ' ', "disabled");
+ else {
+ struct device *ldev;
+ list_for_each_entry(entry, &dev->physical_node_list,
+ node) {
+ ldev = get_device(entry->dev);
+ if (!ldev)
+ continue;
+
+ if (&entry->node !=
+ dev->physical_node_list.next)
+ seq_printf(seq, "\t\t");
+
+ seq_printf(seq, "%c%-8s %s:%s\n",
+ dev->wakeup.flags.run_wake ? '*' : ' ',
+ (device_may_wakeup(&dev->dev) ||
+ (ldev && device_may_wakeup(ldev))) ?
+ "enabled" : "disabled",
+ ldev->bus ? ldev->bus->name :
+ "no-bus", dev_name(ldev));
+ put_device(ldev);
+ }
+ }
}
mutex_unlock(&acpi_device_lock);
return 0;
@@ -329,12 +344,14 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
static void physical_device_enable_wakeup(struct acpi_device *adev)
{
- struct device *dev = acpi_get_physical_device(adev->handle);
+ struct acpi_device_physical_node *entry;
- if (dev && device_can_wakeup(dev)) {
- bool enable = !device_may_wakeup(dev);
- device_set_wakeup_enable(dev, enable);
- }
+ list_for_each_entry(entry,
+ &adev->physical_node_list, node)
+ if (entry->dev && device_can_wakeup(entry->dev)) {
+ bool enable = !device_may_wakeup(entry->dev);
+ device_set_wakeup_enable(entry->dev, enable);
+ }
}
static ssize_t
diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
index f8d2a47..cf6129a 100644
--- a/drivers/acpi/sbshc.c
+++ b/drivers/acpi/sbshc.c
@@ -310,23 +310,7 @@ static int acpi_smbus_hc_remove(struct acpi_device *device, int type)
return 0;
}
-static int __init acpi_smb_hc_init(void)
-{
- int result;
-
- result = acpi_bus_register_driver(&acpi_smb_hc_driver);
- if (result < 0)
- return -ENODEV;
- return 0;
-}
-
-static void __exit acpi_smb_hc_exit(void)
-{
- acpi_bus_unregister_driver(&acpi_smb_hc_driver);
-}
-
-module_init(acpi_smb_hc_init);
-module_exit(acpi_smb_hc_exit);
+module_acpi_driver(acpi_smb_hc_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alexey Starikovskiy");
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index d1ecca2..1fcb867 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -10,6 +10,7 @@
#include <linux/signal.h>
#include <linux/kthread.h>
#include <linux/dmi.h>
+#include <linux/nls.h>
#include <acpi/acpi_drivers.h>
@@ -232,8 +233,35 @@ end:
}
static DEVICE_ATTR(path, 0444, acpi_device_path_show, NULL);
+/* sysfs file that shows description text from the ACPI _STR method */
+static ssize_t description_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct acpi_device *acpi_dev = to_acpi_device(dev);
+ int result;
+
+ if (acpi_dev->pnp.str_obj == NULL)
+ return 0;
+
+ /*
+ * The _STR object contains a Unicode identifier for a device.
+ * We need to convert to utf-8 so it can be displayed.
+ */
+ result = utf16s_to_utf8s(
+ (wchar_t *)acpi_dev->pnp.str_obj->buffer.pointer,
+ acpi_dev->pnp.str_obj->buffer.length,
+ UTF16_LITTLE_ENDIAN, buf,
+ PAGE_SIZE);
+
+ buf[result++] = '\n';
+
+ return result;
+}
+static DEVICE_ATTR(description, 0444, description_show, NULL);
+
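_STR hands back UTF-16LE, which utf16s_to_utf8s() re-encodes before it reaches the sysfs buffer. A hedged userspace sketch of that conversion, limited to BMP code points (the kernel helper also handles surrogate pairs):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* UTF-16LE to UTF-8, BMP code points only, no surrogate pairs. */
static size_t utf16le_to_utf8(const uint8_t *in, size_t in_len,
                              char *out, size_t out_size)
{
        size_t o = 0;
        size_t i;

        for (i = 0; i + 1 < in_len; i += 2) {
                uint16_t c = in[i] | (uint16_t)(in[i + 1] << 8);

                if (c < 0x80 && o + 1 <= out_size) {
                        out[o++] = (char)c;
                } else if (c >= 0x80 && c < 0x800 && o + 2 <= out_size) {
                        out[o++] = (char)(0xC0 | (c >> 6));
                        out[o++] = (char)(0x80 | (c & 0x3F));
                } else if (c >= 0x800 && o + 3 <= out_size) {
                        out[o++] = (char)(0xE0 | (c >> 12));
                        out[o++] = (char)(0x80 | ((c >> 6) & 0x3F));
                        out[o++] = (char)(0x80 | (c & 0x3F));
                }
        }
        return o;
}

int main(void)
{
        const uint8_t str[] = { 'F', 0, 'a', 0, 'n', 0 };  /* "Fan", UTF-16LE */
        char buf[16];
        size_t n = utf16le_to_utf8(str, sizeof(str), buf, sizeof(buf));

        printf("%.*s\n", (int)n, buf);
        return 0;
}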
static int acpi_device_setup_files(struct acpi_device *dev)
{
+ struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
acpi_status status;
acpi_handle temp;
int result = 0;
@@ -257,6 +285,21 @@ static int acpi_device_setup_files(struct acpi_device *dev)
goto end;
}
+ /*
+ * If device has _STR, 'description' file is created
+ */
+ status = acpi_get_handle(dev->handle, "_STR", &temp);
+ if (ACPI_SUCCESS(status)) {
+ status = acpi_evaluate_object(dev->handle, "_STR",
+ NULL, &buffer);
+ if (ACPI_FAILURE(status))
+ buffer.pointer = NULL;
+ dev->pnp.str_obj = buffer.pointer;
+ result = device_create_file(&dev->dev, &dev_attr_description);
+ if (result)
+ goto end;
+ }
+
/*
* If device has _EJ0, 'eject' file is created that is used to trigger
* hot-removal function from userland.
@@ -274,8 +317,15 @@ static void acpi_device_remove_files(struct acpi_device *dev)
acpi_handle temp;
/*
- * If device has _EJ0, 'eject' file is created that is used to trigger
- * hot-removal function from userland.
+ * If device has _STR, remove 'description' file
+ */
+ status = acpi_get_handle(dev->handle, "_STR", &temp);
+ if (ACPI_SUCCESS(status)) {
+ kfree(dev->pnp.str_obj);
+ device_remove_file(&dev->dev, &dev_attr_description);
+ }
+ /*
+ * If device has _EJ0, remove 'eject' file.
*/
status = acpi_get_handle(dev->handle, "_EJ0", &temp);
if (ACPI_SUCCESS(status))
@@ -481,6 +531,8 @@ static int acpi_device_register(struct acpi_device *device)
INIT_LIST_HEAD(&device->children);
INIT_LIST_HEAD(&device->node);
INIT_LIST_HEAD(&device->wakeup_list);
+ INIT_LIST_HEAD(&device->physical_node_list);
+ mutex_init(&device->physical_node_lock);
new_bus_id = kzalloc(sizeof(struct acpi_device_bus_id), GFP_KERNEL);
if (!new_bus_id) {
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index f336bca7..2572d97 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -240,10 +240,17 @@ acpi_table_parse_entries(char *id,
table_end) {
if (entry->type == entry_id
&& (!max_entries || count++ < max_entries))
- if (handler(entry, table_end)) {
- early_acpi_os_unmap_memory((char *)table_header, tbl_size);
- return -EINVAL;
- }
+ if (handler(entry, table_end))
+ goto err;
+
+ /*
+ * If entry->length is 0, break from this loop to avoid
+ * infinite loop.
+ */
+ if (entry->length == 0) {
+ pr_err(PREFIX "[%4.4s:0x%02x] Invalid zero length\n", id, entry_id);
+ goto err;
+ }
entry = (struct acpi_subtable_header *)
((unsigned long)entry + entry->length);
@@ -255,6 +262,9 @@ acpi_table_parse_entries(char *id,
early_acpi_os_unmap_memory((char *)table_header, tbl_size);
return count;
+err:
+ early_acpi_os_unmap_memory((char *)table_header, tbl_size);
+ return -EINVAL;
}
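
The new guard matters because the walk advances by entry->length on each pass; a corrupt zero-length subtable would pin the cursor in place forever. A compact demonstration with a simplified table layout:

#include <stdio.h>
#include <stdint.h>

struct subtable_header {        /* simplified acpi_subtable_header */
        uint8_t type;
        uint8_t length;
};

int main(void)
{
        /* Two valid entries, then a corrupt zero-length one. */
        uint8_t table[] = { 0, 2,   1, 2,   2, 0,   3, 2 };
        struct subtable_header *entry = (struct subtable_header *)table;
        uint8_t *end = table + sizeof(table);

        while ((uint8_t *)entry < end) {
                if (entry->length == 0) {
                        fprintf(stderr, "invalid zero length, stopping\n");
                        return 1;       /* the hunk above returns -EINVAL */
                }
                printf("entry type %u, length %u\n",
                       entry->type, entry->length);
                entry = (struct subtable_header *)
                        ((uint8_t *)entry + entry->length);
        }
        return 0;
}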
int __init
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 3e87c9c..462f7e3 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -384,7 +384,7 @@ acpi_evaluate_reference(acpi_handle handle,
EXPORT_SYMBOL(acpi_evaluate_reference);
acpi_status
-acpi_get_physical_device_location(acpi_handle handle, struct acpi_pld *pld)
+acpi_get_physical_device_location(acpi_handle handle, struct acpi_pld_info **pld)
{
acpi_status status;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -400,13 +400,16 @@ acpi_get_physical_device_location(acpi_handle handle, struct acpi_pld *pld)
if (!output || output->type != ACPI_TYPE_PACKAGE
|| !output->package.count
|| output->package.elements[0].type != ACPI_TYPE_BUFFER
- || output->package.elements[0].buffer.length > sizeof(*pld)) {
+ || output->package.elements[0].buffer.length < ACPI_PLD_REV1_BUFFER_SIZE) {
status = AE_TYPE;
goto out;
}
- memcpy(pld, output->package.elements[0].buffer.pointer,
- output->package.elements[0].buffer.length);
+ status = acpi_decode_pld_buffer(
+ output->package.elements[0].buffer.pointer,
+ output->package.elements[0].buffer.length,
+ pld);
+
out:
kfree(buffer.pointer);
return status;
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 7dda4f7..86c8821 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -248,26 +248,23 @@ static bool pages_correctly_reserved(unsigned long start_pfn,
static int
memory_block_action(unsigned long phys_index, unsigned long action)
{
- unsigned long start_pfn, start_paddr;
+ unsigned long start_pfn;
unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
struct page *first_page;
int ret;
first_page = pfn_to_page(phys_index << PFN_SECTION_SHIFT);
+ start_pfn = page_to_pfn(first_page);
switch (action) {
case MEM_ONLINE:
- start_pfn = page_to_pfn(first_page);
-
if (!pages_correctly_reserved(start_pfn, nr_pages))
return -EBUSY;
ret = online_pages(start_pfn, nr_pages);
break;
case MEM_OFFLINE:
- start_paddr = page_to_pfn(first_page) << PAGE_SHIFT;
- ret = remove_memory(start_paddr,
- nr_pages << PAGE_SHIFT);
+ ret = offline_pages(start_pfn, nr_pages);
break;
default:
WARN(1, KERN_WARNING "%s(%ld, %ld) unknown action: "
@@ -278,13 +275,11 @@ memory_block_action(unsigned long phys_index, unsigned long action)
return ret;
}
-static int memory_block_change_state(struct memory_block *mem,
+static int __memory_block_change_state(struct memory_block *mem,
unsigned long to_state, unsigned long from_state_req)
{
int ret = 0;
- mutex_lock(&mem->state_mutex);
-
if (mem->state != from_state_req) {
ret = -EINVAL;
goto out;
@@ -312,10 +307,20 @@ static int memory_block_change_state(struct memory_block *mem,
break;
}
out:
- mutex_unlock(&mem->state_mutex);
return ret;
}
+static int memory_block_change_state(struct memory_block *mem,
+ unsigned long to_state, unsigned long from_state_req)
+{
+ int ret;
+
+ mutex_lock(&mem->state_mutex);
+ ret = __memory_block_change_state(mem, to_state, from_state_req);
+ mutex_unlock(&mem->state_mutex);
+
+ return ret;
+}
static ssize_t
store_mem_state(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
@@ -656,6 +661,21 @@ int unregister_memory_section(struct mem_section *section)
}
/*
+ * Offline one memory block. If the memory block has already been offlined, do nothing.
+ */
+int offline_memory_block(struct memory_block *mem)
+{
+ int ret = 0;
+
+ mutex_lock(&mem->state_mutex);
+ if (mem->state != MEM_OFFLINE)
+ ret = __memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE);
+ mutex_unlock(&mem->state_mutex);
+
+ return ret;
+}
+
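This is the usual split into an __unlocked helper plus a locking wrapper: __memory_block_change_state() assumes state_mutex is held, so offline_memory_block() can take the lock once and combine its state test with the transition. The shape of the pattern as a small pthread sketch (build with -pthread; names are illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t state_mutex = PTHREAD_MUTEX_INITIALIZER;
static int state = 1;                   /* 1 = online, 0 = offline */

/* Caller must hold state_mutex (the "__" variant). */
static int __change_state(int to, int expected)
{
        if (state != expected)
                return -1;              /* -EINVAL in the kernel */
        state = to;
        return 0;
}

/* Plain variant: lock, delegate, unlock. */
static int change_state(int to, int expected)
{
        int ret;

        pthread_mutex_lock(&state_mutex);
        ret = __change_state(to, expected);
        pthread_mutex_unlock(&state_mutex);
        return ret;
}

/* Check-and-transition under one lock, like offline_memory_block(). */
static int offline_block(void)
{
        int ret = 0;

        pthread_mutex_lock(&state_mutex);
        if (state != 0)
                ret = __change_state(0, 1);
        pthread_mutex_unlock(&state_mutex);
        return ret;
}

int main(void)
{
        int ret;

        ret = offline_block();
        printf("offline: %d (state now %d)\n", ret, state);
        ret = offline_block();          /* already offline: no-op, 0 */
        printf("again:   %d (state now %d)\n", ret, state);
        ret = change_state(1, 0);       /* bring it back online */
        printf("online:  %d (state now %d)\n", ret, state);
        return 0;
}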
+/*
* Initialize the sysfs support for memory devices...
*/
int __init memory_dev_init(void)
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 54a55f0..bb3d9be 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -41,6 +41,8 @@
#include "rbd_types.h"
+#define RBD_DEBUG /* Activate rbd_assert() calls */
+
/*
* The basic unit of block I/O is a sector. It is interpreted in a
* number of contexts in Linux (blk, bio, genhd), but the default is
@@ -50,16 +52,24 @@
#define SECTOR_SHIFT 9
#define SECTOR_SIZE (1ULL << SECTOR_SHIFT)
+/* It might be useful to have this defined elsewhere too */
+
+#define U64_MAX ((u64) (~0ULL))
+
#define RBD_DRV_NAME "rbd"
#define RBD_DRV_NAME_LONG "rbd (rados block device)"
#define RBD_MINORS_PER_MAJOR 256 /* max minors per blkdev */
#define RBD_MAX_SNAP_NAME_LEN 32
+#define RBD_MAX_SNAP_COUNT 510 /* allows max snapc to fit in 4KB */
#define RBD_MAX_OPT_LEN 1024
#define RBD_SNAP_HEAD_NAME "-"
+#define RBD_IMAGE_ID_LEN_MAX 64
+#define RBD_OBJ_PREFIX_LEN_MAX 64
+
/*
* An RBD device name will be "rbd#", where the "rbd" comes from
* RBD_DRV_NAME above, and # is a unique integer identifier.
@@ -69,21 +79,22 @@
#define DEV_NAME_LEN 32
#define MAX_INT_FORMAT_WIDTH ((5 * sizeof (int)) / 2 + 1)
-#define RBD_NOTIFY_TIMEOUT_DEFAULT 10
+#define RBD_READ_ONLY_DEFAULT false
/*
* block device image metadata (in-memory version)
*/
struct rbd_image_header {
- u64 image_size;
+ /* These four fields never change for a given rbd image */
char *object_prefix;
+ u64 features;
__u8 obj_order;
__u8 crypt_type;
__u8 comp_type;
- struct ceph_snap_context *snapc;
- size_t snap_names_len;
- u32 total_snaps;
+ /* The remaining fields need to be updated occasionally */
+ u64 image_size;
+ struct ceph_snap_context *snapc;
char *snap_names;
u64 *snap_sizes;
@@ -91,7 +102,7 @@ struct rbd_image_header {
};
struct rbd_options {
- int notify_timeout;
+ bool read_only;
};
/*
@@ -99,7 +110,6 @@ struct rbd_options {
*/
struct rbd_client {
struct ceph_client *client;
- struct rbd_options *rbd_opts;
struct kref kref;
struct list_head node;
};
@@ -141,6 +151,16 @@ struct rbd_snap {
u64 size;
struct list_head node;
u64 id;
+ u64 features;
+};
+
+struct rbd_mapping {
+ char *snap_name;
+ u64 snap_id;
+ u64 size;
+ u64 features;
+ bool snap_exists;
+ bool read_only;
};
/*
@@ -151,8 +171,9 @@ struct rbd_device {
int major; /* blkdev assigned major */
struct gendisk *disk; /* blkdev's gendisk and rq */
- struct request_queue *q;
+ u32 image_format; /* Either 1 or 2 */
+ struct rbd_options rbd_opts;
struct rbd_client *rbd_client;
char name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */
@@ -160,6 +181,8 @@ struct rbd_device {
spinlock_t lock; /* queue lock */
struct rbd_image_header header;
+ char *image_id;
+ size_t image_id_len;
char *image_name;
size_t image_name_len;
char *header_name;
@@ -171,13 +194,8 @@ struct rbd_device {
/* protects updating the header */
struct rw_semaphore header_rwsem;
- /* name of the snapshot this device reads from */
- char *snap_name;
- /* id of the snapshot this device reads from */
- u64 snap_id; /* current snapshot id */
- /* whether the snap_id this device reads from still exists */
- bool snap_exists;
- int read_only;
+
+ struct rbd_mapping mapping;
struct list_head node;
@@ -196,12 +214,10 @@ static DEFINE_SPINLOCK(rbd_dev_list_lock);
static LIST_HEAD(rbd_client_list); /* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);
-static int __rbd_init_snaps_header(struct rbd_device *rbd_dev);
+static int rbd_dev_snaps_update(struct rbd_device *rbd_dev);
+static int rbd_dev_snaps_register(struct rbd_device *rbd_dev);
+
static void rbd_dev_release(struct device *dev);
-static ssize_t rbd_snap_add(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count);
static void __rbd_remove_snap_dev(struct rbd_snap *snap);
static ssize_t rbd_add(struct bus_type *bus, const char *buf,
@@ -229,6 +245,18 @@ static struct device rbd_root_dev = {
.release = rbd_root_dev_release,
};
+#ifdef RBD_DEBUG
+#define rbd_assert(expr) \
+ if (unlikely(!(expr))) { \
+ printk(KERN_ERR "\nAssertion failure in %s() " \
+ "at line %d:\n\n" \
+ "\trbd_assert(%s);\n\n", \
+ __func__, __LINE__, #expr); \
+ BUG(); \
+ }
+#else /* !RBD_DEBUG */
+# define rbd_assert(expr) ((void) 0)
+#endif /* !RBD_DEBUG */
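
A userspace analogue of rbd_assert(), with abort() standing in for BUG():

#include <stdio.h>
#include <stdlib.h>

#define my_assert(expr)                                                 \
do {                                                                    \
        if (!(expr)) {                                                  \
                fprintf(stderr,                                         \
                        "Assertion failure in %s() at line %d: "        \
                        "my_assert(%s)\n",                              \
                        __func__, __LINE__, #expr);                     \
                abort();                /* BUG() in the kernel */       \
        }                                                               \
} while (0)

int main(void)
{
        int total = 4, len = 4;

        my_assert(total == len);        /* passes silently */
        my_assert(total < len);         /* prints the report and aborts */
        return 0;
}

Note the kernel macro above is a bare if-statement; wrapping the body in do { } while (0), as here, is the form that composes safely with a following else.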
static struct device *rbd_get_dev(struct rbd_device *rbd_dev)
{
@@ -246,11 +274,11 @@ static int rbd_open(struct block_device *bdev, fmode_t mode)
{
struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
- if ((mode & FMODE_WRITE) && rbd_dev->read_only)
+ if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
return -EROFS;
rbd_get_dev(rbd_dev);
- set_device_ro(bdev, rbd_dev->read_only);
+ set_device_ro(bdev, rbd_dev->mapping.read_only);
return 0;
}
@@ -274,8 +302,7 @@ static const struct block_device_operations rbd_bd_ops = {
* Initialize an rbd client instance.
* We own *ceph_opts.
*/
-static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts,
- struct rbd_options *rbd_opts)
+static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
struct rbd_client *rbdc;
int ret = -ENOMEM;
@@ -299,8 +326,6 @@ static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts,
if (ret < 0)
goto out_err;
- rbdc->rbd_opts = rbd_opts;
-
spin_lock(&rbd_client_list_lock);
list_add_tail(&rbdc->node, &rbd_client_list);
spin_unlock(&rbd_client_list_lock);
@@ -322,36 +347,52 @@ out_opt:
}
/*
- * Find a ceph client with specific addr and configuration.
+ * Find a ceph client with specific addr and configuration. If
+ * found, bump its reference count.
*/
-static struct rbd_client *__rbd_client_find(struct ceph_options *ceph_opts)
+static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
struct rbd_client *client_node;
+ bool found = false;
if (ceph_opts->flags & CEPH_OPT_NOSHARE)
return NULL;
- list_for_each_entry(client_node, &rbd_client_list, node)
- if (!ceph_compare_options(ceph_opts, client_node->client))
- return client_node;
- return NULL;
+ spin_lock(&rbd_client_list_lock);
+ list_for_each_entry(client_node, &rbd_client_list, node) {
+ if (!ceph_compare_options(ceph_opts, client_node->client)) {
+ kref_get(&client_node->kref);
+ found = true;
+ break;
+ }
+ }
+ spin_unlock(&rbd_client_list_lock);
+
+ return found ? client_node : NULL;
}
/*
* mount options
*/
enum {
- Opt_notify_timeout,
Opt_last_int,
/* int args above */
Opt_last_string,
/* string args above */
+ Opt_read_only,
+ Opt_read_write,
+ /* Boolean args above */
+ Opt_last_bool,
};
static match_table_t rbd_opts_tokens = {
- {Opt_notify_timeout, "notify_timeout=%d"},
/* int args above */
/* string args above */
+ {Opt_read_only, "mapping.read_only"},
+ {Opt_read_only, "ro"}, /* Alternate spelling */
+ {Opt_read_write, "read_write"},
+ {Opt_read_write, "rw"}, /* Alternate spelling */
+ /* Boolean args above */
{-1, NULL}
};
@@ -376,16 +417,22 @@ static int parse_rbd_opts_token(char *c, void *private)
} else if (token > Opt_last_int && token < Opt_last_string) {
dout("got string token %d val %s\n", token,
argstr[0].from);
+ } else if (token > Opt_last_string && token < Opt_last_bool) {
+ dout("got Boolean token %d\n", token);
} else {
dout("got token %d\n", token);
}
switch (token) {
- case Opt_notify_timeout:
- rbd_opts->notify_timeout = intval;
+ case Opt_read_only:
+ rbd_opts->read_only = true;
+ break;
+ case Opt_read_write:
+ rbd_opts->read_only = false;
break;
default:
- BUG_ON(token);
+ rbd_assert(false);
+ break;
}
return 0;
}
@@ -394,48 +441,33 @@ static int parse_rbd_opts_token(char *c, void *private)
* Get a ceph client with specific addr and configuration, if one does
* not exist create it.
*/
-static struct rbd_client *rbd_get_client(const char *mon_addr,
- size_t mon_addr_len,
- char *options)
+static int rbd_get_client(struct rbd_device *rbd_dev, const char *mon_addr,
+ size_t mon_addr_len, char *options)
{
- struct rbd_client *rbdc;
+ struct rbd_options *rbd_opts = &rbd_dev->rbd_opts;
struct ceph_options *ceph_opts;
- struct rbd_options *rbd_opts;
-
- rbd_opts = kzalloc(sizeof(*rbd_opts), GFP_KERNEL);
- if (!rbd_opts)
- return ERR_PTR(-ENOMEM);
+ struct rbd_client *rbdc;
- rbd_opts->notify_timeout = RBD_NOTIFY_TIMEOUT_DEFAULT;
+ rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
ceph_opts = ceph_parse_options(options, mon_addr,
mon_addr + mon_addr_len,
parse_rbd_opts_token, rbd_opts);
- if (IS_ERR(ceph_opts)) {
- kfree(rbd_opts);
- return ERR_CAST(ceph_opts);
- }
+ if (IS_ERR(ceph_opts))
+ return PTR_ERR(ceph_opts);
- spin_lock(&rbd_client_list_lock);
- rbdc = __rbd_client_find(ceph_opts);
+ rbdc = rbd_client_find(ceph_opts);
if (rbdc) {
/* using an existing client */
- kref_get(&rbdc->kref);
- spin_unlock(&rbd_client_list_lock);
-
ceph_destroy_options(ceph_opts);
- kfree(rbd_opts);
-
- return rbdc;
+ } else {
+ rbdc = rbd_client_create(ceph_opts);
+ if (IS_ERR(rbdc))
+ return PTR_ERR(rbdc);
}
- spin_unlock(&rbd_client_list_lock);
-
- rbdc = rbd_client_create(ceph_opts, rbd_opts);
+ rbd_dev->rbd_client = rbdc;
- if (IS_ERR(rbdc))
- kfree(rbd_opts);
-
- return rbdc;
+ return 0;
}
/*
@@ -453,7 +485,6 @@ static void rbd_client_release(struct kref *kref)
spin_unlock(&rbd_client_list_lock);
ceph_destroy_client(rbdc->client);
- kfree(rbdc->rbd_opts);
kfree(rbdc);
}
@@ -479,10 +510,38 @@ static void rbd_coll_release(struct kref *kref)
kfree(coll);
}
+static bool rbd_image_format_valid(u32 image_format)
+{
+ return image_format == 1 || image_format == 2;
+}
+
static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
- return !memcmp(&ondisk->text,
- RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT));
+ size_t size;
+ u32 snap_count;
+
+ /* The header has to start with the magic rbd header text */
+ if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
+ return false;
+
+ /*
+ * The size of a snapshot header has to fit in a size_t, and
+ * that limits the number of snapshots.
+ */
+ snap_count = le32_to_cpu(ondisk->snap_count);
+ size = SIZE_MAX - sizeof (struct ceph_snap_context);
+ if (snap_count > size / sizeof (__le64))
+ return false;
+
+ /*
+ * Not only that, but the size of the entire snapshot
+ * header must also be representable in a size_t.
+ */
+ size -= snap_count * sizeof (__le64);
+ if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
+ return false;
+
+ return true;
}
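
The two size checks are overflow-safe versions of "header + id array + name block must fit in a size_t": the id count is bounded with a division rather than a multiplication, then the names length is tested against the remainder. The same arithmetic in runnable form (the struct below is a stand-in for ceph_snap_context):

#include <stdio.h>
#include <stdint.h>

struct snap_context_hdr {       /* stand-in for ceph_snap_context */
        uint64_t seq;
        uint32_t num;
};

static int snap_counts_valid(uint32_t snap_count, uint64_t names_len)
{
        size_t size = SIZE_MAX - sizeof(struct snap_context_hdr);

        /* Bound the id array with a division, never a multiplication. */
        if (snap_count > size / sizeof(uint64_t))
                return 0;

        /* Then the name block must fit in whatever room is left. */
        size -= (size_t)snap_count * sizeof(uint64_t);
        if ((uint64_t)size < names_len)
                return 0;

        return 1;
}

int main(void)
{
        printf("%d\n", snap_counts_valid(510, 4096));     /* 1: fits */
        printf("%d\n", snap_counts_valid(1, UINT64_MAX)); /* 0: too big */
        return 0;
}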
/*
@@ -490,179 +549,203 @@ static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
* header.
*/
static int rbd_header_from_disk(struct rbd_image_header *header,
- struct rbd_image_header_ondisk *ondisk,
- u32 allocated_snaps)
+ struct rbd_image_header_ondisk *ondisk)
{
u32 snap_count;
+ size_t len;
+ size_t size;
+ u32 i;
- if (!rbd_dev_ondisk_valid(ondisk))
- return -ENXIO;
+ memset(header, 0, sizeof (*header));
snap_count = le32_to_cpu(ondisk->snap_count);
- if (snap_count > (SIZE_MAX - sizeof(struct ceph_snap_context))
- / sizeof (u64))
- return -EINVAL;
- header->snapc = kmalloc(sizeof(struct ceph_snap_context) +
- snap_count * sizeof(u64),
- GFP_KERNEL);
- if (!header->snapc)
+
+ len = strnlen(ondisk->object_prefix, sizeof (ondisk->object_prefix));
+ header->object_prefix = kmalloc(len + 1, GFP_KERNEL);
+ if (!header->object_prefix)
return -ENOMEM;
+ memcpy(header->object_prefix, ondisk->object_prefix, len);
+ header->object_prefix[len] = '\0';
if (snap_count) {
- header->snap_names_len = le64_to_cpu(ondisk->snap_names_len);
- header->snap_names = kmalloc(header->snap_names_len,
- GFP_KERNEL);
+ u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);
+
+ /* Save a copy of the snapshot names */
+
+ if (snap_names_len > (u64) SIZE_MAX)
+ return -EIO;
+ header->snap_names = kmalloc(snap_names_len, GFP_KERNEL);
if (!header->snap_names)
- goto err_snapc;
- header->snap_sizes = kmalloc(snap_count * sizeof(u64),
- GFP_KERNEL);
+ goto out_err;
+ /*
+ * Note that rbd_dev_v1_header_read() guarantees the
+ * ondisk buffer we're working with has snap_names_len
+ * bytes beyond the end of the snapshot id array, so
+ * this memcpy() is safe.
+ */
+ memcpy(header->snap_names, &ondisk->snaps[snap_count],
+ snap_names_len);
+
+ /* Record each snapshot's size */
+
+ size = snap_count * sizeof (*header->snap_sizes);
+ header->snap_sizes = kmalloc(size, GFP_KERNEL);
if (!header->snap_sizes)
- goto err_names;
+ goto out_err;
+ for (i = 0; i < snap_count; i++)
+ header->snap_sizes[i] =
+ le64_to_cpu(ondisk->snaps[i].image_size);
} else {
WARN_ON(ondisk->snap_names_len);
- header->snap_names_len = 0;
header->snap_names = NULL;
header->snap_sizes = NULL;
}
- header->object_prefix = kmalloc(sizeof (ondisk->block_name) + 1,
- GFP_KERNEL);
- if (!header->object_prefix)
- goto err_sizes;
-
- memcpy(header->object_prefix, ondisk->block_name,
- sizeof(ondisk->block_name));
- header->object_prefix[sizeof (ondisk->block_name)] = '\0';
-
- header->image_size = le64_to_cpu(ondisk->image_size);
+ header->features = 0; /* No features support in v1 images */
header->obj_order = ondisk->options.order;
header->crypt_type = ondisk->options.crypt_type;
header->comp_type = ondisk->options.comp_type;
+ /* Allocate and fill in the snapshot context */
+
+ header->image_size = le64_to_cpu(ondisk->image_size);
+ size = sizeof (struct ceph_snap_context);
+ size += snap_count * sizeof (header->snapc->snaps[0]);
+ header->snapc = kzalloc(size, GFP_KERNEL);
+ if (!header->snapc)
+ goto out_err;
+
atomic_set(&header->snapc->nref, 1);
header->snapc->seq = le64_to_cpu(ondisk->snap_seq);
header->snapc->num_snaps = snap_count;
- header->total_snaps = snap_count;
-
- if (snap_count && allocated_snaps == snap_count) {
- int i;
-
- for (i = 0; i < snap_count; i++) {
- header->snapc->snaps[i] =
- le64_to_cpu(ondisk->snaps[i].id);
- header->snap_sizes[i] =
- le64_to_cpu(ondisk->snaps[i].image_size);
- }
-
- /* copy snapshot names */
- memcpy(header->snap_names, &ondisk->snaps[snap_count],
- header->snap_names_len);
- }
+ for (i = 0; i < snap_count; i++)
+ header->snapc->snaps[i] =
+ le64_to_cpu(ondisk->snaps[i].id);
return 0;
-err_sizes:
+out_err:
kfree(header->snap_sizes);
header->snap_sizes = NULL;
-err_names:
kfree(header->snap_names);
header->snap_names = NULL;
-err_snapc:
- kfree(header->snapc);
- header->snapc = NULL;
+ kfree(header->object_prefix);
+ header->object_prefix = NULL;
return -ENOMEM;
}
-static int snap_by_name(struct rbd_image_header *header, const char *snap_name,
- u64 *seq, u64 *size)
+static int snap_by_name(struct rbd_device *rbd_dev, const char *snap_name)
{
- int i;
- char *p = header->snap_names;
- for (i = 0; i < header->total_snaps; i++) {
- if (!strcmp(snap_name, p)) {
+ struct rbd_snap *snap;
- /* Found it. Pass back its id and/or size */
+ list_for_each_entry(snap, &rbd_dev->snaps, node) {
+ if (!strcmp(snap_name, snap->name)) {
+ rbd_dev->mapping.snap_id = snap->id;
+ rbd_dev->mapping.size = snap->size;
+ rbd_dev->mapping.features = snap->features;
- if (seq)
- *seq = header->snapc->snaps[i];
- if (size)
- *size = header->snap_sizes[i];
- return i;
+ return 0;
}
- p += strlen(p) + 1; /* Skip ahead to the next name */
}
+
return -ENOENT;
}
-static int rbd_header_set_snap(struct rbd_device *rbd_dev, u64 *size)
+static int rbd_dev_set_mapping(struct rbd_device *rbd_dev, char *snap_name)
{
int ret;
- down_write(&rbd_dev->header_rwsem);
-
- if (!memcmp(rbd_dev->snap_name, RBD_SNAP_HEAD_NAME,
+ if (!memcmp(snap_name, RBD_SNAP_HEAD_NAME,
sizeof (RBD_SNAP_HEAD_NAME))) {
- rbd_dev->snap_id = CEPH_NOSNAP;
- rbd_dev->snap_exists = false;
- rbd_dev->read_only = 0;
- if (size)
- *size = rbd_dev->header.image_size;
+ rbd_dev->mapping.snap_id = CEPH_NOSNAP;
+ rbd_dev->mapping.size = rbd_dev->header.image_size;
+ rbd_dev->mapping.features = rbd_dev->header.features;
+ rbd_dev->mapping.snap_exists = false;
+ rbd_dev->mapping.read_only = rbd_dev->rbd_opts.read_only;
+ ret = 0;
} else {
- u64 snap_id = 0;
-
- ret = snap_by_name(&rbd_dev->header, rbd_dev->snap_name,
- &snap_id, size);
+ ret = snap_by_name(rbd_dev, snap_name);
if (ret < 0)
goto done;
- rbd_dev->snap_id = snap_id;
- rbd_dev->snap_exists = true;
- rbd_dev->read_only = 1;
+ rbd_dev->mapping.snap_exists = true;
+ rbd_dev->mapping.read_only = true;
}
-
- ret = 0;
+ rbd_dev->mapping.snap_name = snap_name;
done:
- up_write(&rbd_dev->header_rwsem);
return ret;
}
static void rbd_header_free(struct rbd_image_header *header)
{
kfree(header->object_prefix);
+ header->object_prefix = NULL;
kfree(header->snap_sizes);
+ header->snap_sizes = NULL;
kfree(header->snap_names);
+ header->snap_names = NULL;
ceph_put_snap_context(header->snapc);
+ header->snapc = NULL;
}
-/*
- * get the actual striped segment name, offset and length
- */
-static u64 rbd_get_segment(struct rbd_image_header *header,
- const char *object_prefix,
- u64 ofs, u64 len,
- char *seg_name, u64 *segofs)
+static char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
+{
+ char *name;
+ u64 segment;
+ int ret;
+
+ name = kmalloc(RBD_MAX_SEG_NAME_LEN + 1, GFP_NOIO);
+ if (!name)
+ return NULL;
+ segment = offset >> rbd_dev->header.obj_order;
+ ret = snprintf(name, RBD_MAX_SEG_NAME_LEN, "%s.%012llx",
+ rbd_dev->header.object_prefix, segment);
+ if (ret < 0 || ret >= RBD_MAX_SEG_NAME_LEN) {
+ pr_err("error formatting segment name for #%llu (%d)\n",
+ segment, ret);
+ kfree(name);
+ name = NULL;
+ }
+
+ return name;
+}
+
+static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
{
- u64 seg = ofs >> header->obj_order;
+ u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;
- if (seg_name)
- snprintf(seg_name, RBD_MAX_SEG_NAME_LEN,
- "%s.%012llx", object_prefix, seg);
+ return offset & (segment_size - 1);
+}
+
+static u64 rbd_segment_length(struct rbd_device *rbd_dev,
+ u64 offset, u64 length)
+{
+ u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;
- ofs = ofs & ((1 << header->obj_order) - 1);
- len = min_t(u64, len, (1 << header->obj_order) - ofs);
+ offset &= segment_size - 1;
- if (segofs)
- *segofs = ofs;
+ rbd_assert(length <= U64_MAX - offset);
+ if (offset + length > segment_size)
+ length = segment_size - offset;
- return len;
+ return length;
}
static int rbd_get_num_segments(struct rbd_image_header *header,
u64 ofs, u64 len)
{
- u64 start_seg = ofs >> header->obj_order;
- u64 end_seg = (ofs + len - 1) >> header->obj_order;
+ u64 start_seg;
+ u64 end_seg;
+
+ if (!len)
+ return 0;
+ if (len - 1 > U64_MAX - ofs)
+ return -ERANGE;
+
+ start_seg = ofs >> header->obj_order;
+ end_seg = (ofs + len - 1) >> header->obj_order;
+
return end_seg - start_seg + 1;
}
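
All of these helpers are shift/mask arithmetic on obj_order, with one object spanning 1 << obj_order bytes; rbd_segment_length() additionally clamps an I/O so it never crosses an object boundary. A self-contained sketch of the same math (obj_order 22, i.e. 4 MiB objects, is chosen here purely for illustration):

#include <stdio.h>
#include <stdint.h>

static const uint8_t obj_order = 22;    /* 4 MiB objects, for illustration */

static uint64_t segment_of(uint64_t offset)
{
        return offset >> obj_order;     /* which object the byte lands in */
}

static uint64_t segment_offset(uint64_t offset)
{
        return offset & (((uint64_t)1 << obj_order) - 1);
}

/* Clamp an I/O so it never crosses an object boundary. */
static uint64_t segment_length(uint64_t offset, uint64_t length)
{
        uint64_t segment_size = (uint64_t)1 << obj_order;

        offset &= segment_size - 1;
        if (offset + length > segment_size)
                length = segment_size - offset;
        return length;
}

int main(void)
{
        uint64_t ofs = (5ULL << 22) + 100;      /* 100 bytes into object 5 */

        printf("segment %llu, offset %llu, first chunk %llu of 8192\n",
               (unsigned long long)segment_of(ofs),
               (unsigned long long)segment_offset(ofs),
               (unsigned long long)segment_length(ofs, 8192));
        return 0;
}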
@@ -724,7 +807,9 @@ static struct bio *bio_chain_clone(struct bio **old, struct bio **next,
struct bio_pair **bp,
int len, gfp_t gfpmask)
{
- struct bio *tmp, *old_chain = *old, *new_chain = NULL, *tail = NULL;
+ struct bio *old_chain = *old;
+ struct bio *new_chain = NULL;
+ struct bio *tail;
int total = 0;
if (*bp) {
@@ -733,9 +818,12 @@ static struct bio *bio_chain_clone(struct bio **old, struct bio **next,
}
while (old_chain && (total < len)) {
+ struct bio *tmp;
+
tmp = bio_kmalloc(gfpmask, old_chain->bi_max_vecs);
if (!tmp)
goto err_out;
+ gfpmask &= ~__GFP_WAIT; /* can't wait after the first */
if (total + old_chain->bi_size > len) {
struct bio_pair *bp;
@@ -763,24 +851,18 @@ static struct bio *bio_chain_clone(struct bio **old, struct bio **next,
}
tmp->bi_bdev = NULL;
- gfpmask &= ~__GFP_WAIT;
tmp->bi_next = NULL;
-
- if (!new_chain) {
- new_chain = tail = tmp;
- } else {
+ if (new_chain)
tail->bi_next = tmp;
- tail = tmp;
- }
+ else
+ new_chain = tmp;
+ tail = tmp;
old_chain = old_chain->bi_next;
total += tmp->bi_size;
}
- BUG_ON(total < len);
-
- if (tail)
- tail->bi_next = NULL;
+ rbd_assert(total == len);
*old = old_chain;
@@ -938,8 +1020,9 @@ static int rbd_do_request(struct request *rq,
layout->fl_stripe_count = cpu_to_le32(1);
layout->fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
layout->fl_pg_pool = cpu_to_le32(rbd_dev->pool_id);
- ceph_calc_raw_layout(osdc, layout, snapid, ofs, &len, &bno,
- req, ops);
+ ret = ceph_calc_raw_layout(osdc, layout, snapid, ofs, &len, &bno,
+ req, ops);
+ rbd_assert(ret == 0);
ceph_osdc_build_request(req, ofs, &len,
ops,
@@ -1030,8 +1113,8 @@ static int rbd_req_sync_op(struct rbd_device *rbd_dev,
int flags,
struct ceph_osd_req_op *ops,
const char *object_name,
- u64 ofs, u64 len,
- char *buf,
+ u64 ofs, u64 inbound_size,
+ char *inbound,
struct ceph_osd_request **linger_req,
u64 *ver)
{
@@ -1039,15 +1122,15 @@ static int rbd_req_sync_op(struct rbd_device *rbd_dev,
struct page **pages;
int num_pages;
- BUG_ON(ops == NULL);
+ rbd_assert(ops != NULL);
- num_pages = calc_pages_for(ofs , len);
+ num_pages = calc_pages_for(ofs, inbound_size);
pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
if (IS_ERR(pages))
return PTR_ERR(pages);
ret = rbd_do_request(NULL, rbd_dev, snapc, snapid,
- object_name, ofs, len, NULL,
+ object_name, ofs, inbound_size, NULL,
pages, num_pages,
flags,
ops,
@@ -1057,8 +1140,8 @@ static int rbd_req_sync_op(struct rbd_device *rbd_dev,
if (ret < 0)
goto done;
- if ((flags & CEPH_OSD_FLAG_READ) && buf)
- ret = ceph_copy_from_page_vector(pages, buf, ofs, ret);
+ if ((flags & CEPH_OSD_FLAG_READ) && inbound)
+ ret = ceph_copy_from_page_vector(pages, inbound, ofs, ret);
done:
ceph_release_page_vector(pages, num_pages);
@@ -1085,14 +1168,11 @@ static int rbd_do_op(struct request *rq,
struct ceph_osd_req_op *ops;
u32 payload_len;
- seg_name = kmalloc(RBD_MAX_SEG_NAME_LEN + 1, GFP_NOIO);
+ seg_name = rbd_segment_name(rbd_dev, ofs);
if (!seg_name)
return -ENOMEM;
-
- seg_len = rbd_get_segment(&rbd_dev->header,
- rbd_dev->header.object_prefix,
- ofs, len,
- seg_name, &seg_ofs);
+ seg_len = rbd_segment_length(rbd_dev, ofs, len);
+ seg_ofs = rbd_segment_offset(rbd_dev, ofs);
payload_len = (flags & CEPH_OSD_FLAG_WRITE ? seg_len : 0);
@@ -1104,7 +1184,7 @@ static int rbd_do_op(struct request *rq,
/* we've taken care of segment sizes earlier when we
cloned the bios. We should never have a segment
truncated at this point */
- BUG_ON(seg_len < len);
+ rbd_assert(seg_len == len);
ret = rbd_do_request(rq, rbd_dev, snapc, snapid,
seg_name, seg_ofs, seg_len,
@@ -1306,89 +1386,36 @@ static int rbd_req_sync_unwatch(struct rbd_device *rbd_dev)
return ret;
}
-struct rbd_notify_info {
- struct rbd_device *rbd_dev;
-};
-
-static void rbd_notify_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
-{
- struct rbd_device *rbd_dev = (struct rbd_device *)data;
- if (!rbd_dev)
- return;
-
- dout("rbd_notify_cb %s notify_id=%llu opcode=%u\n",
- rbd_dev->header_name, (unsigned long long) notify_id,
- (unsigned int) opcode);
-}
-
/*
- * Request sync osd notify
- */
-static int rbd_req_sync_notify(struct rbd_device *rbd_dev)
-{
- struct ceph_osd_req_op *ops;
- struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
- struct ceph_osd_event *event;
- struct rbd_notify_info info;
- int payload_len = sizeof(u32) + sizeof(u32);
- int ret;
-
- ops = rbd_create_rw_ops(1, CEPH_OSD_OP_NOTIFY, payload_len);
- if (!ops)
- return -ENOMEM;
-
- info.rbd_dev = rbd_dev;
-
- ret = ceph_osdc_create_event(osdc, rbd_notify_cb, 1,
- (void *)&info, &event);
- if (ret < 0)
- goto fail;
-
- ops[0].watch.ver = 1;
- ops[0].watch.flag = 1;
- ops[0].watch.cookie = event->cookie;
- ops[0].watch.prot_ver = RADOS_NOTIFY_VER;
- ops[0].watch.timeout = 12;
-
- ret = rbd_req_sync_op(rbd_dev, NULL,
- CEPH_NOSNAP,
- CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
- ops,
- rbd_dev->header_name,
- 0, 0, NULL, NULL, NULL);
- if (ret < 0)
- goto fail_event;
-
- ret = ceph_osdc_wait_event(event, CEPH_OSD_TIMEOUT_DEFAULT);
- dout("ceph_osdc_wait_event returned %d\n", ret);
- rbd_destroy_ops(ops);
- return 0;
-
-fail_event:
- ceph_osdc_cancel_event(event);
-fail:
- rbd_destroy_ops(ops);
- return ret;
-}
-
-/*
- * Request sync osd read
+ * Synchronous osd object method call
*/
static int rbd_req_sync_exec(struct rbd_device *rbd_dev,
const char *object_name,
const char *class_name,
const char *method_name,
- const char *data,
- int len,
+ const char *outbound,
+ size_t outbound_size,
+ char *inbound,
+ size_t inbound_size,
+ int flags,
u64 *ver)
{
struct ceph_osd_req_op *ops;
int class_name_len = strlen(class_name);
int method_name_len = strlen(method_name);
+ int payload_size;
int ret;
- ops = rbd_create_rw_ops(1, CEPH_OSD_OP_CALL,
- class_name_len + method_name_len + len);
+ /*
+ * Any input parameters required by the method we're calling
+ * will be sent along with the class and method names as
+ * part of the message payload. That data and its size are
+ * supplied via the indata and indata_len fields (named from
+ * the perspective of the server side) in the OSD request
+ * operation.
+ */
+ payload_size = class_name_len + method_name_len + outbound_size;
+ ops = rbd_create_rw_ops(1, CEPH_OSD_OP_CALL, payload_size);
if (!ops)
return -ENOMEM;
@@ -1397,14 +1424,14 @@ static int rbd_req_sync_exec(struct rbd_device *rbd_dev,
ops[0].cls.method_name = method_name;
ops[0].cls.method_len = (__u8) method_name_len;
ops[0].cls.argc = 0;
- ops[0].cls.indata = data;
- ops[0].cls.indata_len = len;
+ ops[0].cls.indata = outbound;
+ ops[0].cls.indata_len = outbound_size;
ret = rbd_req_sync_op(rbd_dev, NULL,
CEPH_NOSNAP,
- CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
- ops,
- object_name, 0, 0, NULL, NULL, ver);
+ flags, ops,
+ object_name, 0, inbound_size, inbound,
+ NULL, ver);
rbd_destroy_ops(ops);
@@ -1446,10 +1473,6 @@ static void rbd_rq_fn(struct request_queue *q)
struct rbd_req_coll *coll;
struct ceph_snap_context *snapc;
- /* peek at request from block layer */
- if (!rq)
- break;
-
dout("fetched request\n");
/* filter out block requests we don't understand */
@@ -1464,7 +1487,7 @@ static void rbd_rq_fn(struct request_queue *q)
size = blk_rq_bytes(rq);
ofs = blk_rq_pos(rq) * SECTOR_SIZE;
rq_bio = rq->bio;
- if (do_write && rbd_dev->read_only) {
+ if (do_write && rbd_dev->mapping.read_only) {
__blk_end_request_all(rq, -EROFS);
continue;
}
@@ -1473,7 +1496,8 @@ static void rbd_rq_fn(struct request_queue *q)
down_read(&rbd_dev->header_rwsem);
- if (rbd_dev->snap_id != CEPH_NOSNAP && !rbd_dev->snap_exists) {
+ if (rbd_dev->mapping.snap_id != CEPH_NOSNAP &&
+ !rbd_dev->mapping.snap_exists) {
up_read(&rbd_dev->header_rwsem);
dout("request for non-existent snapshot");
spin_lock_irq(q->queue_lock);
@@ -1490,6 +1514,12 @@ static void rbd_rq_fn(struct request_queue *q)
size, (unsigned long long) blk_rq_pos(rq) * SECTOR_SIZE);
num_segs = rbd_get_num_segments(&rbd_dev->header, ofs, size);
+ if (num_segs <= 0) {
+ spin_lock_irq(q->queue_lock);
+ __blk_end_request_all(rq, num_segs);
+ ceph_put_snap_context(snapc);
+ continue;
+ }
coll = rbd_alloc_coll(num_segs);
if (!coll) {
spin_lock_irq(q->queue_lock);
@@ -1501,10 +1531,7 @@ static void rbd_rq_fn(struct request_queue *q)
do {
/* a bio clone to be passed down to OSD req */
dout("rq->bio->bi_vcnt=%hu\n", rq->bio->bi_vcnt);
- op_size = rbd_get_segment(&rbd_dev->header,
- rbd_dev->header.object_prefix,
- ofs, size,
- NULL, NULL);
+ op_size = rbd_segment_length(rbd_dev, ofs, size);
kref_get(&coll->kref);
bio = bio_chain_clone(&rq_bio, &next_bio, &bp,
op_size, GFP_ATOMIC);
@@ -1524,7 +1551,7 @@ static void rbd_rq_fn(struct request_queue *q)
coll, cur_seg);
else
rbd_req_read(rq, rbd_dev,
- rbd_dev->snap_id,
+ rbd_dev->mapping.snap_id,
ofs,
op_size, bio,
coll, cur_seg);
@@ -1580,8 +1607,6 @@ static void rbd_free_disk(struct rbd_device *rbd_dev)
if (!disk)
return;
- rbd_header_free(&rbd_dev->header);
-
if (disk->flags & GENHD_FL_UP)
del_gendisk(disk);
if (disk->queue)
@@ -1590,105 +1615,96 @@ static void rbd_free_disk(struct rbd_device *rbd_dev)
}
/*
- * reload the ondisk the header
+ * Read the complete header for the given rbd device.
+ *
+ * Returns a pointer to a dynamically-allocated buffer containing
+ * the complete and validated header. Caller can pass the address
+ * of a variable that will be filled in with the version of the
+ * header object at the time it was read.
+ *
+ * Returns a pointer-coded errno if a failure occurs.
*/
-static int rbd_read_header(struct rbd_device *rbd_dev,
- struct rbd_image_header *header)
+static struct rbd_image_header_ondisk *
+rbd_dev_v1_header_read(struct rbd_device *rbd_dev, u64 *version)
{
- ssize_t rc;
- struct rbd_image_header_ondisk *dh;
+ struct rbd_image_header_ondisk *ondisk = NULL;
u32 snap_count = 0;
- u64 ver;
- size_t len;
+ u64 names_size = 0;
+ u32 want_count;
+ int ret;
/*
- * First reads the fixed-size header to determine the number
- * of snapshots, then re-reads it, along with all snapshot
- * records as well as their stored names.
+ * The complete header will include an array of its 64-bit
+ * snapshot ids, followed by the names of those snapshots as
+ * a contiguous block of NUL-terminated strings. Note that
+ * the number of snapshots could change by the time we read
+ * it in, in which case we re-read it.
*/
- len = sizeof (*dh);
- while (1) {
- dh = kmalloc(len, GFP_KERNEL);
- if (!dh)
- return -ENOMEM;
-
- rc = rbd_req_sync_read(rbd_dev,
- CEPH_NOSNAP,
+ do {
+ size_t size;
+
+ kfree(ondisk);
+
+ size = sizeof (*ondisk);
+ size += snap_count * sizeof (struct rbd_image_snap_ondisk);
+ size += names_size;
+ ondisk = kmalloc(size, GFP_KERNEL);
+ if (!ondisk)
+ return ERR_PTR(-ENOMEM);
+
+ ret = rbd_req_sync_read(rbd_dev, CEPH_NOSNAP,
rbd_dev->header_name,
- 0, len,
- (char *)dh, &ver);
- if (rc < 0)
- goto out_dh;
-
- rc = rbd_header_from_disk(header, dh, snap_count);
- if (rc < 0) {
- if (rc == -ENXIO)
- pr_warning("unrecognized header format"
- " for image %s\n",
- rbd_dev->image_name);
- goto out_dh;
+ 0, size,
+ (char *) ondisk, version);
+
+ if (ret < 0)
+ goto out_err;
+ if (WARN_ON((size_t) ret < size)) {
+ ret = -ENXIO;
+ pr_warning("short header read for image %s"
+ " (want %zd got %d)\n",
+ rbd_dev->image_name, size, ret);
+ goto out_err;
+ }
+ if (!rbd_dev_ondisk_valid(ondisk)) {
+ ret = -ENXIO;
+ pr_warning("invalid header for image %s\n",
+ rbd_dev->image_name);
+ goto out_err;
}
- if (snap_count == header->total_snaps)
- break;
+ names_size = le64_to_cpu(ondisk->snap_names_len);
+ want_count = snap_count;
+ snap_count = le32_to_cpu(ondisk->snap_count);
+ } while (snap_count != want_count);
- snap_count = header->total_snaps;
- len = sizeof (*dh) +
- snap_count * sizeof(struct rbd_image_snap_ondisk) +
- header->snap_names_len;
+ return ondisk;
- rbd_header_free(header);
- kfree(dh);
- }
- header->obj_version = ver;
+out_err:
+ kfree(ondisk);
-out_dh:
- kfree(dh);
- return rc;
+ return ERR_PTR(ret);
}
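
The do/while above sizes the buffer for the snapshot count seen on the previous pass and re-reads until the count stops moving. The control flow, reduced to a runnable sketch with read_header() standing in for rbd_req_sync_read():

#include <stdio.h>
#include <stdlib.h>

static int on_disk_snap_count = 3;      /* may change between reads */

/* Stand-in for rbd_req_sync_read(): reports the current count. */
static int read_header(int *snap_count)
{
        *snap_count = on_disk_snap_count;
        if (on_disk_snap_count == 3)
                on_disk_snap_count = 5; /* simulate a concurrent change */
        return 0;
}

int main(void)
{
        char *buf = NULL;
        int snap_count = 0;     /* count the buffer was sized for */
        int want_count;

        do {
                size_t size = 64 + (size_t)snap_count * 8;

                free(buf);
                buf = malloc(size);     /* header + room for the id array */
                if (!buf)
                        return 1;
                want_count = snap_count;
                if (read_header(&snap_count))
                        return 1;
                printf("sized for %d snapshots, found %d\n",
                       want_count, snap_count);
        } while (snap_count != want_count);

        free(buf);
        return 0;
}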
/*
- * create a snapshot
+ * reload the on-disk header
*/
-static int rbd_header_add_snap(struct rbd_device *rbd_dev,
- const char *snap_name,
- gfp_t gfp_flags)
+static int rbd_read_header(struct rbd_device *rbd_dev,
+ struct rbd_image_header *header)
{
- int name_len = strlen(snap_name);
- u64 new_snapid;
+ struct rbd_image_header_ondisk *ondisk;
+ u64 ver = 0;
int ret;
- void *data, *p, *e;
- struct ceph_mon_client *monc;
-
- /* we should create a snapshot only if we're pointing at the head */
- if (rbd_dev->snap_id != CEPH_NOSNAP)
- return -EINVAL;
- monc = &rbd_dev->rbd_client->client->monc;
- ret = ceph_monc_create_snapid(monc, rbd_dev->pool_id, &new_snapid);
- dout("created snapid=%llu\n", (unsigned long long) new_snapid);
- if (ret < 0)
- return ret;
-
- data = kmalloc(name_len + 16, gfp_flags);
- if (!data)
- return -ENOMEM;
+ ondisk = rbd_dev_v1_header_read(rbd_dev, &ver);
+ if (IS_ERR(ondisk))
+ return PTR_ERR(ondisk);
+ ret = rbd_header_from_disk(header, ondisk);
+ if (ret >= 0)
+ header->obj_version = ver;
+ kfree(ondisk);
- p = data;
- e = data + name_len + 16;
-
- ceph_encode_string_safe(&p, e, snap_name, name_len, bad);
- ceph_encode_64_safe(&p, e, new_snapid, bad);
-
- ret = rbd_req_sync_exec(rbd_dev, rbd_dev->header_name,
- "rbd", "snap_add",
- data, p - data, NULL);
-
- kfree(data);
-
- return ret < 0 ? ret : 0;
-bad:
- return -ERANGE;
+ return ret;
}
static void __rbd_remove_all_snaps(struct rbd_device *rbd_dev)
@@ -1715,11 +1731,15 @@ static int __rbd_refresh_header(struct rbd_device *rbd_dev, u64 *hver)
down_write(&rbd_dev->header_rwsem);
/* resized? */
- if (rbd_dev->snap_id == CEPH_NOSNAP) {
+ if (rbd_dev->mapping.snap_id == CEPH_NOSNAP) {
sector_t size = (sector_t) h.image_size / SECTOR_SIZE;
- dout("setting size to %llu sectors", (unsigned long long) size);
- set_capacity(rbd_dev->disk, size);
+ if (size != (sector_t) rbd_dev->mapping.size) {
+ dout("setting size to %llu sectors",
+ (unsigned long long) size);
+ rbd_dev->mapping.size = (u64) size;
+ set_capacity(rbd_dev->disk, size);
+ }
}
/* rbd_dev->header.object_prefix shouldn't change */
@@ -1732,16 +1752,16 @@ static int __rbd_refresh_header(struct rbd_device *rbd_dev, u64 *hver)
*hver = h.obj_version;
rbd_dev->header.obj_version = h.obj_version;
rbd_dev->header.image_size = h.image_size;
- rbd_dev->header.total_snaps = h.total_snaps;
rbd_dev->header.snapc = h.snapc;
rbd_dev->header.snap_names = h.snap_names;
- rbd_dev->header.snap_names_len = h.snap_names_len;
rbd_dev->header.snap_sizes = h.snap_sizes;
/* Free the extra copy of the object prefix */
WARN_ON(strcmp(rbd_dev->header.object_prefix, h.object_prefix));
kfree(h.object_prefix);
- ret = __rbd_init_snaps_header(rbd_dev);
+ ret = rbd_dev_snaps_update(rbd_dev);
+ if (!ret)
+ ret = rbd_dev_snaps_register(rbd_dev);
up_write(&rbd_dev->header_rwsem);
@@ -1763,29 +1783,12 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
{
struct gendisk *disk;
struct request_queue *q;
- int rc;
u64 segment_size;
- u64 total_size = 0;
-
- /* contact OSD, request size info about the object being mapped */
- rc = rbd_read_header(rbd_dev, &rbd_dev->header);
- if (rc)
- return rc;
-
- /* no need to lock here, as rbd_dev is not registered yet */
- rc = __rbd_init_snaps_header(rbd_dev);
- if (rc)
- return rc;
-
- rc = rbd_header_set_snap(rbd_dev, &total_size);
- if (rc)
- return rc;
/* create gendisk info */
- rc = -ENOMEM;
disk = alloc_disk(RBD_MINORS_PER_MAJOR);
if (!disk)
- goto out;
+ return -ENOMEM;
snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
rbd_dev->dev_id);
@@ -1795,7 +1798,6 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
disk->private_data = rbd_dev;
/* init rq */
- rc = -ENOMEM;
q = blk_init_queue(rbd_rq_fn, &rbd_dev->lock);
if (!q)
goto out_disk;
@@ -1816,20 +1818,14 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
q->queuedata = rbd_dev;
rbd_dev->disk = disk;
- rbd_dev->q = q;
- /* finally, announce the disk to the world */
- set_capacity(disk, total_size / SECTOR_SIZE);
- add_disk(disk);
+ set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
- pr_info("%s: added with size 0x%llx\n",
- disk->disk_name, (unsigned long long)total_size);
return 0;
-
out_disk:
put_disk(disk);
-out:
- return rc;
+
+ return -ENOMEM;
}
/*
@@ -1854,6 +1850,19 @@ static ssize_t rbd_size_show(struct device *dev,
return sprintf(buf, "%llu\n", (unsigned long long) size * SECTOR_SIZE);
}
+/*
+ * Note this shows the features for whatever's mapped, which is not
+ * necessarily the base image.
+ */
+static ssize_t rbd_features_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
+
+ return sprintf(buf, "0x%016llx\n",
+ (unsigned long long) rbd_dev->mapping.features);
+}
+
static ssize_t rbd_major_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -1895,13 +1904,25 @@ static ssize_t rbd_name_show(struct device *dev,
return sprintf(buf, "%s\n", rbd_dev->image_name);
}
+static ssize_t rbd_image_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
+
+ return sprintf(buf, "%s\n", rbd_dev->image_id);
+}
+
+/*
+ * Shows the name of the currently-mapped snapshot (or
+ * RBD_SNAP_HEAD_NAME for the base image).
+ */
static ssize_t rbd_snap_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
- return sprintf(buf, "%s\n", rbd_dev->snap_name);
+ return sprintf(buf, "%s\n", rbd_dev->mapping.snap_name);
}
static ssize_t rbd_image_refresh(struct device *dev,
@@ -1918,25 +1939,27 @@ static ssize_t rbd_image_refresh(struct device *dev,
}
static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
+static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
+static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
-static DEVICE_ATTR(create_snap, S_IWUSR, NULL, rbd_snap_add);
static struct attribute *rbd_attrs[] = {
&dev_attr_size.attr,
+ &dev_attr_features.attr,
&dev_attr_major.attr,
&dev_attr_client_id.attr,
&dev_attr_pool.attr,
&dev_attr_pool_id.attr,
&dev_attr_name.attr,
+ &dev_attr_image_id.attr,
&dev_attr_current_snap.attr,
&dev_attr_refresh.attr,
- &dev_attr_create_snap.attr,
NULL
};
@@ -1982,12 +2005,24 @@ static ssize_t rbd_snap_id_show(struct device *dev,
return sprintf(buf, "%llu\n", (unsigned long long)snap->id);
}
+static ssize_t rbd_snap_features_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
+
+ return sprintf(buf, "0x%016llx\n",
+ (unsigned long long) snap->features);
+}
+
static DEVICE_ATTR(snap_size, S_IRUGO, rbd_snap_size_show, NULL);
static DEVICE_ATTR(snap_id, S_IRUGO, rbd_snap_id_show, NULL);
+static DEVICE_ATTR(snap_features, S_IRUGO, rbd_snap_features_show, NULL);
static struct attribute *rbd_snap_attrs[] = {
&dev_attr_snap_size.attr,
&dev_attr_snap_id.attr,
+ &dev_attr_snap_features.attr,
NULL,
};
@@ -2012,10 +2047,21 @@ static struct device_type rbd_snap_device_type = {
.release = rbd_snap_dev_release,
};
+static bool rbd_snap_registered(struct rbd_snap *snap)
+{
+ bool ret = snap->dev.type == &rbd_snap_device_type;
+ bool reg = device_is_registered(&snap->dev);
+
+ rbd_assert(!ret ^ reg);
+
+ return ret;
+}
+
static void __rbd_remove_snap_dev(struct rbd_snap *snap)
{
list_del(&snap->node);
- device_unregister(&snap->dev);
+ if (device_is_registered(&snap->dev))
+ device_unregister(&snap->dev);
}
static int rbd_register_snap_dev(struct rbd_snap *snap,
@@ -2028,13 +2074,17 @@ static int rbd_register_snap_dev(struct rbd_snap *snap,
dev->parent = parent;
dev->release = rbd_snap_dev_release;
dev_set_name(dev, "snap_%s", snap->name);
+ dout("%s: registering device for snapshot %s\n", __func__, snap->name);
+
ret = device_register(dev);
return ret;
}
static struct rbd_snap *__rbd_add_snap_dev(struct rbd_device *rbd_dev,
- int i, const char *name)
+ const char *snap_name,
+ u64 snap_id, u64 snap_size,
+ u64 snap_features)
{
struct rbd_snap *snap;
int ret;
@@ -2044,17 +2094,13 @@ static struct rbd_snap *__rbd_add_snap_dev(struct rbd_device *rbd_dev,
return ERR_PTR(-ENOMEM);
ret = -ENOMEM;
- snap->name = kstrdup(name, GFP_KERNEL);
+ snap->name = kstrdup(snap_name, GFP_KERNEL);
if (!snap->name)
goto err;
- snap->size = rbd_dev->header.snap_sizes[i];
- snap->id = rbd_dev->header.snapc->snaps[i];
- if (device_is_registered(&rbd_dev->dev)) {
- ret = rbd_register_snap_dev(snap, &rbd_dev->dev);
- if (ret < 0)
- goto err;
- }
+ snap->id = snap_id;
+ snap->size = snap_size;
+ snap->features = snap_features;
return snap;
@@ -2065,128 +2111,439 @@ err:
return ERR_PTR(ret);
}
+static char *rbd_dev_v1_snap_info(struct rbd_device *rbd_dev, u32 which,
+ u64 *snap_size, u64 *snap_features)
+{
+ char *snap_name;
+
+ rbd_assert(which < rbd_dev->header.snapc->num_snaps);
+
+ *snap_size = rbd_dev->header.snap_sizes[which];
+ *snap_features = 0; /* No features for v1 */
+
+ /* Skip over names until we find the one we are looking for */
+
+ snap_name = rbd_dev->header.snap_names;
+ while (which--)
+ snap_name += strlen(snap_name) + 1;
+
+ return snap_name;
+}
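
Format 1 stores snapshot names as one contiguous block of NUL-terminated strings in snapshot-id order, so indexing is the strlen walk seen above. Standalone:

#include <stdio.h>
#include <string.h>

/* Return the which'th name in a NUL-delimited string block. */
static const char *name_at(const char *names, unsigned int which)
{
        while (which--)
                names += strlen(names) + 1;     /* hop one name + its NUL */
        return names;
}

int main(void)
{
        /* Laid out the way a v1 header stores them. */
        static const char names[] = "snap1\0snap2\0snap3";
        unsigned int i;

        for (i = 0; i < 3; i++)
                printf("snapshot %u: %s\n", i, name_at(names, i));
        return 0;
}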
+
/*
- * search for the previous snap in a null delimited string list
+ * Get the size and object order for an image snapshot, or if
+ * snap_id is CEPH_NOSNAP, gets this information for the base
+ * image.
*/
-const char *rbd_prev_snap_name(const char *name, const char *start)
+static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
+ u8 *order, u64 *snap_size)
{
- if (name < start + 2)
- return NULL;
+ __le64 snapid = cpu_to_le64(snap_id);
+ int ret;
+ struct {
+ u8 order;
+ __le64 size;
+ } __attribute__ ((packed)) size_buf = { 0 };
+
+ ret = rbd_req_sync_exec(rbd_dev, rbd_dev->header_name,
+ "rbd", "get_size",
+ (char *) &snapid, sizeof (snapid),
+ (char *) &size_buf, sizeof (size_buf),
+ CEPH_OSD_FLAG_READ, NULL);
+ dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret);
+ if (ret < 0)
+ return ret;
+
+ *order = size_buf.order;
+ *snap_size = le64_to_cpu(size_buf.size);
+
+ dout(" snap_id 0x%016llx order = %u, snap_size = %llu\n",
+ (unsigned long long) snap_id, (unsigned int) *order,
+ (unsigned long long) *snap_size);
+
+ return 0;
+}
+
+static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
+{
+ return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
+ &rbd_dev->header.obj_order,
+ &rbd_dev->header.image_size);
+}
+
+static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
+{
+ void *reply_buf;
+ int ret;
+ void *p;
+
+ reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
+ if (!reply_buf)
+ return -ENOMEM;
+
+ ret = rbd_req_sync_exec(rbd_dev, rbd_dev->header_name,
+ "rbd", "get_object_prefix",
+ NULL, 0,
+ reply_buf, RBD_OBJ_PREFIX_LEN_MAX,
+ CEPH_OSD_FLAG_READ, NULL);
+ dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret);
+ if (ret < 0)
+ goto out;
+
+ p = reply_buf;
+ rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
+ p + RBD_OBJ_PREFIX_LEN_MAX,
+ NULL, GFP_NOIO);
+
+ if (IS_ERR(rbd_dev->header.object_prefix)) {
+ ret = PTR_ERR(rbd_dev->header.object_prefix);
+ rbd_dev->header.object_prefix = NULL;
+ } else {
+ dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
+ }
- name -= 2;
- while (*name) {
- if (name == start)
- return start;
- name--;
+out:
+ kfree(reply_buf);
+
+ return ret;
+}
+
+static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
+ u64 *snap_features)
+{
+ __le64 snapid = cpu_to_le64(snap_id);
+ struct {
+ __le64 features;
+ __le64 incompat;
+ } features_buf = { 0 };
+ int ret;
+
+ ret = rbd_req_sync_exec(rbd_dev, rbd_dev->header_name,
+ "rbd", "get_features",
+ (char *) &snapid, sizeof (snapid),
+ (char *) &features_buf, sizeof (features_buf),
+ CEPH_OSD_FLAG_READ, NULL);
+ dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret);
+ if (ret < 0)
+ return ret;
+ *snap_features = le64_to_cpu(features_buf.features);
+
+ dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
+ (unsigned long long) snap_id,
+ (unsigned long long) *snap_features,
+ (unsigned long long) le64_to_cpu(features_buf.incompat));
+
+ return 0;
+}
+
+static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
+{
+ return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
+ &rbd_dev->header.features);
+}
+
+static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev, u64 *ver)
+{
+ size_t size;
+ int ret;
+ void *reply_buf;
+ void *p;
+ void *end;
+ u64 seq;
+ u32 snap_count;
+ struct ceph_snap_context *snapc;
+ u32 i;
+
+ /*
+ * We'll need room for the seq value (maximum snapshot id),
+ * snapshot count, and array of that many snapshot ids.
+ * For now we have a fixed upper limit on the number we're
+ * prepared to receive.
+ */
+ size = sizeof (__le64) + sizeof (__le32) +
+ RBD_MAX_SNAP_COUNT * sizeof (__le64);
+ reply_buf = kzalloc(size, GFP_KERNEL);
+ if (!reply_buf)
+ return -ENOMEM;
+
+ ret = rbd_req_sync_exec(rbd_dev, rbd_dev->header_name,
+ "rbd", "get_snapcontext",
+ NULL, 0,
+ reply_buf, size,
+ CEPH_OSD_FLAG_READ, ver);
+ dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret);
+ if (ret < 0)
+ goto out;
+
+ ret = -ERANGE;
+ p = reply_buf;
+ end = (char *) reply_buf + size;
+ ceph_decode_64_safe(&p, end, seq, out);
+ ceph_decode_32_safe(&p, end, snap_count, out);
+
+ /*
+ * Make sure the reported number of snapshot ids wouldn't go
+ * beyond the end of our buffer. But before checking that,
+ * make sure the computed size of the snapshot context we
+ * allocate is representable in a size_t.
+ */
+ if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
+ / sizeof (u64)) {
+ ret = -EINVAL;
+ goto out;
}
- return name + 1;
+ if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
+ goto out;
+
+ size = sizeof (struct ceph_snap_context) +
+ snap_count * sizeof (snapc->snaps[0]);
+ snapc = kmalloc(size, GFP_KERNEL);
+ if (!snapc) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ atomic_set(&snapc->nref, 1);
+ snapc->seq = seq;
+ snapc->num_snaps = snap_count;
+ for (i = 0; i < snap_count; i++)
+ snapc->snaps[i] = ceph_decode_64(&p);
+
+ rbd_dev->header.snapc = snapc;
+
+ dout(" snap context seq = %llu, snap_count = %u\n",
+ (unsigned long long) seq, (unsigned int) snap_count);
+
+ ret = 0;
+out:
+ kfree(reply_buf);
+
+ return ret;
}
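+
+/*
+ * For reference, the get_snapcontext reply decoded above is laid out as
+ *
+ *	__le64	seq;			maximum snapshot id issued
+ *	__le32	snap_count;
+ *	__le64	snaps[snap_count];	snapshot ids, highest first
+ *
+ * which is why the buffer is sized as one __le64 plus one __le32 plus
+ * RBD_MAX_SNAP_COUNT __le64s.
+ */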
-/*
- * compare the old list of snapshots that we have to what's in the header
- * and update it accordingly. Note that the header holds the snapshots
- * in a reverse order (from newest to oldest) and we need to go from
- * older to new so that we don't get a duplicate snap name when
- * doing the process (e.g., removed snapshot and recreated a new
- * one with the same name.
- */
-static int __rbd_init_snaps_header(struct rbd_device *rbd_dev)
+static char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev, u32 which)
{
- const char *name, *first_name;
- int i = rbd_dev->header.total_snaps;
- struct rbd_snap *snap, *old_snap = NULL;
- struct list_head *p, *n;
+ size_t size;
+ void *reply_buf;
+ __le64 snap_id;
+ int ret;
+ void *p;
+ void *end;
+ size_t snap_name_len;
+ char *snap_name;
+
+ size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
+ reply_buf = kmalloc(size, GFP_KERNEL);
+ if (!reply_buf)
+ return ERR_PTR(-ENOMEM);
- first_name = rbd_dev->header.snap_names;
- name = first_name + rbd_dev->header.snap_names_len;
+ snap_id = cpu_to_le64(rbd_dev->header.snapc->snaps[which]);
+ ret = rbd_req_sync_exec(rbd_dev, rbd_dev->header_name,
+ "rbd", "get_snapshot_name",
+ (char *) &snap_id, sizeof (snap_id),
+ reply_buf, size,
+ CEPH_OSD_FLAG_READ, NULL);
+ dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret);
+ if (ret < 0)
+ goto out;
- list_for_each_prev_safe(p, n, &rbd_dev->snaps) {
- u64 cur_id;
+ p = reply_buf;
+ end = (char *) reply_buf + size;
+ snap_name_len = 0;
+ snap_name = ceph_extract_encoded_string(&p, end, &snap_name_len,
+ GFP_KERNEL);
+ if (IS_ERR(snap_name)) {
+ ret = PTR_ERR(snap_name);
+ goto out;
+ } else {
+ dout(" snap_id 0x%016llx snap_name = %s\n",
+ (unsigned long long) le64_to_cpu(snap_id), snap_name);
+ }
+ kfree(reply_buf);
- old_snap = list_entry(p, struct rbd_snap, node);
+ return snap_name;
+out:
+ kfree(reply_buf);
- if (i)
- cur_id = rbd_dev->header.snapc->snaps[i - 1];
+ return ERR_PTR(ret);
+}
- if (!i || old_snap->id < cur_id) {
- /*
- * old_snap->id was skipped, thus was
- * removed. If this rbd_dev is mapped to
- * the removed snapshot, record that it no
- * longer exists, to prevent further I/O.
- */
- if (rbd_dev->snap_id == old_snap->id)
- rbd_dev->snap_exists = false;
- __rbd_remove_snap_dev(old_snap);
- continue;
- }
- if (old_snap->id == cur_id) {
- /* we have this snapshot already */
- i--;
- name = rbd_prev_snap_name(name, first_name);
+static char *rbd_dev_v2_snap_info(struct rbd_device *rbd_dev, u32 which,
+ u64 *snap_size, u64 *snap_features)
+{
+ u64 snap_id;
+ u8 order;
+ int ret;
+
+ snap_id = rbd_dev->header.snapc->snaps[which];
+ ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, &order, snap_size);
+ if (ret)
+ return ERR_PTR(ret);
+ ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, snap_features);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return rbd_dev_v2_snap_name(rbd_dev, which);
+}
+
+static char *rbd_dev_snap_info(struct rbd_device *rbd_dev, u32 which,
+ u64 *snap_size, u64 *snap_features)
+{
+ if (rbd_dev->image_format == 1)
+ return rbd_dev_v1_snap_info(rbd_dev, which,
+ snap_size, snap_features);
+ if (rbd_dev->image_format == 2)
+ return rbd_dev_v2_snap_info(rbd_dev, which,
+ snap_size, snap_features);
+ return ERR_PTR(-EINVAL);
+}
+
+/*
+ * Scan the rbd device's current snapshot list and compare it to the
+ * newly-received snapshot context. Remove any existing snapshots
+ * not present in the new snapshot context. Add a new snapshot for
+ * any snapshots in the snapshot context not in the current list.
+ * And verify there are no changes to snapshots we already know
+ * about.
+ *
+ * Assumes the snapshots in the snapshot context are sorted by
+ * snapshot id, highest id first. (Snapshots in the rbd_dev's list
+ * are also maintained in that order.)
+ */
+static int rbd_dev_snaps_update(struct rbd_device *rbd_dev)
+{
+ struct ceph_snap_context *snapc = rbd_dev->header.snapc;
+ const u32 snap_count = snapc->num_snaps;
+ struct list_head *head = &rbd_dev->snaps;
+ struct list_head *links = head->next;
+ u32 index = 0;
+
+ dout("%s: snap count is %u\n", __func__, (unsigned int) snap_count);
+ while (index < snap_count || links != head) {
+ u64 snap_id;
+ struct rbd_snap *snap;
+ char *snap_name;
+ u64 snap_size = 0;
+ u64 snap_features = 0;
+
+ snap_id = index < snap_count ? snapc->snaps[index]
+ : CEPH_NOSNAP;
+ snap = links != head ? list_entry(links, struct rbd_snap, node)
+ : NULL;
+ rbd_assert(!snap || snap->id != CEPH_NOSNAP);
+
+ if (snap_id == CEPH_NOSNAP || (snap && snap->id > snap_id)) {
+ struct list_head *next = links->next;
+
+ /* Existing snapshot not in the new snap context */
+
+ if (rbd_dev->mapping.snap_id == snap->id)
+ rbd_dev->mapping.snap_exists = false;
+ __rbd_remove_snap_dev(snap);
+ dout("%ssnap id %llu has been removed\n",
+ rbd_dev->mapping.snap_id == snap->id ?
+ "mapped " : "",
+ (unsigned long long) snap->id);
+
+ /* Done with this list entry; advance */
+
+ links = next;
continue;
}
- for (; i > 0;
- i--, name = rbd_prev_snap_name(name, first_name)) {
- if (!name) {
- WARN_ON(1);
- return -EINVAL;
+
+ snap_name = rbd_dev_snap_info(rbd_dev, index,
+ &snap_size, &snap_features);
+ if (IS_ERR(snap_name))
+ return PTR_ERR(snap_name);
+
+ dout("entry %u: snap_id = %llu\n", (unsigned int) snap_count,
+ (unsigned long long) snap_id);
+ if (!snap || (snap_id != CEPH_NOSNAP && snap->id < snap_id)) {
+ struct rbd_snap *new_snap;
+
+ /* We haven't seen this snapshot before */
+
+ new_snap = __rbd_add_snap_dev(rbd_dev, snap_name,
+ snap_id, snap_size, snap_features);
+ if (IS_ERR(new_snap)) {
+ int err = PTR_ERR(new_snap);
+
+ dout(" failed to add dev, error %d\n", err);
+
+ return err;
}
- cur_id = rbd_dev->header.snapc->snaps[i];
- /* snapshot removal? handle it above */
- if (cur_id >= old_snap->id)
- break;
- /* a new snapshot */
- snap = __rbd_add_snap_dev(rbd_dev, i - 1, name);
- if (IS_ERR(snap))
- return PTR_ERR(snap);
-
- /* note that we add it backward so using n and not p */
- list_add(&snap->node, n);
- p = &snap->node;
+
+ /* New goes before existing, or at end of list */
+
+ dout(" added dev%s\n", snap ? "" : " at end\n");
+ if (snap)
+ list_add_tail(&new_snap->node, &snap->node);
+ else
+ list_add_tail(&new_snap->node, head);
+ } else {
+ /* Already have this one */
+
+ dout(" already present\n");
+
+ rbd_assert(snap->size == snap_size);
+ rbd_assert(!strcmp(snap->name, snap_name));
+ rbd_assert(snap->features == snap_features);
+
+ /* Done with this list entry; advance */
+
+ links = links->next;
}
+
+ /* Advance to the next entry in the snapshot context */
+
+ index++;
}
- /* we're done going over the old snap list, just add what's left */
- for (; i > 0; i--) {
- name = rbd_prev_snap_name(name, first_name);
- if (!name) {
- WARN_ON(1);
- return -EINVAL;
+ dout("%s: done\n", __func__);
+
+ return 0;
+}
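+
+/*
+ * The walk above is a merge of two sequences sorted by descending
+ * snapshot id.  Per iteration, with snap the current list entry and
+ * snap_id the current context entry (CEPH_NOSNAP once exhausted):
+ *
+ *	snap->id > snap_id:	snapshot was deleted; drop our entry
+ *	snap->id < snap_id:	not seen before; insert ahead of snap
+ *				(or at the tail if the list is exhausted)
+ *	snap->id == snap_id:	already known; verify it is unchanged
+ */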
+
+/*
+ * Scan the list of snapshots and register the devices for any that
+ * have not already been registered.
+ */
+static int rbd_dev_snaps_register(struct rbd_device *rbd_dev)
+{
+ struct rbd_snap *snap;
+ int ret = 0;
+
+ dout("%s called\n", __func__);
+ if (WARN_ON(!device_is_registered(&rbd_dev->dev)))
+ return -EIO;
+
+ list_for_each_entry(snap, &rbd_dev->snaps, node) {
+ if (!rbd_snap_registered(snap)) {
+ ret = rbd_register_snap_dev(snap, &rbd_dev->dev);
+ if (ret < 0)
+ break;
}
- snap = __rbd_add_snap_dev(rbd_dev, i - 1, name);
- if (IS_ERR(snap))
- return PTR_ERR(snap);
- list_add(&snap->node, &rbd_dev->snaps);
}
+ dout("%s: returning %d\n", __func__, ret);
- return 0;
+ return ret;
}
static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
{
- int ret;
struct device *dev;
- struct rbd_snap *snap;
+ int ret;
mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
- dev = &rbd_dev->dev;
+ dev = &rbd_dev->dev;
dev->bus = &rbd_bus_type;
dev->type = &rbd_device_type;
dev->parent = &rbd_root_dev;
dev->release = rbd_dev_release;
dev_set_name(dev, "%d", rbd_dev->dev_id);
ret = device_register(dev);
- if (ret < 0)
- goto out;
- list_for_each_entry(snap, &rbd_dev->snaps, node) {
- ret = rbd_register_snap_dev(snap, &rbd_dev->dev);
- if (ret < 0)
- break;
- }
-out:
mutex_unlock(&ctl_mutex);
+
return ret;
}
@@ -2211,33 +2568,37 @@ static int rbd_init_watch_dev(struct rbd_device *rbd_dev)
return ret;
}
-static atomic64_t rbd_id_max = ATOMIC64_INIT(0);
+static atomic64_t rbd_dev_id_max = ATOMIC64_INIT(0);
/*
* Get a unique rbd identifier for the given new rbd_dev, and add
* the rbd_dev to the global list. The minimum rbd id is 1.
*/
-static void rbd_id_get(struct rbd_device *rbd_dev)
+static void rbd_dev_id_get(struct rbd_device *rbd_dev)
{
- rbd_dev->dev_id = atomic64_inc_return(&rbd_id_max);
+ rbd_dev->dev_id = atomic64_inc_return(&rbd_dev_id_max);
spin_lock(&rbd_dev_list_lock);
list_add_tail(&rbd_dev->node, &rbd_dev_list);
spin_unlock(&rbd_dev_list_lock);
+ dout("rbd_dev %p given dev id %llu\n", rbd_dev,
+ (unsigned long long) rbd_dev->dev_id);
}
/*
* Remove an rbd_dev from the global list, and record that its
* identifier is no longer in use.
*/
-static void rbd_id_put(struct rbd_device *rbd_dev)
+static void rbd_dev_id_put(struct rbd_device *rbd_dev)
{
struct list_head *tmp;
int rbd_id = rbd_dev->dev_id;
int max_id;
- BUG_ON(rbd_id < 1);
+ rbd_assert(rbd_id > 0);
+ dout("rbd_dev %p released dev id %llu\n", rbd_dev,
+ (unsigned long long) rbd_dev->dev_id);
spin_lock(&rbd_dev_list_lock);
list_del_init(&rbd_dev->node);
@@ -2245,7 +2606,7 @@ static void rbd_id_put(struct rbd_device *rbd_dev)
* If the id being "put" is not the current maximum, there
* is nothing special we need to do.
*/
- if (rbd_id != atomic64_read(&rbd_id_max)) {
+ if (rbd_id != atomic64_read(&rbd_dev_id_max)) {
spin_unlock(&rbd_dev_list_lock);
return;
}
@@ -2266,12 +2627,13 @@ static void rbd_id_put(struct rbd_device *rbd_dev)
spin_unlock(&rbd_dev_list_lock);
/*
- * The max id could have been updated by rbd_id_get(), in
+ * The max id could have been updated by rbd_dev_id_get(), in
* which case it now accurately reflects the new maximum.
* Be careful not to overwrite the maximum value in that
* case.
*/
- atomic64_cmpxchg(&rbd_id_max, rbd_id, max_id);
+ atomic64_cmpxchg(&rbd_dev_id_max, rbd_id, max_id);
+ dout(" max dev id has been reset\n");
}
/*
@@ -2360,28 +2722,31 @@ static inline char *dup_token(const char **buf, size_t *lenp)
}
/*
- * This fills in the pool_name, image_name, image_name_len, snap_name,
- * rbd_dev, rbd_md_name, and name fields of the given rbd_dev, based
- * on the list of monitor addresses and other options provided via
- * /sys/bus/rbd/add.
+ * This fills in the pool_name, image_name, image_name_len, rbd_dev,
+ * rbd_md_name, and name fields of the given rbd_dev, based on the
+ * list of monitor addresses and other options provided via
+ * /sys/bus/rbd/add. Returns a pointer to a dynamically-allocated
+ * copy of the snapshot name to map if successful, or a
+ * pointer-coded error otherwise.
*
* Note: rbd_dev is assumed to have been initially zero-filled.
*/
-static int rbd_add_parse_args(struct rbd_device *rbd_dev,
- const char *buf,
- const char **mon_addrs,
- size_t *mon_addrs_size,
- char *options,
- size_t options_size)
+static char *rbd_add_parse_args(struct rbd_device *rbd_dev,
+ const char *buf,
+ const char **mon_addrs,
+ size_t *mon_addrs_size,
+ char *options,
+ size_t options_size)
{
size_t len;
- int ret;
+ char *err_ptr = ERR_PTR(-EINVAL);
+ char *snap_name;
/* The first four tokens are required */
len = next_token(&buf);
if (!len)
- return -EINVAL;
+ return err_ptr;
*mon_addrs_size = len + 1;
*mon_addrs = buf;
@@ -2389,9 +2754,9 @@ static int rbd_add_parse_args(struct rbd_device *rbd_dev,
len = copy_token(&buf, options, options_size);
if (!len || len >= options_size)
- return -EINVAL;
+ return err_ptr;
- ret = -ENOMEM;
+ err_ptr = ERR_PTR(-ENOMEM);
rbd_dev->pool_name = dup_token(&buf, NULL);
if (!rbd_dev->pool_name)
goto out_err;
@@ -2400,41 +2765,227 @@ static int rbd_add_parse_args(struct rbd_device *rbd_dev,
if (!rbd_dev->image_name)
goto out_err;
- /* Create the name of the header object */
+ /* Snapshot name is optional */
+ len = next_token(&buf);
+ if (!len) {
+ buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
+ len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
+ }
+ snap_name = kmalloc(len + 1, GFP_KERNEL);
+ if (!snap_name)
+ goto out_err;
+ memcpy(snap_name, buf, len);
+ *(snap_name + len) = '\0';
- rbd_dev->header_name = kmalloc(rbd_dev->image_name_len
- + sizeof (RBD_SUFFIX),
- GFP_KERNEL);
- if (!rbd_dev->header_name)
+dout(" SNAP_NAME is <%s>, len is %zd\n", snap_name, len);
+
+ return snap_name;
+
+out_err:
+ kfree(rbd_dev->image_name);
+ rbd_dev->image_name = NULL;
+ rbd_dev->image_name_len = 0;
+ kfree(rbd_dev->pool_name);
+ rbd_dev->pool_name = NULL;
+
+ return err_ptr;
+}
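+
+/*
+ * For example (all values hypothetical), writing
+ *
+ *	1.2.3.4:6789 name=admin,secret=<key> rbd foo snap1
+ *
+ * to /sys/bus/rbd/add parses as the monitor address(es), the ceph
+ * options string, pool "rbd", image "foo", and optional snapshot
+ * "snap1"; with no snapshot token the base image (RBD_SNAP_HEAD_NAME)
+ * is mapped.
+ */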
+
+/*
+ * An rbd format 2 image has a unique identifier, distinct from the
+ * name given to it by the user. Internally, that identifier is
+ * what's used to specify the names of objects related to the image.
+ *
+ * A special "rbd id" object is used to map an rbd image name to its
+ * id. If that object doesn't exist, then there is no v2 rbd image
+ * with the supplied name.
+ *
+ * This function will record the given rbd_dev's image_id field if
+ * it can be determined, and in that case will return 0. If any
+ * errors occur a negative errno will be returned and the rbd_dev's
+ * image_id field will be unchanged (and should be NULL).
+ */
+static int rbd_dev_image_id(struct rbd_device *rbd_dev)
+{
+ int ret;
+ size_t size;
+ char *object_name;
+ void *response;
+ void *p;
+
+ /*
+ * First, see if the format 2 image id file exists, and if
+ * so, get the image's persistent id from it.
+ */
+ size = sizeof (RBD_ID_PREFIX) + rbd_dev->image_name_len;
+ object_name = kmalloc(size, GFP_NOIO);
+ if (!object_name)
+ return -ENOMEM;
+ sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->image_name);
+ dout("rbd id object name is %s\n", object_name);
+
+ /* Response will be an encoded string, which includes a length */
+
+ size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
+ response = kzalloc(size, GFP_NOIO);
+ if (!response) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = rbd_req_sync_exec(rbd_dev, object_name,
+ "rbd", "get_id",
+ NULL, 0,
+ response, RBD_IMAGE_ID_LEN_MAX,
+ CEPH_OSD_FLAG_READ, NULL);
+ dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret);
+ if (ret < 0)
+ goto out;
+
+ p = response;
+ rbd_dev->image_id = ceph_extract_encoded_string(&p,
+ p + RBD_IMAGE_ID_LEN_MAX,
+ &rbd_dev->image_id_len,
+ GFP_NOIO);
+ if (IS_ERR(rbd_dev->image_id)) {
+ ret = PTR_ERR(rbd_dev->image_id);
+ rbd_dev->image_id = NULL;
+ } else {
+ dout("image_id is %s\n", rbd_dev->image_id);
+ }
+out:
+ kfree(response);
+ kfree(object_name);
+
+ return ret;
+}
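+
+/*
+ * So for an image the user calls "foo", the lookup above reads object
+ * "rbd_id.foo", whose payload is a single encoded string: the image id
+ * from which every other object name is built (e.g. "rbd_header.<id>").
+ */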
+
+static int rbd_dev_v1_probe(struct rbd_device *rbd_dev)
+{
+ int ret;
+ size_t size;
+
+ /* Version 1 images have no id; empty string is used */
+
+ rbd_dev->image_id = kstrdup("", GFP_KERNEL);
+ if (!rbd_dev->image_id)
+ return -ENOMEM;
+ rbd_dev->image_id_len = 0;
+
+ /* Record the header object name for this rbd image. */
+
+ size = rbd_dev->image_name_len + sizeof (RBD_SUFFIX);
+ rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
+ if (!rbd_dev->header_name) {
+ ret = -ENOMEM;
goto out_err;
+ }
sprintf(rbd_dev->header_name, "%s%s", rbd_dev->image_name, RBD_SUFFIX);
+ /* Populate rbd image metadata */
+
+ ret = rbd_read_header(rbd_dev, &rbd_dev->header);
+ if (ret < 0)
+ goto out_err;
+ rbd_dev->image_format = 1;
+
+ dout("discovered version 1 image, header name is %s\n",
+ rbd_dev->header_name);
+
+ return 0;
+
+out_err:
+ kfree(rbd_dev->header_name);
+ rbd_dev->header_name = NULL;
+ kfree(rbd_dev->image_id);
+ rbd_dev->image_id = NULL;
+
+ return ret;
+}
+
+static int rbd_dev_v2_probe(struct rbd_device *rbd_dev)
+{
+ size_t size;
+ int ret;
+ u64 ver = 0;
+
/*
- * The snapshot name is optional. If none is is supplied,
- * we use the default value.
+ * Image id was filled in by the caller. Record the header
+ * object name for this rbd image.
*/
- rbd_dev->snap_name = dup_token(&buf, &len);
- if (!rbd_dev->snap_name)
+ size = sizeof (RBD_HEADER_PREFIX) + rbd_dev->image_id_len;
+ rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
+ if (!rbd_dev->header_name)
+ return -ENOMEM;
+ sprintf(rbd_dev->header_name, "%s%s",
+ RBD_HEADER_PREFIX, rbd_dev->image_id);
+
+ /* Get the size and object order for the image */
+
+ ret = rbd_dev_v2_image_size(rbd_dev);
+ if (ret < 0)
goto out_err;
- if (!len) {
- /* Replace the empty name with the default */
- kfree(rbd_dev->snap_name);
- rbd_dev->snap_name
- = kmalloc(sizeof (RBD_SNAP_HEAD_NAME), GFP_KERNEL);
- if (!rbd_dev->snap_name)
- goto out_err;
- memcpy(rbd_dev->snap_name, RBD_SNAP_HEAD_NAME,
- sizeof (RBD_SNAP_HEAD_NAME));
- }
+ /* Get the object prefix (a.k.a. block_name) for the image */
- return 0;
+ ret = rbd_dev_v2_object_prefix(rbd_dev);
+ if (ret < 0)
+ goto out_err;
+
+ /* Get the features for the image */
+ ret = rbd_dev_v2_features(rbd_dev);
+ if (ret < 0)
+ goto out_err;
+
+ /* crypto and compression type aren't (yet) supported for v2 images */
+
+ rbd_dev->header.crypt_type = 0;
+ rbd_dev->header.comp_type = 0;
+
+ /* Get the snapshot context, plus the header version */
+
+ ret = rbd_dev_v2_snap_context(rbd_dev, &ver);
+ if (ret)
+ goto out_err;
+ rbd_dev->header.obj_version = ver;
+
+ rbd_dev->image_format = 2;
+
+ dout("discovered version 2 image, header name is %s\n",
+ rbd_dev->header_name);
+
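+ /*
+ * Format 2 images are only detected at this point; refuse the
+ * mapping until the rest of the v2 support lands.
+ */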
+ return -ENOTSUPP;
out_err:
kfree(rbd_dev->header_name);
- kfree(rbd_dev->image_name);
- kfree(rbd_dev->pool_name);
- rbd_dev->pool_name = NULL;
+ rbd_dev->header_name = NULL;
+ kfree(rbd_dev->header.object_prefix);
+ rbd_dev->header.object_prefix = NULL;
+
+ return ret;
+}
+
+/*
+ * Probe for the existence of the header object for the given rbd
+ * device. For format 2 images this includes determining the image
+ * id.
+ */
+static int rbd_dev_probe(struct rbd_device *rbd_dev)
+{
+ int ret;
+
+ /*
+ * Get the id from the image id object. If it's not a
+ * format 2 image, we'll get ENOENT back, and we'll assume
+ * it's a format 1 image.
+ */
+ ret = rbd_dev_image_id(rbd_dev);
+ if (ret)
+ ret = rbd_dev_v1_probe(rbd_dev);
+ else
+ ret = rbd_dev_v2_probe(rbd_dev);
+ if (ret)
+ dout("probe failed, returning %d\n", ret);
return ret;
}
@@ -2449,16 +3000,17 @@ static ssize_t rbd_add(struct bus_type *bus,
size_t mon_addrs_size = 0;
struct ceph_osd_client *osdc;
int rc = -ENOMEM;
+ char *snap_name;
if (!try_module_get(THIS_MODULE))
return -ENODEV;
options = kmalloc(count, GFP_KERNEL);
if (!options)
- goto err_nomem;
+ goto err_out_mem;
rbd_dev = kzalloc(sizeof(*rbd_dev), GFP_KERNEL);
if (!rbd_dev)
- goto err_nomem;
+ goto err_out_mem;
/* static rbd_device initialization */
spin_lock_init(&rbd_dev->lock);
@@ -2466,27 +3018,18 @@ static ssize_t rbd_add(struct bus_type *bus,
INIT_LIST_HEAD(&rbd_dev->snaps);
init_rwsem(&rbd_dev->header_rwsem);
- /* generate unique id: find highest unique id, add one */
- rbd_id_get(rbd_dev);
-
- /* Fill in the device name, now that we have its id. */
- BUILD_BUG_ON(DEV_NAME_LEN
- < sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
- sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);
-
/* parse add command */
- rc = rbd_add_parse_args(rbd_dev, buf, &mon_addrs, &mon_addrs_size,
- options, count);
- if (rc)
- goto err_put_id;
-
- rbd_dev->rbd_client = rbd_get_client(mon_addrs, mon_addrs_size - 1,
- options);
- if (IS_ERR(rbd_dev->rbd_client)) {
- rc = PTR_ERR(rbd_dev->rbd_client);
- goto err_put_id;
+ snap_name = rbd_add_parse_args(rbd_dev, buf,
+ &mon_addrs, &mon_addrs_size, options, count);
+ if (IS_ERR(snap_name)) {
+ rc = PTR_ERR(snap_name);
+ goto err_out_mem;
}
+ rc = rbd_get_client(rbd_dev, mon_addrs, mon_addrs_size - 1, options);
+ if (rc < 0)
+ goto err_out_args;
+
/* pick the pool */
osdc = &rbd_dev->rbd_client->client->osdc;
rc = ceph_pg_poolid_by_name(osdc->osdmap, rbd_dev->pool_name);
@@ -2494,23 +3037,53 @@ static ssize_t rbd_add(struct bus_type *bus,
goto err_out_client;
rbd_dev->pool_id = rc;
- /* register our block device */
- rc = register_blkdev(0, rbd_dev->name);
+ rc = rbd_dev_probe(rbd_dev);
if (rc < 0)
goto err_out_client;
+ rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
+
+ /* no need to lock here, as rbd_dev is not registered yet */
+ rc = rbd_dev_snaps_update(rbd_dev);
+ if (rc)
+ goto err_out_header;
+
+ rc = rbd_dev_set_mapping(rbd_dev, snap_name);
+ if (rc)
+ goto err_out_header;
+
+ /* generate unique id: find highest unique id, add one */
+ rbd_dev_id_get(rbd_dev);
+
+ /* Fill in the device name, now that we have its id. */
+ BUILD_BUG_ON(DEV_NAME_LEN
+ < sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
+ sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);
+
+ /* Get our block major device number. */
+
+ rc = register_blkdev(0, rbd_dev->name);
+ if (rc < 0)
+ goto err_out_id;
rbd_dev->major = rc;
- rc = rbd_bus_add_dev(rbd_dev);
+ /* Set up the blkdev mapping. */
+
+ rc = rbd_init_disk(rbd_dev);
if (rc)
goto err_out_blkdev;
+ rc = rbd_bus_add_dev(rbd_dev);
+ if (rc)
+ goto err_out_disk;
+
/*
* At this point cleanup in the event of an error is the job
* of the sysfs code (initiated by rbd_bus_del_dev()).
- *
- * Set up and announce blkdev mapping.
*/
- rc = rbd_init_disk(rbd_dev);
+
+ down_write(&rbd_dev->header_rwsem);
+ rc = rbd_dev_snaps_register(rbd_dev);
+ up_write(&rbd_dev->header_rwsem);
if (rc)
goto err_out_bus;
@@ -2518,6 +3091,13 @@ static ssize_t rbd_add(struct bus_type *bus,
if (rc)
goto err_out_bus;
+ /* Everything's ready. Announce the disk to the world. */
+
+ add_disk(rbd_dev->disk);
+
+ pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
+ (unsigned long long) rbd_dev->mapping.size);
+
return count;
err_out_bus:
@@ -2527,19 +3107,23 @@ err_out_bus:
kfree(options);
return rc;
+err_out_disk:
+ rbd_free_disk(rbd_dev);
err_out_blkdev:
unregister_blkdev(rbd_dev->major, rbd_dev->name);
+err_out_id:
+ rbd_dev_id_put(rbd_dev);
+err_out_header:
+ rbd_header_free(&rbd_dev->header);
err_out_client:
+ kfree(rbd_dev->header_name);
rbd_put_client(rbd_dev);
-err_put_id:
- if (rbd_dev->pool_name) {
- kfree(rbd_dev->snap_name);
- kfree(rbd_dev->header_name);
- kfree(rbd_dev->image_name);
- kfree(rbd_dev->pool_name);
- }
- rbd_id_put(rbd_dev);
-err_nomem:
+ kfree(rbd_dev->image_id);
+err_out_args:
+ kfree(rbd_dev->mapping.snap_name);
+ kfree(rbd_dev->image_name);
+ kfree(rbd_dev->pool_name);
+err_out_mem:
kfree(rbd_dev);
kfree(options);
@@ -2585,12 +3169,16 @@ static void rbd_dev_release(struct device *dev)
rbd_free_disk(rbd_dev);
unregister_blkdev(rbd_dev->major, rbd_dev->name);
+ /* release allocated disk header fields */
+ rbd_header_free(&rbd_dev->header);
+
/* done with the id, and with the rbd_dev */
- kfree(rbd_dev->snap_name);
+ kfree(rbd_dev->mapping.snap_name);
+ kfree(rbd_dev->image_id);
kfree(rbd_dev->header_name);
kfree(rbd_dev->pool_name);
kfree(rbd_dev->image_name);
- rbd_id_put(rbd_dev);
+ rbd_dev_id_put(rbd_dev);
kfree(rbd_dev);
/* release module ref */
@@ -2628,47 +3216,7 @@ static ssize_t rbd_remove(struct bus_type *bus,
done:
mutex_unlock(&ctl_mutex);
- return ret;
-}
-static ssize_t rbd_snap_add(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
- int ret;
- char *name = kmalloc(count + 1, GFP_KERNEL);
- if (!name)
- return -ENOMEM;
-
- snprintf(name, count, "%s", buf);
-
- mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
-
- ret = rbd_header_add_snap(rbd_dev,
- name, GFP_KERNEL);
- if (ret < 0)
- goto err_unlock;
-
- ret = __rbd_refresh_header(rbd_dev, NULL);
- if (ret < 0)
- goto err_unlock;
-
- /* shouldn't hold ctl_mutex when notifying.. notify might
- trigger a watch callback that would need to get that mutex */
- mutex_unlock(&ctl_mutex);
-
- /* make a best effort, don't error if failed */
- rbd_req_sync_notify(rbd_dev);
-
- ret = count;
- kfree(name);
- return ret;
-
-err_unlock:
- mutex_unlock(&ctl_mutex);
- kfree(name);
return ret;
}
diff --git a/drivers/block/rbd_types.h b/drivers/block/rbd_types.h
index 0924e9e..cbe77fa 100644
--- a/drivers/block/rbd_types.h
+++ b/drivers/block/rbd_types.h
@@ -15,15 +15,30 @@
#include <linux/types.h>
+/* For format version 2, rbd image 'foo' consists of objects
+ * rbd_id.foo - id of image
+ * rbd_header.<id> - image metadata
+ * rbd_data.<id>.0000000000000000
+ * rbd_data.<id>.0000000000000001
+ * ... - data
+ * Clients do not access header data directly in rbd format 2.
+ */
+
+#define RBD_HEADER_PREFIX "rbd_header."
+#define RBD_DATA_PREFIX "rbd_data."
+#define RBD_ID_PREFIX "rbd_id."
+
/*
- * rbd image 'foo' consists of objects
- * foo.rbd - image metadata
- * foo.00000000
- * foo.00000001
- * ... - data
+ * For format version 1, rbd image 'foo' consists of objects
+ * foo.rbd - image metadata
+ * rb.<idhi>.<idlo>.00000000
+ * rb.<idhi>.<idlo>.00000001
+ * ... - data
+ * There is no notion of a persistent image id in rbd format 1.
*/
#define RBD_SUFFIX ".rbd"
+
#define RBD_DIRECTORY "rbd_directory"
#define RBD_INFO "rbd_info"
@@ -47,7 +62,7 @@ struct rbd_image_snap_ondisk {
struct rbd_image_header_ondisk {
char text[40];
- char block_name[24];
+ char object_prefix[24];
char signature[4];
char version[8];
struct {
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index c0bbeb4..0bdde8f 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -14,6 +14,9 @@
#define PART_BITS 4
+static bool use_bio;
+module_param(use_bio, bool, S_IRUGO);
+
static int major;
static DEFINE_IDA(vd_index_ida);
@@ -23,6 +26,7 @@ struct virtio_blk
{
struct virtio_device *vdev;
struct virtqueue *vq;
+ wait_queue_head_t queue_wait;
/* The disk structure for the kernel. */
struct gendisk *disk;
@@ -51,53 +55,244 @@ struct virtio_blk
struct virtblk_req
{
struct request *req;
+ struct bio *bio;
struct virtio_blk_outhdr out_hdr;
struct virtio_scsi_inhdr in_hdr;
+ struct work_struct work;
+ struct virtio_blk *vblk;
+ int flags;
u8 status;
+ struct scatterlist sg[];
+};
+
+enum {
+ VBLK_IS_FLUSH = 1,
+ VBLK_REQ_FLUSH = 2,
+ VBLK_REQ_DATA = 4,
+ VBLK_REQ_FUA = 8,
};
-static void blk_done(struct virtqueue *vq)
+static inline int virtblk_result(struct virtblk_req *vbr)
+{
+ switch (vbr->status) {
+ case VIRTIO_BLK_S_OK:
+ return 0;
+ case VIRTIO_BLK_S_UNSUPP:
+ return -ENOTTY;
+ default:
+ return -EIO;
+ }
+}
+
+static inline struct virtblk_req *virtblk_alloc_req(struct virtio_blk *vblk,
+ gfp_t gfp_mask)
{
- struct virtio_blk *vblk = vq->vdev->priv;
struct virtblk_req *vbr;
- unsigned int len;
- unsigned long flags;
- spin_lock_irqsave(vblk->disk->queue->queue_lock, flags);
- while ((vbr = virtqueue_get_buf(vblk->vq, &len)) != NULL) {
- int error;
+ vbr = mempool_alloc(vblk->pool, gfp_mask);
+ if (!vbr)
+ return NULL;
- switch (vbr->status) {
- case VIRTIO_BLK_S_OK:
- error = 0;
- break;
- case VIRTIO_BLK_S_UNSUPP:
- error = -ENOTTY;
- break;
- default:
- error = -EIO;
+ vbr->vblk = vblk;
+ if (use_bio)
+ sg_init_table(vbr->sg, vblk->sg_elems);
+
+ return vbr;
+}
+
+static void virtblk_add_buf_wait(struct virtio_blk *vblk,
+ struct virtblk_req *vbr,
+ unsigned long out,
+ unsigned long in)
+{
+ DEFINE_WAIT(wait);
+
+ for (;;) {
+ prepare_to_wait_exclusive(&vblk->queue_wait, &wait,
+ TASK_UNINTERRUPTIBLE);
+
+ spin_lock_irq(vblk->disk->queue->queue_lock);
+ if (virtqueue_add_buf(vblk->vq, vbr->sg, out, in, vbr,
+ GFP_ATOMIC) < 0) {
+ spin_unlock_irq(vblk->disk->queue->queue_lock);
+ io_schedule();
+ } else {
+ virtqueue_kick(vblk->vq);
+ spin_unlock_irq(vblk->disk->queue->queue_lock);
break;
}
- switch (vbr->req->cmd_type) {
- case REQ_TYPE_BLOCK_PC:
- vbr->req->resid_len = vbr->in_hdr.residual;
- vbr->req->sense_len = vbr->in_hdr.sense_len;
- vbr->req->errors = vbr->in_hdr.errors;
- break;
- case REQ_TYPE_SPECIAL:
- vbr->req->errors = (error != 0);
- break;
- default:
- break;
+ }
+
+ finish_wait(&vblk->queue_wait, &wait);
+}
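+
+/*
+ * The open-coded wait above (rather than wait_event()) lets each add
+ * attempt run under the queue lock: prepare to wait, try to add the
+ * buffer, and io_schedule() only if the ring is still full.  The done
+ * callback wakes queue_wait once completed buffers free up slots.
+ */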
+
+static inline void virtblk_add_req(struct virtblk_req *vbr,
+ unsigned int out, unsigned int in)
+{
+ struct virtio_blk *vblk = vbr->vblk;
+
+ spin_lock_irq(vblk->disk->queue->queue_lock);
+ if (unlikely(virtqueue_add_buf(vblk->vq, vbr->sg, out, in, vbr,
+ GFP_ATOMIC) < 0)) {
+ spin_unlock_irq(vblk->disk->queue->queue_lock);
+ virtblk_add_buf_wait(vblk, vbr, out, in);
+ return;
+ }
+ virtqueue_kick(vblk->vq);
+ spin_unlock_irq(vblk->disk->queue->queue_lock);
+}
+
+static int virtblk_bio_send_flush(struct virtblk_req *vbr)
+{
+ unsigned int out = 0, in = 0;
+
+ vbr->flags |= VBLK_IS_FLUSH;
+ vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH;
+ vbr->out_hdr.sector = 0;
+ vbr->out_hdr.ioprio = 0;
+ sg_set_buf(&vbr->sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr));
+ sg_set_buf(&vbr->sg[out + in++], &vbr->status, sizeof(vbr->status));
+
+ virtblk_add_req(vbr, out, in);
+
+ return 0;
+}
+
+static int virtblk_bio_send_data(struct virtblk_req *vbr)
+{
+ struct virtio_blk *vblk = vbr->vblk;
+ unsigned int num, out = 0, in = 0;
+ struct bio *bio = vbr->bio;
+
+ vbr->flags &= ~VBLK_IS_FLUSH;
+ vbr->out_hdr.type = 0;
+ vbr->out_hdr.sector = bio->bi_sector;
+ vbr->out_hdr.ioprio = bio_prio(bio);
+
+ sg_set_buf(&vbr->sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr));
+
+ num = blk_bio_map_sg(vblk->disk->queue, bio, vbr->sg + out);
+
+ sg_set_buf(&vbr->sg[num + out + in++], &vbr->status,
+ sizeof(vbr->status));
+
+ if (num) {
+ if (bio->bi_rw & REQ_WRITE) {
+ vbr->out_hdr.type |= VIRTIO_BLK_T_OUT;
+ out += num;
+ } else {
+ vbr->out_hdr.type |= VIRTIO_BLK_T_IN;
+ in += num;
}
+ }
+
+ virtblk_add_req(vbr, out, in);
+
+ return 0;
+}
+
+static void virtblk_bio_send_data_work(struct work_struct *work)
+{
+ struct virtblk_req *vbr;
+
+ vbr = container_of(work, struct virtblk_req, work);
+
+ virtblk_bio_send_data(vbr);
+}
+
+static void virtblk_bio_send_flush_work(struct work_struct *work)
+{
+ struct virtblk_req *vbr;
+
+ vbr = container_of(work, struct virtblk_req, work);
+
+ virtblk_bio_send_flush(vbr);
+}
+
+static inline void virtblk_request_done(struct virtblk_req *vbr)
+{
+ struct virtio_blk *vblk = vbr->vblk;
+ struct request *req = vbr->req;
+ int error = virtblk_result(vbr);
+
+ if (req->cmd_type == REQ_TYPE_BLOCK_PC) {
+ req->resid_len = vbr->in_hdr.residual;
+ req->sense_len = vbr->in_hdr.sense_len;
+ req->errors = vbr->in_hdr.errors;
+ } else if (req->cmd_type == REQ_TYPE_SPECIAL) {
+ req->errors = (error != 0);
+ }
+
+ __blk_end_request_all(req, error);
+ mempool_free(vbr, vblk->pool);
+}
+
+static inline void virtblk_bio_flush_done(struct virtblk_req *vbr)
+{
+ struct virtio_blk *vblk = vbr->vblk;
+
+ if (vbr->flags & VBLK_REQ_DATA) {
+ /* Send out the actual write data */
+ INIT_WORK(&vbr->work, virtblk_bio_send_data_work);
+ queue_work(virtblk_wq, &vbr->work);
+ } else {
+ bio_endio(vbr->bio, virtblk_result(vbr));
+ mempool_free(vbr, vblk->pool);
+ }
+}
+
+static inline void virtblk_bio_data_done(struct virtblk_req *vbr)
+{
+ struct virtio_blk *vblk = vbr->vblk;
- __blk_end_request_all(vbr->req, error);
+ if (unlikely(vbr->flags & VBLK_REQ_FUA)) {
+ /* Send out a flush before ending the bio */
+ vbr->flags &= ~VBLK_REQ_DATA;
+ INIT_WORK(&vbr->work, virtblk_bio_send_flush_work);
+ queue_work(virtblk_wq, &vbr->work);
+ } else {
+ bio_endio(vbr->bio, virtblk_result(vbr));
mempool_free(vbr, vblk->pool);
}
+}
+
+static inline void virtblk_bio_done(struct virtblk_req *vbr)
+{
+ if (unlikely(vbr->flags & VBLK_IS_FLUSH))
+ virtblk_bio_flush_done(vbr);
+ else
+ virtblk_bio_data_done(vbr);
+}
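+
+/*
+ * Completion flow for the bio path, driven by the VBLK_* flags set at
+ * submission:
+ *
+ *	plain data bio:		data request, then bio_endio()
+ *	data bio with FUA:	data request, then a flush, then bio_endio()
+ *	flush bio with data:	flush first, then the data request
+ */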
+
+static void virtblk_done(struct virtqueue *vq)
+{
+ struct virtio_blk *vblk = vq->vdev->priv;
+ bool bio_done = false, req_done = false;
+ struct virtblk_req *vbr;
+ unsigned long flags;
+ unsigned int len;
+
+ spin_lock_irqsave(vblk->disk->queue->queue_lock, flags);
+ do {
+ virtqueue_disable_cb(vq);
+ while ((vbr = virtqueue_get_buf(vblk->vq, &len)) != NULL) {
+ if (vbr->bio) {
+ virtblk_bio_done(vbr);
+ bio_done = true;
+ } else {
+ virtblk_request_done(vbr);
+ req_done = true;
+ }
+ }
+ } while (!virtqueue_enable_cb(vq));
/* In case queue is stopped waiting for more buffers. */
- blk_start_queue(vblk->disk->queue);
+ if (req_done)
+ blk_start_queue(vblk->disk->queue);
spin_unlock_irqrestore(vblk->disk->queue->queue_lock, flags);
+
+ if (bio_done)
+ wake_up(&vblk->queue_wait);
}
static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
@@ -106,13 +301,13 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
unsigned long num, out = 0, in = 0;
struct virtblk_req *vbr;
- vbr = mempool_alloc(vblk->pool, GFP_ATOMIC);
+ vbr = virtblk_alloc_req(vblk, GFP_ATOMIC);
if (!vbr)
/* When another request finishes we'll try again. */
return false;
vbr->req = req;
-
+ vbr->bio = NULL;
if (req->cmd_flags & REQ_FLUSH) {
vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH;
vbr->out_hdr.sector = 0;
@@ -172,7 +367,8 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
}
}
- if (virtqueue_add_buf(vblk->vq, vblk->sg, out, in, vbr, GFP_ATOMIC)<0) {
+ if (virtqueue_add_buf(vblk->vq, vblk->sg, out, in, vbr,
+ GFP_ATOMIC) < 0) {
mempool_free(vbr, vblk->pool);
return false;
}
@@ -180,7 +376,7 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
return true;
}
-static void do_virtblk_request(struct request_queue *q)
+static void virtblk_request(struct request_queue *q)
{
struct virtio_blk *vblk = q->queuedata;
struct request *req;
@@ -203,6 +399,34 @@ static void do_virtblk_request(struct request_queue *q)
virtqueue_kick(vblk->vq);
}
+static void virtblk_make_request(struct request_queue *q, struct bio *bio)
+{
+ struct virtio_blk *vblk = q->queuedata;
+ struct virtblk_req *vbr;
+
+ BUG_ON(bio->bi_phys_segments + 2 > vblk->sg_elems);
+
+ vbr = virtblk_alloc_req(vblk, GFP_NOIO);
+ if (!vbr) {
+ bio_endio(bio, -ENOMEM);
+ return;
+ }
+
+ vbr->bio = bio;
+ vbr->flags = 0;
+ if (bio->bi_rw & REQ_FLUSH)
+ vbr->flags |= VBLK_REQ_FLUSH;
+ if (bio->bi_rw & REQ_FUA)
+ vbr->flags |= VBLK_REQ_FUA;
+ if (bio->bi_size)
+ vbr->flags |= VBLK_REQ_DATA;
+
+ if (unlikely(vbr->flags & VBLK_REQ_FLUSH))
+ virtblk_bio_send_flush(vbr);
+ else
+ virtblk_bio_send_data(vbr);
+}
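+
+/*
+ * The bio path is chosen once, at module load time, e.g. by booting
+ * with virtio_blk.use_bio=1; the parameter is registered read-only
+ * (S_IRUGO), and the request-queue path remains the default.
+ */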
+
/* return id (s/n) string for *disk to *id_str
*/
static int virtblk_get_id(struct gendisk *disk, char *id_str)
@@ -360,7 +584,7 @@ static int init_vq(struct virtio_blk *vblk)
int err = 0;
/* We expect one virtqueue, for output. */
- vblk->vq = virtio_find_single_vq(vblk->vdev, blk_done, "requests");
+ vblk->vq = virtio_find_single_vq(vblk->vdev, virtblk_done, "requests");
if (IS_ERR(vblk->vq))
err = PTR_ERR(vblk->vq);
@@ -477,6 +701,8 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
struct virtio_blk *vblk;
struct request_queue *q;
int err, index;
+ int pool_size;
+
u64 cap;
u32 v, blk_size, sg_elems, opt_io_size;
u16 min_io_size;
@@ -506,10 +732,12 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
goto out_free_index;
}
+ init_waitqueue_head(&vblk->queue_wait);
vblk->vdev = vdev;
vblk->sg_elems = sg_elems;
sg_init_table(vblk->sg, vblk->sg_elems);
mutex_init(&vblk->config_lock);
+
INIT_WORK(&vblk->config_work, virtblk_config_changed_work);
vblk->config_enable = true;
@@ -517,7 +745,10 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
if (err)
goto out_free_vblk;
- vblk->pool = mempool_create_kmalloc_pool(1,sizeof(struct virtblk_req));
+ pool_size = sizeof(struct virtblk_req);
+ if (use_bio)
+ pool_size += sizeof(struct scatterlist) * sg_elems;
+ vblk->pool = mempool_create_kmalloc_pool(1, pool_size);
if (!vblk->pool) {
err = -ENOMEM;
goto out_free_vq;
@@ -530,12 +761,14 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
goto out_mempool;
}
- q = vblk->disk->queue = blk_init_queue(do_virtblk_request, NULL);
+ q = vblk->disk->queue = blk_init_queue(virtblk_request, NULL);
if (!q) {
err = -ENOMEM;
goto out_put_disk;
}
+ if (use_bio)
+ blk_queue_make_request(q, virtblk_make_request);
q->queuedata = vblk;
virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);
@@ -620,7 +853,6 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
if (!err && opt_io_size)
blk_queue_io_opt(q, blk_size * opt_io_size);
-
add_disk(vblk->disk);
err = device_create_file(disk_to_dev(vblk->disk), &dev_attr_serial);
if (err)
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index 4fbdceb..a5effd8 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -18,11 +18,12 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/random.h>
-#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/hw_random.h>
#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
#include <asm/io.h>
@@ -46,26 +47,36 @@
#define RNG_SYSSTATUS 0x44 /* System status
[0] = RESETDONE */
-static void __iomem *rng_base;
-static struct clk *rng_ick;
-static struct platform_device *rng_dev;
+/**
+ * struct omap_rng_private_data - RNG IP block-specific data
+ * @base: virtual address of the beginning of the RNG IP block registers
+ * @mem_res: struct resource * for the IP block registers physical memory
+ */
+struct omap_rng_private_data {
+ void __iomem *base;
+ struct resource *mem_res;
+};
-static inline u32 omap_rng_read_reg(int reg)
+static inline u32 omap_rng_read_reg(struct omap_rng_private_data *priv, int reg)
{
- return __raw_readl(rng_base + reg);
+ return __raw_readl(priv->base + reg);
}
-static inline void omap_rng_write_reg(int reg, u32 val)
+static inline void omap_rng_write_reg(struct omap_rng_private_data *priv,
+ int reg, u32 val)
{
- __raw_writel(val, rng_base + reg);
+ __raw_writel(val, priv->base + reg);
}
static int omap_rng_data_present(struct hwrng *rng, int wait)
{
+ struct omap_rng_private_data *priv;
int data, i;
+ priv = (struct omap_rng_private_data *)rng->priv;
+
for (i = 0; i < 20; i++) {
- data = omap_rng_read_reg(RNG_STAT_REG) ? 0 : 1;
+ data = omap_rng_read_reg(priv, RNG_STAT_REG) ? 0 : 1;
if (data || !wait)
break;
/* RNG produces data fast enough (2+ MBit/sec, even
@@ -80,9 +91,13 @@ static int omap_rng_data_present(struct hwrng *rng, int wait)
static int omap_rng_data_read(struct hwrng *rng, u32 *data)
{
- *data = omap_rng_read_reg(RNG_OUT_REG);
+ struct omap_rng_private_data *priv;
+
+ priv = (struct omap_rng_private_data *)rng->priv;
+
+ *data = omap_rng_read_reg(priv, RNG_OUT_REG);
- return 4;
+ return sizeof(u32);
}
static struct hwrng omap_rng_ops = {
@@ -93,69 +108,68 @@ static struct hwrng omap_rng_ops = {
static int __devinit omap_rng_probe(struct platform_device *pdev)
{
- struct resource *res;
+ struct omap_rng_private_data *priv;
int ret;
- /*
- * A bit ugly, and it will never actually happen but there can
- * be only one RNG and this catches any bork
- */
- if (rng_dev)
- return -EBUSY;
-
- if (cpu_is_omap24xx()) {
- rng_ick = clk_get(&pdev->dev, "ick");
- if (IS_ERR(rng_ick)) {
- dev_err(&pdev->dev, "Could not get rng_ick\n");
- ret = PTR_ERR(rng_ick);
- return ret;
- } else
- clk_enable(rng_ick);
- }
+ priv = kzalloc(sizeof(struct omap_rng_private_data), GFP_KERNEL);
+ if (!priv) {
+ dev_err(&pdev->dev, "could not allocate memory\n");
+ return -ENOMEM;
+ }
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ omap_rng_ops.priv = (unsigned long)priv;
- rng_base = devm_request_and_ioremap(&pdev->dev, res);
- if (!rng_base) {
+ priv->mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!priv->mem_res) {
+ ret = -ENOENT;
+ goto err_ioremap;
+ }
+
+ priv->base = devm_request_and_ioremap(&pdev->dev, priv->mem_res);
+ if (!priv->base) {
ret = -ENOMEM;
goto err_ioremap;
}
- dev_set_drvdata(&pdev->dev, res);
+ dev_set_drvdata(&pdev->dev, priv);
+
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
ret = hwrng_register(&omap_rng_ops);
if (ret)
goto err_register;
dev_info(&pdev->dev, "OMAP Random Number Generator ver. %02x\n",
- omap_rng_read_reg(RNG_REV_REG));
- omap_rng_write_reg(RNG_MASK_REG, 0x1);
+ omap_rng_read_reg(priv, RNG_REV_REG));
- rng_dev = pdev;
+ omap_rng_write_reg(priv, RNG_MASK_REG, 0x1);
return 0;
err_register:
- rng_base = NULL;
+ priv->base = NULL;
+ pm_runtime_disable(&pdev->dev);
err_ioremap:
- if (cpu_is_omap24xx()) {
- clk_disable(rng_ick);
- clk_put(rng_ick);
- }
+ kfree(priv);
+
return ret;
}
static int __exit omap_rng_remove(struct platform_device *pdev)
{
+ struct omap_rng_private_data *priv = dev_get_drvdata(&pdev->dev);
+
hwrng_unregister(&omap_rng_ops);
- omap_rng_write_reg(RNG_MASK_REG, 0x0);
+ omap_rng_write_reg(priv, RNG_MASK_REG, 0x0);
- if (cpu_is_omap24xx()) {
- clk_disable(rng_ick);
- clk_put(rng_ick);
- }
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ release_mem_region(priv->mem_res->start, resource_size(priv->mem_res));
- rng_base = NULL;
+ kfree(priv);
return 0;
}
@@ -164,13 +178,21 @@ static int __exit omap_rng_remove(struct platform_device *pdev)
static int omap_rng_suspend(struct device *dev)
{
- omap_rng_write_reg(RNG_MASK_REG, 0x0);
+ struct omap_rng_private_data *priv = dev_get_drvdata(dev);
+
+ omap_rng_write_reg(priv, RNG_MASK_REG, 0x0);
+ pm_runtime_put_sync(dev);
+
return 0;
}
static int omap_rng_resume(struct device *dev)
{
- omap_rng_write_reg(RNG_MASK_REG, 0x1);
+ struct omap_rng_private_data *priv = dev_get_drvdata(dev);
+
+ pm_runtime_get_sync(dev);
+ omap_rng_write_reg(priv, RNG_MASK_REG, 0x1);
+
return 0;
}
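+
+/*
+ * Suspend masks the interrupt and drops the runtime PM reference taken
+ * at probe (or on a previous resume); resume takes it back before
+ * touching registers, so the IP block is clocked whenever it is
+ * programmed.
+ */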
@@ -198,9 +220,6 @@ static struct platform_driver omap_rng_driver = {
static int __init omap_rng_init(void)
{
- if (!cpu_is_omap16xx() && !cpu_is_omap24xx())
- return -ENODEV;
-
return platform_driver_register(&omap_rng_driver);
}
diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
index 0c7d340..f74e892 100644
--- a/drivers/char/mbcs.c
+++ b/drivers/char/mbcs.c
@@ -507,7 +507,7 @@ static int mbcs_gscr_mmap(struct file *fp, struct vm_area_struct *vma)
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- /* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
+ /* Remap-pfn-range will mark the range VM_IO */
if (remap_pfn_range(vma,
vma->vm_start,
__pa(soft->gscr_addr) >> PAGE_SHIFT,
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index e5eedfa..0537903 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -322,7 +322,7 @@ static int mmap_mem(struct file *file, struct vm_area_struct *vma)
vma->vm_ops = &mmap_mem_ops;
- /* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
+ /* Remap-pfn-range will mark the range VM_IO */
if (remap_pfn_range(vma,
vma->vm_start,
vma->vm_pgoff,
diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c
index 845f97f..e1f60f9 100644
--- a/drivers/char/mspec.c
+++ b/drivers/char/mspec.c
@@ -286,7 +286,7 @@ mspec_mmap(struct file *file, struct vm_area_struct *vma,
atomic_set(&vdata->refcnt, 1);
vma->vm_private_data = vdata;
- vma->vm_flags |= (VM_IO | VM_RESERVED | VM_PFNMAP | VM_DONTEXPAND);
+ vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
if (vdata->type == MSPEC_FETCHOP || vdata->type == MSPEC_UNCACHED)
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_ops = &mspec_vm_ops;
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 060a672..8ab9c3d 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -24,6 +24,8 @@
#include <linux/err.h>
#include <linux/freezer.h>
#include <linux/fs.h>
+#include <linux/splice.h>
+#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/poll.h>
@@ -474,26 +476,53 @@ static ssize_t send_control_msg(struct port *port, unsigned int event,
return 0;
}
+struct buffer_token {
+ union {
+ void *buf;
+ struct scatterlist *sg;
+ } u;
+ /* If sgpages == 0 then buf is used, else sg is used */
+ unsigned int sgpages;
+};
+
+static void reclaim_sg_pages(struct scatterlist *sg, unsigned int nrpages)
+{
+ int i;
+ struct page *page;
+
+ for (i = 0; i < nrpages; i++) {
+ page = sg_page(&sg[i]);
+ if (!page)
+ break;
+ put_page(page);
+ }
+ kfree(sg);
+}
+
/* Callers must take the port->outvq_lock */
static void reclaim_consumed_buffers(struct port *port)
{
- void *buf;
+ struct buffer_token *tok;
unsigned int len;
if (!port->portdev) {
/* Device has been unplugged. vqs are already gone. */
return;
}
- while ((buf = virtqueue_get_buf(port->out_vq, &len))) {
- kfree(buf);
+ while ((tok = virtqueue_get_buf(port->out_vq, &len))) {
+ if (tok->sgpages)
+ reclaim_sg_pages(tok->u.sg, tok->sgpages);
+ else
+ kfree(tok->u.buf);
+ kfree(tok);
port->outvq_full = false;
}
}
-static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count,
- bool nonblock)
+static ssize_t __send_to_port(struct port *port, struct scatterlist *sg,
+ int nents, size_t in_count,
+ struct buffer_token *tok, bool nonblock)
{
- struct scatterlist sg[1];
struct virtqueue *out_vq;
ssize_t ret;
unsigned long flags;
@@ -505,8 +534,7 @@ static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count,
reclaim_consumed_buffers(port);
- sg_init_one(sg, in_buf, in_count);
- ret = virtqueue_add_buf(out_vq, sg, 1, 0, in_buf, GFP_ATOMIC);
+ ret = virtqueue_add_buf(out_vq, sg, nents, 0, tok, GFP_ATOMIC);
/* Tell Host to go! */
virtqueue_kick(out_vq);
@@ -544,6 +572,37 @@ done:
return in_count;
}
+static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count,
+ bool nonblock)
+{
+ struct scatterlist sg[1];
+ struct buffer_token *tok;
+
+ tok = kmalloc(sizeof(*tok), GFP_ATOMIC);
+ if (!tok)
+ return -ENOMEM;
+ tok->sgpages = 0;
+ tok->u.buf = in_buf;
+
+ sg_init_one(sg, in_buf, in_count);
+
+ return __send_to_port(port, sg, 1, in_count, tok, nonblock);
+}
+
+static ssize_t send_pages(struct port *port, struct scatterlist *sg, int nents,
+ size_t in_count, bool nonblock)
+{
+ struct buffer_token *tok;
+
+ tok = kmalloc(sizeof(*tok), GFP_ATOMIC);
+ if (!tok)
+ return -ENOMEM;
+ tok->sgpages = nents;
+ tok->u.sg = sg;
+
+ return __send_to_port(port, sg, nents, in_count, tok, nonblock);
+}
+
/*
* Give out the data that's requested from the buffer that we have
* queued up.
@@ -665,6 +724,26 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
return fill_readbuf(port, ubuf, count, true);
}
+static int wait_port_writable(struct port *port, bool nonblock)
+{
+ int ret;
+
+ if (will_write_block(port)) {
+ if (nonblock)
+ return -EAGAIN;
+
+ ret = wait_event_freezable(port->waitqueue,
+ !will_write_block(port));
+ if (ret < 0)
+ return ret;
+ }
+ /* Port got hot-unplugged. */
+ if (!port->guest_connected)
+ return -ENODEV;
+
+ return 0;
+}
+
static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
size_t count, loff_t *offp)
{
@@ -681,18 +760,9 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
nonblock = filp->f_flags & O_NONBLOCK;
- if (will_write_block(port)) {
- if (nonblock)
- return -EAGAIN;
-
- ret = wait_event_freezable(port->waitqueue,
- !will_write_block(port));
- if (ret < 0)
- return ret;
- }
- /* Port got hot-unplugged. */
- if (!port->guest_connected)
- return -ENODEV;
+ ret = wait_port_writable(port, nonblock);
+ if (ret < 0)
+ return ret;
count = min((size_t)(32 * 1024), count);
@@ -725,6 +795,93 @@ out:
return ret;
}
+struct sg_list {
+ unsigned int n;
+ unsigned int size;
+ size_t len;
+ struct scatterlist *sg;
+};
+
+static int pipe_to_sg(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
+ struct splice_desc *sd)
+{
+ struct sg_list *sgl = sd->u.data;
+ unsigned int offset, len;
+
+ if (sgl->n == sgl->size)
+ return 0;
+
+ /* Try lock this page */
+ if (buf->ops->steal(pipe, buf) == 0) {
+ /* Get reference and unlock page for moving */
+ get_page(buf->page);
+ unlock_page(buf->page);
+
+ len = min(buf->len, sd->len);
+ sg_set_page(&(sgl->sg[sgl->n]), buf->page, len, buf->offset);
+ } else {
+ /* Fall back to copying a page */
+ struct page *page = alloc_page(GFP_KERNEL);
+ char *src = buf->ops->map(pipe, buf, 1);
+ char *dst;
+
+ if (!page)
+ return -ENOMEM;
+ dst = kmap(page);
+
+ offset = sd->pos & ~PAGE_MASK;
+
+ len = sd->len;
+ if (len + offset > PAGE_SIZE)
+ len = PAGE_SIZE - offset;
+
+ memcpy(dst + offset, src + buf->offset, len);
+
+ kunmap(page);
+ buf->ops->unmap(pipe, buf, src);
+
+ sg_set_page(&(sgl->sg[sgl->n]), page, len, offset);
+ }
+ sgl->n++;
+ sgl->len += len;
+
+ return len;
+}
+
+/* Faster zero-copy write by splicing */
+static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe,
+ struct file *filp, loff_t *ppos,
+ size_t len, unsigned int flags)
+{
+ struct port *port = filp->private_data;
+ struct sg_list sgl;
+ ssize_t ret;
+ struct splice_desc sd = {
+ .total_len = len,
+ .flags = flags,
+ .pos = *ppos,
+ .u.data = &sgl,
+ };
+
+ ret = wait_port_writable(port, filp->f_flags & O_NONBLOCK);
+ if (ret < 0)
+ return ret;
+
+ sgl.n = 0;
+ sgl.len = 0;
+ sgl.size = pipe->nrbufs;
+ sgl.sg = kmalloc(sizeof(struct scatterlist) * sgl.size, GFP_KERNEL);
+ if (unlikely(!sgl.sg))
+ return -ENOMEM;
+
+ sg_init_table(sgl.sg, sgl.size);
+ ret = __splice_from_pipe(pipe, &sd, pipe_to_sg);
+ if (likely(ret > 0))
+ ret = send_pages(port, sgl.sg, sgl.n, sgl.len, true);
+
+ return ret;
+}
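+
+/*
+ * With this hook a writer can splice(2) pipe contents straight into a
+ * port (e.g. /dev/vport0p1): stolen pipe pages are handed to the host
+ * zero-copy, and pipe_to_sg() falls back to a bounce page when a page
+ * cannot be stolen.
+ */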
+
static unsigned int port_fops_poll(struct file *filp, poll_table *wait)
{
struct port *port;
@@ -856,6 +1013,7 @@ static const struct file_operations port_fops = {
.open = port_fops_open,
.read = port_fops_read,
.write = port_fops_write,
+ .splice_write = port_fops_splice_write,
.poll = port_fops_poll,
.release = port_fops_release,
.fasync = port_fops_fasync,
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index 21c1a87..24ccae4 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -19,6 +19,9 @@
#include <linux/clk.h>
#include <crypto/internal/hash.h>
#include <crypto/sha.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
#include "mv_cesa.h"
@@ -1062,7 +1065,10 @@ static int mv_probe(struct platform_device *pdev)
goto err_unmap_reg;
}
- irq = platform_get_irq(pdev, 0);
+ if (pdev->dev.of_node)
+ irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+ else
+ irq = platform_get_irq(pdev, 0);
if (irq < 0 || irq == NO_IRQ) {
ret = irq;
goto err_unmap_sram;
@@ -1170,12 +1176,19 @@ static int mv_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id mv_cesa_of_match_table[] = {
+ { .compatible = "marvell,orion-crypto", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mv_cesa_of_match_table);
+
static struct platform_driver marvell_crypto = {
.probe = mv_probe,
- .remove = mv_remove,
+ .remove = __devexit_p(mv_remove),
.driver = {
.owner = THIS_MODULE,
.name = "mv_crypto",
+ .of_match_table = of_match_ptr(mv_cesa_of_match_table),
},
};
MODULE_ALIAS("platform:mv_crypto");
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index d06ea29..677cd6e 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -208,6 +208,16 @@ config SIRF_DMA
help
Enable support for the CSR SiRFprimaII DMA engine.
+config TI_EDMA
+ tristate "TI EDMA support"
+ depends on ARCH_DAVINCI
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ default n
+ help
+ Enable support for the TI EDMA controller. This DMA
+ engine is found on TI DaVinci and AM33xx parts.
+
config ARCH_HAS_ASYNC_TX_FIND_CHANNEL
bool
@@ -292,6 +302,13 @@ config DMA_OMAP
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
+config MMP_PDMA
+ bool "MMP PDMA support"
+ depends on (ARCH_MMP || ARCH_PXA)
+ select DMA_ENGINE
+ help
+	  Support the MMP PDMA engine for PXA and MMP platforms.
+
config DMA_ENGINE
bool
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 4cf6b12..7428fea 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_IMX_DMA) += imx-dma.o
obj-$(CONFIG_MXS_DMA) += mxs-dma.o
obj-$(CONFIG_TIMB_DMA) += timb_dma.o
obj-$(CONFIG_SIRF_DMA) += sirf-dma.o
+obj-$(CONFIG_TI_EDMA) += edma.o
obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o
obj-$(CONFIG_PL330_DMA) += pl330.o
@@ -32,3 +33,4 @@ obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o
obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
obj-$(CONFIG_DMA_OMAP) += omap-dma.o
+obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 6fbeebb..d1cc579 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -1892,6 +1892,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
pl08x->pd = dev_get_platdata(&adev->dev);
if (!pl08x->pd) {
dev_err(&adev->dev, "no platform data supplied\n");
+ ret = -EINVAL;
goto out_no_platdata;
}
@@ -1943,6 +1944,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
dev_err(&adev->dev, "%s failed to allocate "
"physical channel holders\n",
__func__);
+ ret = -ENOMEM;
goto out_no_phychans;
}
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 17d6958..13a02f4 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -852,12 +852,13 @@ atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc,
* @buf_len: total number of bytes for the entire buffer
* @period_len: number of bytes for each period
* @direction: transfer direction, to or from device
+ * @flags: tx descriptor status flags
* @context: transfer context (ignored)
*/
static struct dma_async_tx_descriptor *
atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
size_t period_len, enum dma_transfer_direction direction,
- void *context)
+ unsigned long flags, void *context)
{
struct at_dma_chan *atchan = to_at_dma_chan(chan);
struct at_dma_slave *atslave = chan->private;
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index d3c5a5a..c4b0eb3c 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -36,12 +36,22 @@
* which does not support descriptor writeback.
*/
+static inline unsigned int dwc_get_dms(struct dw_dma_slave *slave)
+{
+ return slave ? slave->dst_master : 0;
+}
+
+static inline unsigned int dwc_get_sms(struct dw_dma_slave *slave)
+{
+ return slave ? slave->src_master : 1;
+}
+
#define DWC_DEFAULT_CTLLO(_chan) ({ \
struct dw_dma_slave *__slave = (_chan->private); \
struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan); \
struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \
- int _dms = __slave ? __slave->dst_master : 0; \
- int _sms = __slave ? __slave->src_master : 1; \
+ int _dms = dwc_get_dms(__slave); \
+ int _sms = dwc_get_sms(__slave); \
u8 _smsize = __slave ? _sconfig->src_maxburst : \
DW_DMA_MSIZE_16; \
u8 _dmsize = __slave ? _sconfig->dst_maxburst : \
@@ -56,16 +66,6 @@
})
/*
- * This is configuration-dependent and usually a funny size like 4095.
- *
- * Note that this is a transfer count, i.e. if we transfer 32-bit
- * words, we can do 16380 bytes per descriptor.
- *
- * This parameter is also system-specific.
- */
-#define DWC_MAX_COUNT 4095U
-
-/*
* Number of descriptors to allocate for each channel. This should be
* made configurable somehow; preferably, the clients (at least the
* ones using slave transfers) should be able to give us a hint.
@@ -177,6 +177,11 @@ static void dwc_initialize(struct dw_dma_chan *dwc)
cfghi = dws->cfg_hi;
cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
+ } else {
+ if (dwc->dma_sconfig.direction == DMA_MEM_TO_DEV)
+ cfghi = DWC_CFGH_DST_PER(dwc->dma_sconfig.slave_id);
+ else if (dwc->dma_sconfig.direction == DMA_DEV_TO_MEM)
+ cfghi = DWC_CFGH_SRC_PER(dwc->dma_sconfig.slave_id);
}
channel_writel(dwc, CFG_LO, cfglo);
@@ -206,7 +211,7 @@ static inline unsigned int dwc_fast_fls(unsigned long long v)
return 0;
}
-static void dwc_dump_chan_regs(struct dw_dma_chan *dwc)
+static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc)
{
dev_err(chan2dev(&dwc->chan),
" SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
@@ -227,10 +232,29 @@ static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc)
/*----------------------------------------------------------------------*/
+/* Perform single block transfer */
+static inline void dwc_do_single_block(struct dw_dma_chan *dwc,
+ struct dw_desc *desc)
+{
+ struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+ u32 ctllo;
+
+ /* Software emulation of LLP mode relies on interrupts to continue
+ * multi block transfer. */
+ ctllo = desc->lli.ctllo | DWC_CTLL_INT_EN;
+
+ channel_writel(dwc, SAR, desc->lli.sar);
+ channel_writel(dwc, DAR, desc->lli.dar);
+ channel_writel(dwc, CTL_LO, ctllo);
+ channel_writel(dwc, CTL_HI, desc->lli.ctlhi);
+ channel_set_bit(dw, CH_EN, dwc->mask);
+}
+
/* Called with dwc->lock held and bh disabled */
static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
{
struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+ unsigned long was_soft_llp;
/* ASSERT: channel is idle */
if (dma_readl(dw, CH_EN) & dwc->mask) {
@@ -242,6 +266,26 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
return;
}
+ if (dwc->nollp) {
+ was_soft_llp = test_and_set_bit(DW_DMA_IS_SOFT_LLP,
+ &dwc->flags);
+ if (was_soft_llp) {
+ dev_err(chan2dev(&dwc->chan),
+ "BUG: Attempted to start new LLP transfer "
+ "inside ongoing one\n");
+ return;
+ }
+
+ dwc_initialize(dwc);
+
+ dwc->tx_list = &first->tx_list;
+ dwc->tx_node_active = first->tx_list.next;
+
+ dwc_do_single_block(dwc, first);
+
+ return;
+ }
+
dwc_initialize(dwc);
channel_writel(dwc, LLP, first->txd.phys);
@@ -553,8 +597,36 @@ static void dw_dma_tasklet(unsigned long data)
dwc_handle_cyclic(dw, dwc, status_err, status_xfer);
else if (status_err & (1 << i))
dwc_handle_error(dw, dwc);
- else if (status_xfer & (1 << i))
+ else if (status_xfer & (1 << i)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
+ if (dwc->tx_node_active != dwc->tx_list) {
+ struct dw_desc *desc =
+ list_entry(dwc->tx_node_active,
+ struct dw_desc,
+ desc_node);
+
+ dma_writel(dw, CLEAR.XFER, dwc->mask);
+
+ /* move pointer to next descriptor */
+ dwc->tx_node_active =
+ dwc->tx_node_active->next;
+
+ dwc_do_single_block(dwc, desc);
+
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ continue;
+ } else {
+ /* we are done here */
+ clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
+ }
+ }
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
dwc_scan_descriptors(dw, dwc);
+ }
}
/*
@@ -636,6 +708,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
size_t len, unsigned long flags)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ struct dw_dma_slave *dws = chan->private;
struct dw_desc *desc;
struct dw_desc *first;
struct dw_desc *prev;
@@ -643,6 +716,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
size_t offset;
unsigned int src_width;
unsigned int dst_width;
+ unsigned int data_width;
u32 ctllo;
dev_vdbg(chan2dev(chan),
@@ -655,7 +729,11 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
return NULL;
}
- src_width = dst_width = dwc_fast_fls(src | dest | len);
+ data_width = min_t(unsigned int, dwc->dw->data_width[dwc_get_sms(dws)],
+ dwc->dw->data_width[dwc_get_dms(dws)]);
+
+ src_width = dst_width = min_t(unsigned int, data_width,
+ dwc_fast_fls(src | dest | len));
ctllo = DWC_DEFAULT_CTLLO(chan)
| DWC_CTLL_DST_WIDTH(dst_width)
@@ -667,7 +745,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
for (offset = 0; offset < len; offset += xfer_count << src_width) {
xfer_count = min_t(size_t, (len - offset) >> src_width,
- DWC_MAX_COUNT);
+ dwc->block_size);
desc = dwc_desc_get(dwc);
if (!desc)
@@ -725,6 +803,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
dma_addr_t reg;
unsigned int reg_width;
unsigned int mem_width;
+ unsigned int data_width;
unsigned int i;
struct scatterlist *sg;
size_t total_len = 0;
@@ -748,6 +827,8 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
DWC_CTLL_FC(DW_DMA_FC_D_M2P);
+ data_width = dwc->dw->data_width[dwc_get_sms(dws)];
+
for_each_sg(sgl, sg, sg_len, i) {
struct dw_desc *desc;
u32 len, dlen, mem;
@@ -755,7 +836,8 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
mem = sg_dma_address(sg);
len = sg_dma_len(sg);
- mem_width = dwc_fast_fls(mem | len);
+ mem_width = min_t(unsigned int,
+ data_width, dwc_fast_fls(mem | len));
slave_sg_todev_fill_desc:
desc = dwc_desc_get(dwc);
@@ -768,8 +850,8 @@ slave_sg_todev_fill_desc:
desc->lli.sar = mem;
desc->lli.dar = reg;
desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
- if ((len >> mem_width) > DWC_MAX_COUNT) {
- dlen = DWC_MAX_COUNT << mem_width;
+ if ((len >> mem_width) > dwc->block_size) {
+ dlen = dwc->block_size << mem_width;
mem += dlen;
len -= dlen;
} else {
@@ -808,6 +890,8 @@ slave_sg_todev_fill_desc:
ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
DWC_CTLL_FC(DW_DMA_FC_D_P2M);
+ data_width = dwc->dw->data_width[dwc_get_dms(dws)];
+
for_each_sg(sgl, sg, sg_len, i) {
struct dw_desc *desc;
u32 len, dlen, mem;
@@ -815,7 +899,8 @@ slave_sg_todev_fill_desc:
mem = sg_dma_address(sg);
len = sg_dma_len(sg);
- mem_width = dwc_fast_fls(mem | len);
+ mem_width = min_t(unsigned int,
+ data_width, dwc_fast_fls(mem | len));
slave_sg_fromdev_fill_desc:
desc = dwc_desc_get(dwc);
@@ -828,8 +913,8 @@ slave_sg_fromdev_fill_desc:
desc->lli.sar = reg;
desc->lli.dar = mem;
desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
- if ((len >> reg_width) > DWC_MAX_COUNT) {
- dlen = DWC_MAX_COUNT << reg_width;
+ if ((len >> reg_width) > dwc->block_size) {
+ dlen = dwc->block_size << reg_width;
mem += dlen;
len -= dlen;
} else {
@@ -945,6 +1030,8 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
} else if (cmd == DMA_TERMINATE_ALL) {
spin_lock_irqsave(&dwc->lock, flags);
+ clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
+
dwc_chan_disable(dw, dwc);
dwc->paused = false;
@@ -1187,6 +1274,13 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
unsigned long flags;
spin_lock_irqsave(&dwc->lock, flags);
+ if (dwc->nollp) {
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ dev_dbg(chan2dev(&dwc->chan),
+ "channel doesn't support LLP transfers\n");
+ return ERR_PTR(-EINVAL);
+ }
+
if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
spin_unlock_irqrestore(&dwc->lock, flags);
dev_dbg(chan2dev(&dwc->chan),
@@ -1212,7 +1306,7 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
periods = buf_len / period_len;
/* Check for too big/unaligned periods and unaligned DMA buffer. */
- if (period_len > (DWC_MAX_COUNT << reg_width))
+ if (period_len > (dwc->block_size << reg_width))
goto out_err;
if (unlikely(period_len & ((1 << reg_width) - 1)))
goto out_err;
@@ -1374,6 +1468,11 @@ static int __devinit dw_probe(struct platform_device *pdev)
struct resource *io;
struct dw_dma *dw;
size_t size;
+ void __iomem *regs;
+ bool autocfg;
+ unsigned int dw_params;
+ unsigned int nr_channels;
+ unsigned int max_blk_size = 0;
int irq;
int err;
int i;
@@ -1390,32 +1489,46 @@ static int __devinit dw_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- size = sizeof(struct dw_dma);
- size += pdata->nr_channels * sizeof(struct dw_dma_chan);
- dw = kzalloc(size, GFP_KERNEL);
+ regs = devm_request_and_ioremap(&pdev->dev, io);
+ if (!regs)
+ return -EBUSY;
+
+ dw_params = dma_read_byaddr(regs, DW_PARAMS);
+ autocfg = dw_params >> DW_PARAMS_EN & 0x1;
+
+ if (autocfg)
+ nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 0x7) + 1;
+ else
+ nr_channels = pdata->nr_channels;
+
+ size = sizeof(struct dw_dma) + nr_channels * sizeof(struct dw_dma_chan);
+ dw = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
if (!dw)
return -ENOMEM;
- if (!request_mem_region(io->start, DW_REGLEN, pdev->dev.driver->name)) {
- err = -EBUSY;
- goto err_kfree;
- }
+ dw->clk = devm_clk_get(&pdev->dev, "hclk");
+ if (IS_ERR(dw->clk))
+ return PTR_ERR(dw->clk);
+ clk_prepare_enable(dw->clk);
- dw->regs = ioremap(io->start, DW_REGLEN);
- if (!dw->regs) {
- err = -ENOMEM;
- goto err_release_r;
- }
+ dw->regs = regs;
+
+ /* get hardware configuration parameters */
+ if (autocfg) {
+ max_blk_size = dma_readl(dw, MAX_BLK_SIZE);
- dw->clk = clk_get(&pdev->dev, "hclk");
- if (IS_ERR(dw->clk)) {
- err = PTR_ERR(dw->clk);
- goto err_clk;
+ dw->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1;
+ for (i = 0; i < dw->nr_masters; i++) {
+ dw->data_width[i] =
+ (dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3) + 2;
+ }
+ } else {
+ dw->nr_masters = pdata->nr_masters;
+ memcpy(dw->data_width, pdata->data_width, 4);
}
- clk_prepare_enable(dw->clk);
/* Calculate all channel mask before DMA setup */
- dw->all_chan_mask = (1 << pdata->nr_channels) - 1;
+ dw->all_chan_mask = (1 << nr_channels) - 1;
/* force dma off, just in case */
dw_dma_off(dw);
@@ -1423,17 +1536,19 @@ static int __devinit dw_probe(struct platform_device *pdev)
/* disable BLOCK interrupts as well */
channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
- err = request_irq(irq, dw_dma_interrupt, 0, "dw_dmac", dw);
+ err = devm_request_irq(&pdev->dev, irq, dw_dma_interrupt, 0,
+ "dw_dmac", dw);
if (err)
- goto err_irq;
+ return err;
platform_set_drvdata(pdev, dw);
tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);
INIT_LIST_HEAD(&dw->dma.channels);
- for (i = 0; i < pdata->nr_channels; i++) {
+ for (i = 0; i < nr_channels; i++) {
struct dw_dma_chan *dwc = &dw->chan[i];
+ int r = nr_channels - i - 1;
dwc->chan.device = &dw->dma;
dma_cookie_init(&dwc->chan);
@@ -1445,7 +1560,7 @@ static int __devinit dw_probe(struct platform_device *pdev)
/* 7 is highest priority & 0 is lowest. */
if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
- dwc->priority = pdata->nr_channels - i - 1;
+ dwc->priority = r;
else
dwc->priority = i;
@@ -1458,6 +1573,32 @@ static int __devinit dw_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&dwc->free_list);
channel_clear_bit(dw, CH_EN, dwc->mask);
+
+ dwc->dw = dw;
+
+ /* hardware configuration */
+ if (autocfg) {
+ unsigned int dwc_params;
+
+ dwc_params = dma_read_byaddr(regs + r * sizeof(u32),
+ DWC_PARAMS);
+
+			/*
+			 * Decode the maximum block size for the given
+			 * channel. The stored 4-bit value encodes sizes
+			 * from 0x00 for 3 up to 0x0a for 4095.
+			 */
+ dwc->block_size =
+ (4 << ((max_blk_size >> 4 * i) & 0xf)) - 1;
+ dwc->nollp =
+ (dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0;
+ } else {
+ dwc->block_size = pdata->block_size;
+
+ /* Check if channel supports multi block transfer */
+ channel_writel(dwc, LLP, 0xfffffffc);
+ dwc->nollp =
+ (channel_readl(dwc, LLP) & 0xfffffffc) == 0;
+ channel_writel(dwc, LLP, 0);
+ }
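+
+		/*
+		 * Worked example of the decode above (arithmetic only,
+		 * no extra hardware assumptions): an encoded nibble of
+		 * 0x0a gives (4 << 10) - 1 = 4095 transfers, while 0x00
+		 * gives (4 << 0) - 1 = 3, matching the 3..4095 range.
+		 */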
}
/* Clear all interrupts on all channels. */
@@ -1486,35 +1627,21 @@ static int __devinit dw_probe(struct platform_device *pdev)
dma_writel(dw, CFG, DW_CFG_DMA_EN);
printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n",
- dev_name(&pdev->dev), pdata->nr_channels);
+ dev_name(&pdev->dev), nr_channels);
dma_async_device_register(&dw->dma);
return 0;
-
-err_irq:
- clk_disable_unprepare(dw->clk);
- clk_put(dw->clk);
-err_clk:
- iounmap(dw->regs);
- dw->regs = NULL;
-err_release_r:
- release_resource(io);
-err_kfree:
- kfree(dw);
- return err;
}
static int __devexit dw_remove(struct platform_device *pdev)
{
struct dw_dma *dw = platform_get_drvdata(pdev);
struct dw_dma_chan *dwc, *_dwc;
- struct resource *io;
dw_dma_off(dw);
dma_async_device_unregister(&dw->dma);
- free_irq(platform_get_irq(pdev, 0), dw);
tasklet_kill(&dw->tasklet);
list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
@@ -1523,17 +1650,6 @@ static int __devexit dw_remove(struct platform_device *pdev)
channel_clear_bit(dw, CH_EN, dwc->mask);
}
- clk_disable_unprepare(dw->clk);
- clk_put(dw->clk);
-
- iounmap(dw->regs);
- dw->regs = NULL;
-
- io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(io->start, DW_REGLEN);
-
- kfree(dw);
-
return 0;
}
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h
index 50830be..ff39fa6 100644
--- a/drivers/dma/dw_dmac_regs.h
+++ b/drivers/dma/dw_dmac_regs.h
@@ -82,9 +82,39 @@ struct dw_dma_regs {
DW_REG(ID);
DW_REG(TEST);
+ /* reserved */
+ DW_REG(__reserved0);
+ DW_REG(__reserved1);
+
/* optional encoded params, 0x3c8..0x3f7 */
+ u32 __reserved;
+
+ /* per-channel configuration registers */
+ u32 DWC_PARAMS[DW_DMA_MAX_NR_CHANNELS];
+ u32 MULTI_BLK_TYPE;
+ u32 MAX_BLK_SIZE;
+
+ /* top-level parameters */
+ u32 DW_PARAMS;
};
+/* To access the registers in early stage of probe */
+#define dma_read_byaddr(addr, name) \
+ readl((addr) + offsetof(struct dw_dma_regs, name))
+
+/* Bitfields in DW_PARAMS */
+#define DW_PARAMS_NR_CHAN 8 /* number of channels */
+#define DW_PARAMS_NR_MASTER 11 /* number of AHB masters */
+#define DW_PARAMS_DATA_WIDTH(n) (15 + 2 * (n))
+#define DW_PARAMS_DATA_WIDTH1 15 /* master 1 data width */
+#define DW_PARAMS_DATA_WIDTH2 17 /* master 2 data width */
+#define DW_PARAMS_DATA_WIDTH3 19 /* master 3 data width */
+#define DW_PARAMS_DATA_WIDTH4 21 /* master 4 data width */
+#define DW_PARAMS_EN 28 /* encoded parameters */
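+
+/*
+ * Decode sketch (same arithmetic as the probe path):
+ *
+ *	u32 p = dma_read_byaddr(regs, DW_PARAMS);
+ *
+ *	autocfg     = p >> DW_PARAMS_EN & 0x1;
+ *	nr_channels = (p >> DW_PARAMS_NR_CHAN & 0x7) + 1;
+ *	nr_masters  = (p >> DW_PARAMS_NR_MASTER & 3) + 1;
+ */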
+
+/* Bitfields in DWC_PARAMS */
+#define DWC_PARAMS_MBLK_EN 11 /* multi block transfer */
+
/* Bitfields in CTL_LO */
#define DWC_CTLL_INT_EN (1 << 0) /* irqs enabled? */
#define DWC_CTLL_DST_WIDTH(n) ((n)<<1) /* bytes per element */
@@ -140,10 +170,9 @@ struct dw_dma_regs {
/* Bitfields in CFG */
#define DW_CFG_DMA_EN (1 << 0)
-#define DW_REGLEN 0x400
-
enum dw_dmac_flags {
DW_DMA_IS_CYCLIC = 0,
+ DW_DMA_IS_SOFT_LLP = 1,
};
struct dw_dma_chan {
@@ -154,6 +183,10 @@ struct dw_dma_chan {
bool paused;
bool initialized;
+ /* software emulation of the LLP transfers */
+ struct list_head *tx_list;
+ struct list_head *tx_node_active;
+
spinlock_t lock;
/* these other elements are all protected by lock */
@@ -165,8 +198,15 @@ struct dw_dma_chan {
unsigned int descs_allocated;
+ /* hardware configuration */
+ unsigned int block_size;
+ bool nollp;
+
/* configuration passed via DMA_SLAVE_CONFIG */
struct dma_slave_config dma_sconfig;
+
+ /* backlink to dw_dma */
+ struct dw_dma *dw;
};
static inline struct dw_dma_chan_regs __iomem *
@@ -193,6 +233,10 @@ struct dw_dma {
u8 all_chan_mask;
+ /* hardware configuration */
+ unsigned char nr_masters;
+ unsigned char data_width[4];
+
struct dw_dma_chan chan[0];
};
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
new file mode 100644
index 0000000..05aea3c
--- /dev/null
+++ b/drivers/dma/edma.c
@@ -0,0 +1,671 @@
+/*
+ * TI EDMA DMA engine driver
+ *
+ * Copyright 2012 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include <mach/edma.h>
+
+#include "dmaengine.h"
+#include "virt-dma.h"
+
+/*
+ * This will go away when the private EDMA API is folded
+ * into this driver and the platform device(s) are
+ * instantiated in the arch code. We can only get away
+ * with this simplification because DA8XX may not be built
+ * in the same kernel image with other DaVinci parts. This
+ * avoids having to sprinkle dmaengine driver platform devices
+ * and data throughout all the existing board files.
+ */
+#ifdef CONFIG_ARCH_DAVINCI_DA8XX
+#define EDMA_CTLRS 2
+#define EDMA_CHANS 32
+#else
+#define EDMA_CTLRS 1
+#define EDMA_CHANS 64
+#endif /* CONFIG_ARCH_DAVINCI_DA8XX */
+
+/* Max of 16 segments per channel to conserve PaRAM slots */
+#define MAX_NR_SG 16
+#define EDMA_MAX_SLOTS MAX_NR_SG
+#define EDMA_DESCRIPTORS 16
+
+struct edma_desc {
+ struct virt_dma_desc vdesc;
+ struct list_head node;
+ int absync;
+ int pset_nr;
+ struct edmacc_param pset[0];
+};
+
+struct edma_cc;
+
+struct edma_chan {
+ struct virt_dma_chan vchan;
+ struct list_head node;
+ struct edma_desc *edesc;
+ struct edma_cc *ecc;
+ int ch_num;
+ bool alloced;
+ int slot[EDMA_MAX_SLOTS];
+ dma_addr_t addr;
+ int addr_width;
+ int maxburst;
+};
+
+struct edma_cc {
+ int ctlr;
+ struct dma_device dma_slave;
+ struct edma_chan slave_chans[EDMA_CHANS];
+ int num_slave_chans;
+ int dummy_slot;
+};
+
+static inline struct edma_cc *to_edma_cc(struct dma_device *d)
+{
+ return container_of(d, struct edma_cc, dma_slave);
+}
+
+static inline struct edma_chan *to_edma_chan(struct dma_chan *c)
+{
+ return container_of(c, struct edma_chan, vchan.chan);
+}
+
+static inline struct edma_desc *
+to_edma_desc(struct dma_async_tx_descriptor *tx)
+{
+ return container_of(tx, struct edma_desc, vdesc.tx);
+}
+
+static void edma_desc_free(struct virt_dma_desc *vdesc)
+{
+ kfree(container_of(vdesc, struct edma_desc, vdesc));
+}
+
+/* Dispatch a queued descriptor to the controller (caller holds lock) */
+static void edma_execute(struct edma_chan *echan)
+{
+ struct virt_dma_desc *vdesc = vchan_next_desc(&echan->vchan);
+ struct edma_desc *edesc;
+ int i;
+
+ if (!vdesc) {
+ echan->edesc = NULL;
+ return;
+ }
+
+ list_del(&vdesc->node);
+
+ echan->edesc = edesc = to_edma_desc(&vdesc->tx);
+
+ /* Write descriptor PaRAM set(s) */
+ for (i = 0; i < edesc->pset_nr; i++) {
+ edma_write_slot(echan->slot[i], &edesc->pset[i]);
+ dev_dbg(echan->vchan.chan.device->dev,
+ "\n pset[%d]:\n"
+ " chnum\t%d\n"
+ " slot\t%d\n"
+ " opt\t%08x\n"
+ " src\t%08x\n"
+ " dst\t%08x\n"
+ " abcnt\t%08x\n"
+ " ccnt\t%08x\n"
+ " bidx\t%08x\n"
+ " cidx\t%08x\n"
+ " lkrld\t%08x\n",
+ i, echan->ch_num, echan->slot[i],
+ edesc->pset[i].opt,
+ edesc->pset[i].src,
+ edesc->pset[i].dst,
+ edesc->pset[i].a_b_cnt,
+ edesc->pset[i].ccnt,
+ edesc->pset[i].src_dst_bidx,
+ edesc->pset[i].src_dst_cidx,
+ edesc->pset[i].link_bcntrld);
+ /* Link to the previous slot if not the last set */
+ if (i != (edesc->pset_nr - 1))
+ edma_link(echan->slot[i], echan->slot[i+1]);
+ /* Final pset links to the dummy pset */
+ else
+ edma_link(echan->slot[i], echan->ecc->dummy_slot);
+ }
+
+ edma_start(echan->ch_num);
+}
+
+static int edma_terminate_all(struct edma_chan *echan)
+{
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&echan->vchan.lock, flags);
+
+ /*
+ * Stop DMA activity: we assume the callback will not be called
+	 * after edma_stop() returns (even if it does, it will see
+ * echan->edesc is NULL and exit.)
+ */
+ if (echan->edesc) {
+ echan->edesc = NULL;
+ edma_stop(echan->ch_num);
+ }
+
+ vchan_get_all_descriptors(&echan->vchan, &head);
+ spin_unlock_irqrestore(&echan->vchan.lock, flags);
+ vchan_dma_desc_free_list(&echan->vchan, &head);
+
+ return 0;
+}
+
+static int edma_slave_config(struct edma_chan *echan,
+ struct dma_slave_config *config)
+{
+ if ((config->src_addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES) ||
+ (config->dst_addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES))
+ return -EINVAL;
+
+ if (config->direction == DMA_MEM_TO_DEV) {
+ if (config->dst_addr)
+ echan->addr = config->dst_addr;
+ if (config->dst_addr_width)
+ echan->addr_width = config->dst_addr_width;
+ if (config->dst_maxburst)
+ echan->maxburst = config->dst_maxburst;
+ } else if (config->direction == DMA_DEV_TO_MEM) {
+ if (config->src_addr)
+ echan->addr = config->src_addr;
+ if (config->src_addr_width)
+ echan->addr_width = config->src_addr_width;
+ if (config->src_maxburst)
+ echan->maxburst = config->src_maxburst;
+ }
+
+ return 0;
+}
+
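+/*
+ * Example of what a client might pass in (a sketch; the FIFO address
+ * and width here are hypothetical, peripheral-specific values):
+ *
+ *	struct dma_slave_config cfg = {
+ *		.direction	= DMA_MEM_TO_DEV,
+ *		.dst_addr	= fifo_phys,
+ *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
+ *		.dst_maxburst	= 8,
+ *	};
+ *
+ *	dmaengine_slave_config(chan, &cfg);
+ */
+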
+static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ int ret = 0;
+ struct dma_slave_config *config;
+ struct edma_chan *echan = to_edma_chan(chan);
+
+ switch (cmd) {
+ case DMA_TERMINATE_ALL:
+ edma_terminate_all(echan);
+ break;
+ case DMA_SLAVE_CONFIG:
+ config = (struct dma_slave_config *)arg;
+ ret = edma_slave_config(echan, config);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+
+ return ret;
+}
+
+static struct dma_async_tx_descriptor *edma_prep_slave_sg(
+ struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction direction,
+ unsigned long tx_flags, void *context)
+{
+ struct edma_chan *echan = to_edma_chan(chan);
+ struct device *dev = chan->device->dev;
+ struct edma_desc *edesc;
+ struct scatterlist *sg;
+ int i;
+ int acnt, bcnt, ccnt, src, dst, cidx;
+ int src_bidx, dst_bidx, src_cidx, dst_cidx;
+
+ if (unlikely(!echan || !sgl || !sg_len))
+ return NULL;
+
+ if (echan->addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
+ dev_err(dev, "Undefined slave buswidth\n");
+ return NULL;
+ }
+
+ if (sg_len > MAX_NR_SG) {
+ dev_err(dev, "Exceeded max SG segments %d > %d\n",
+ sg_len, MAX_NR_SG);
+ return NULL;
+ }
+
+ edesc = kzalloc(sizeof(*edesc) + sg_len *
+ sizeof(edesc->pset[0]), GFP_ATOMIC);
+ if (!edesc) {
+ dev_dbg(dev, "Failed to allocate a descriptor\n");
+ return NULL;
+ }
+
+ edesc->pset_nr = sg_len;
+
+ for_each_sg(sgl, sg, sg_len, i) {
+ /* Allocate a PaRAM slot, if needed */
+ if (echan->slot[i] < 0) {
+ echan->slot[i] =
+ edma_alloc_slot(EDMA_CTLR(echan->ch_num),
+ EDMA_SLOT_ANY);
+ if (echan->slot[i] < 0) {
+ dev_err(dev, "Failed to allocate slot\n");
+ return NULL;
+ }
+ }
+
+ acnt = echan->addr_width;
+
+ /*
+ * If the maxburst is equal to the fifo width, use
+ * A-synced transfers. This allows for large contiguous
+ * buffer transfers using only one PaRAM set.
+ */
+ if (echan->maxburst == 1) {
+ edesc->absync = false;
+ ccnt = sg_dma_len(sg) / acnt / (SZ_64K - 1);
+ bcnt = sg_dma_len(sg) / acnt - ccnt * (SZ_64K - 1);
+ if (bcnt)
+ ccnt++;
+ else
+ bcnt = SZ_64K - 1;
+ cidx = acnt;
+ /*
+ * If maxburst is greater than the fifo address_width,
+ * use AB-synced transfers where A count is the fifo
+ * address_width and B count is the maxburst. In this
+ * case, we are limited to transfers of C count frames
+ * of (address_width * maxburst) where C count is limited
+ * to SZ_64K-1. This places an upper bound on the length
+ * of an SG segment that can be handled.
+ */
+ } else {
+ edesc->absync = true;
+ bcnt = echan->maxburst;
+ ccnt = sg_dma_len(sg) / (acnt * bcnt);
+ if (ccnt > (SZ_64K - 1)) {
+ dev_err(dev, "Exceeded max SG segment size\n");
+ return NULL;
+ }
+ cidx = acnt * bcnt;
+ }
+
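+		/*
+		 * Worked numbers for the two cases above (illustrative
+		 * only): with acnt = 4 and maxburst = 8, an AB-synced
+		 * 64 KiB segment needs ccnt = 65536 / (4 * 8) = 2048
+		 * frames, comfortably below the SZ_64K - 1 ceiling.
+		 */
+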
+ if (direction == DMA_MEM_TO_DEV) {
+ src = sg_dma_address(sg);
+ dst = echan->addr;
+ src_bidx = acnt;
+ src_cidx = cidx;
+ dst_bidx = 0;
+ dst_cidx = 0;
+ } else {
+ src = echan->addr;
+ dst = sg_dma_address(sg);
+ src_bidx = 0;
+ src_cidx = 0;
+ dst_bidx = acnt;
+ dst_cidx = cidx;
+ }
+
+ edesc->pset[i].opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
+ /* Configure A or AB synchronized transfers */
+ if (edesc->absync)
+ edesc->pset[i].opt |= SYNCDIM;
+ /* If this is the last set, enable completion interrupt flag */
+ if (i == sg_len - 1)
+ edesc->pset[i].opt |= TCINTEN;
+
+ edesc->pset[i].src = src;
+ edesc->pset[i].dst = dst;
+
+ edesc->pset[i].src_dst_bidx = (dst_bidx << 16) | src_bidx;
+ edesc->pset[i].src_dst_cidx = (dst_cidx << 16) | src_cidx;
+
+ edesc->pset[i].a_b_cnt = bcnt << 16 | acnt;
+ edesc->pset[i].ccnt = ccnt;
+ edesc->pset[i].link_bcntrld = 0xffffffff;
+
+ }
+
+ return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
+}
+
+static void edma_callback(unsigned ch_num, u16 ch_status, void *data)
+{
+ struct edma_chan *echan = data;
+ struct device *dev = echan->vchan.chan.device->dev;
+ struct edma_desc *edesc;
+ unsigned long flags;
+
+ /* Stop the channel */
+ edma_stop(echan->ch_num);
+
+ switch (ch_status) {
+ case DMA_COMPLETE:
+ dev_dbg(dev, "transfer complete on channel %d\n", ch_num);
+
+ spin_lock_irqsave(&echan->vchan.lock, flags);
+
+ edesc = echan->edesc;
+ if (edesc) {
+ edma_execute(echan);
+ vchan_cookie_complete(&edesc->vdesc);
+ }
+
+ spin_unlock_irqrestore(&echan->vchan.lock, flags);
+
+ break;
+ case DMA_CC_ERROR:
+ dev_dbg(dev, "transfer error on channel %d\n", ch_num);
+ break;
+ default:
+ break;
+ }
+}
+
+/* Alloc channel resources */
+static int edma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct edma_chan *echan = to_edma_chan(chan);
+ struct device *dev = chan->device->dev;
+ int ret;
+ int a_ch_num;
+ LIST_HEAD(descs);
+
+ a_ch_num = edma_alloc_channel(echan->ch_num, edma_callback,
+ chan, EVENTQ_DEFAULT);
+
+ if (a_ch_num < 0) {
+ ret = -ENODEV;
+ goto err_no_chan;
+ }
+
+ if (a_ch_num != echan->ch_num) {
+ dev_err(dev, "failed to allocate requested channel %u:%u\n",
+ EDMA_CTLR(echan->ch_num),
+ EDMA_CHAN_SLOT(echan->ch_num));
+ ret = -ENODEV;
+ goto err_wrong_chan;
+ }
+
+ echan->alloced = true;
+ echan->slot[0] = echan->ch_num;
+
+ dev_info(dev, "allocated channel for %u:%u\n",
+ EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num));
+
+ return 0;
+
+err_wrong_chan:
+ edma_free_channel(a_ch_num);
+err_no_chan:
+ return ret;
+}
+
+/* Free channel resources */
+static void edma_free_chan_resources(struct dma_chan *chan)
+{
+ struct edma_chan *echan = to_edma_chan(chan);
+ struct device *dev = chan->device->dev;
+ int i;
+
+ /* Terminate transfers */
+ edma_stop(echan->ch_num);
+
+ vchan_free_chan_resources(&echan->vchan);
+
+ /* Free EDMA PaRAM slots */
+ for (i = 1; i < EDMA_MAX_SLOTS; i++) {
+ if (echan->slot[i] >= 0) {
+ edma_free_slot(echan->slot[i]);
+ echan->slot[i] = -1;
+ }
+ }
+
+ /* Free EDMA channel */
+ if (echan->alloced) {
+ edma_free_channel(echan->ch_num);
+ echan->alloced = false;
+ }
+
+ dev_info(dev, "freeing channel for %u\n", echan->ch_num);
+}
+
+/* Send pending descriptor to hardware */
+static void edma_issue_pending(struct dma_chan *chan)
+{
+ struct edma_chan *echan = to_edma_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&echan->vchan.lock, flags);
+ if (vchan_issue_pending(&echan->vchan) && !echan->edesc)
+ edma_execute(echan);
+ spin_unlock_irqrestore(&echan->vchan.lock, flags);
+}
+
+static size_t edma_desc_size(struct edma_desc *edesc)
+{
+ int i;
+ size_t size;
+
+ if (edesc->absync)
+ for (size = i = 0; i < edesc->pset_nr; i++)
+ size += (edesc->pset[i].a_b_cnt & 0xffff) *
+ (edesc->pset[i].a_b_cnt >> 16) *
+ edesc->pset[i].ccnt;
+ else
+ size = (edesc->pset[0].a_b_cnt & 0xffff) *
+ (edesc->pset[0].a_b_cnt >> 16) +
+ (edesc->pset[0].a_b_cnt & 0xffff) *
+ (SZ_64K - 1) * edesc->pset[0].ccnt;
+
+ return size;
+}
+
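+/*
+ * Residue example (same arithmetic as above): an AB-synced pset with
+ * acnt = 4, bcnt = 8 and ccnt = 2048 contributes 4 * 8 * 2048 = 64 KiB
+ * to the reported residue.
+ */
+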
+/* Check request completion status */
+static enum dma_status edma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct edma_chan *echan = to_edma_chan(chan);
+ struct virt_dma_desc *vdesc;
+ enum dma_status ret;
+ unsigned long flags;
+
+ ret = dma_cookie_status(chan, cookie, txstate);
+ if (ret == DMA_SUCCESS || !txstate)
+ return ret;
+
+ spin_lock_irqsave(&echan->vchan.lock, flags);
+ vdesc = vchan_find_desc(&echan->vchan, cookie);
+ if (vdesc) {
+ txstate->residue = edma_desc_size(to_edma_desc(&vdesc->tx));
+ } else if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie) {
+ struct edma_desc *edesc = echan->edesc;
+ txstate->residue = edma_desc_size(edesc);
+ } else {
+ txstate->residue = 0;
+ }
+ spin_unlock_irqrestore(&echan->vchan.lock, flags);
+
+ return ret;
+}
+
+static void __init edma_chan_init(struct edma_cc *ecc,
+ struct dma_device *dma,
+ struct edma_chan *echans)
+{
+ int i, j;
+
+ for (i = 0; i < EDMA_CHANS; i++) {
+ struct edma_chan *echan = &echans[i];
+ echan->ch_num = EDMA_CTLR_CHAN(ecc->ctlr, i);
+ echan->ecc = ecc;
+ echan->vchan.desc_free = edma_desc_free;
+
+ vchan_init(&echan->vchan, dma);
+
+ INIT_LIST_HEAD(&echan->node);
+ for (j = 0; j < EDMA_MAX_SLOTS; j++)
+ echan->slot[j] = -1;
+ }
+}
+
+static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma,
+ struct device *dev)
+{
+ dma->device_prep_slave_sg = edma_prep_slave_sg;
+ dma->device_alloc_chan_resources = edma_alloc_chan_resources;
+ dma->device_free_chan_resources = edma_free_chan_resources;
+ dma->device_issue_pending = edma_issue_pending;
+ dma->device_tx_status = edma_tx_status;
+ dma->device_control = edma_control;
+ dma->dev = dev;
+
+ INIT_LIST_HEAD(&dma->channels);
+}
+
+static int __devinit edma_probe(struct platform_device *pdev)
+{
+ struct edma_cc *ecc;
+ int ret;
+
+ ecc = devm_kzalloc(&pdev->dev, sizeof(*ecc), GFP_KERNEL);
+ if (!ecc) {
+ dev_err(&pdev->dev, "Can't allocate controller\n");
+ return -ENOMEM;
+ }
+
+ ecc->ctlr = pdev->id;
+ ecc->dummy_slot = edma_alloc_slot(ecc->ctlr, EDMA_SLOT_ANY);
+ if (ecc->dummy_slot < 0) {
+ dev_err(&pdev->dev, "Can't allocate PaRAM dummy slot\n");
+ return -EIO;
+ }
+
+ dma_cap_zero(ecc->dma_slave.cap_mask);
+ dma_cap_set(DMA_SLAVE, ecc->dma_slave.cap_mask);
+
+ edma_dma_init(ecc, &ecc->dma_slave, &pdev->dev);
+
+ edma_chan_init(ecc, &ecc->dma_slave, ecc->slave_chans);
+
+ ret = dma_async_device_register(&ecc->dma_slave);
+ if (ret)
+ goto err_reg1;
+
+ platform_set_drvdata(pdev, ecc);
+
+ dev_info(&pdev->dev, "TI EDMA DMA engine driver\n");
+
+ return 0;
+
+err_reg1:
+ edma_free_slot(ecc->dummy_slot);
+ return ret;
+}
+
+static int __devexit edma_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct edma_cc *ecc = dev_get_drvdata(dev);
+
+ dma_async_device_unregister(&ecc->dma_slave);
+ edma_free_slot(ecc->dummy_slot);
+
+ return 0;
+}
+
+static struct platform_driver edma_driver = {
+ .probe = edma_probe,
+ .remove = __devexit_p(edma_remove),
+ .driver = {
+ .name = "edma-dma-engine",
+ .owner = THIS_MODULE,
+ },
+};
+
+bool edma_filter_fn(struct dma_chan *chan, void *param)
+{
+ if (chan->device->dev->driver == &edma_driver.driver) {
+ struct edma_chan *echan = to_edma_chan(chan);
+ unsigned ch_req = *(unsigned *)param;
+ return ch_req == echan->ch_num;
+ }
+ return false;
+}
+EXPORT_SYMBOL(edma_filter_fn);
+
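+/*
+ * Typical client usage (sketch; the channel number is board-specific
+ * and hypothetical here):
+ *
+ *	dma_cap_mask_t mask;
+ *	unsigned int ch = EDMA_CTLR_CHAN(0, 20);
+ *	struct dma_chan *chan;
+ *
+ *	dma_cap_zero(mask);
+ *	dma_cap_set(DMA_SLAVE, mask);
+ *	chan = dma_request_channel(mask, edma_filter_fn, &ch);
+ */
+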
+static struct platform_device *pdev0, *pdev1;
+
+static const struct platform_device_info edma_dev_info0 = {
+ .name = "edma-dma-engine",
+ .id = 0,
+ .dma_mask = DMA_BIT_MASK(32),
+};
+
+static const struct platform_device_info edma_dev_info1 = {
+ .name = "edma-dma-engine",
+ .id = 1,
+ .dma_mask = DMA_BIT_MASK(32),
+};
+
+static int edma_init(void)
+{
+ int ret = platform_driver_register(&edma_driver);
+
+ if (ret == 0) {
+ pdev0 = platform_device_register_full(&edma_dev_info0);
+ if (IS_ERR(pdev0)) {
+ platform_driver_unregister(&edma_driver);
+ ret = PTR_ERR(pdev0);
+ goto out;
+ }
+ }
+
+ if (EDMA_CTLRS == 2) {
+ pdev1 = platform_device_register_full(&edma_dev_info1);
+ if (IS_ERR(pdev1)) {
+ platform_driver_unregister(&edma_driver);
+ platform_device_unregister(pdev0);
+ ret = PTR_ERR(pdev1);
+ }
+ }
+
+out:
+ return ret;
+}
+subsys_initcall(edma_init);
+
+static void __exit edma_exit(void)
+{
+ platform_device_unregister(pdev0);
+ if (pdev1)
+ platform_device_unregister(pdev1);
+ platform_driver_unregister(&edma_driver);
+}
+module_exit(edma_exit);
+
+MODULE_AUTHOR("Matt Porter <mporter@ti.com>");
+MODULE_DESCRIPTION("TI EDMA DMA engine driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index 64256f6..bcfde40 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -1120,6 +1120,7 @@ fail:
* @buf_len: length of the buffer (in bytes)
* @period_len: length of a single period
* @dir: direction of the operation
+ * @flags: tx descriptor status flags
* @context: operation context (ignored)
*
* Prepares a descriptor for cyclic DMA operation. This means that once the
@@ -1133,7 +1134,8 @@ fail:
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
size_t buf_len, size_t period_len,
- enum dma_transfer_direction dir, void *context)
+ enum dma_transfer_direction dir, unsigned long flags,
+ void *context)
{
struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
struct ep93xx_dma_desc *desc, *first;
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 2a3fab2..f11b5b2 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -801,7 +801,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
size_t period_len, enum dma_transfer_direction direction,
- void *context)
+ unsigned long flags, void *context)
{
struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
struct imxdma_engine *imxdma = imxdmac->imxdma;
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 1b781d6..c099ca0 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -1012,7 +1012,7 @@ err_out:
static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
size_t period_len, enum dma_transfer_direction direction,
- void *context)
+ unsigned long flags, void *context)
{
struct sdma_channel *sdmac = to_sdma_chan(chan);
struct sdma_engine *sdma = sdmac->sdma;
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index 8689576..b9d6678 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -434,12 +434,11 @@ static struct ioat_ring_ent *ioat2_alloc_ring_ent(struct dma_chan *chan, gfp_t f
return NULL;
memset(hw, 0, sizeof(*hw));
- desc = kmem_cache_alloc(ioat2_cache, flags);
+ desc = kmem_cache_zalloc(ioat2_cache, flags);
if (!desc) {
pci_pool_free(dma->dma_pool, hw, phys);
return NULL;
}
- memset(desc, 0, sizeof(*desc));
dma_async_tx_descriptor_init(&desc->txd, chan);
desc->txd.tx_submit = ioat2_tx_submit_unlock;
diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c
index 5e3a40f..c057306 100644
--- a/drivers/dma/ioat/pci.c
+++ b/drivers/dma/ioat/pci.c
@@ -40,6 +40,17 @@ MODULE_VERSION(IOAT_DMA_VERSION);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");
+#define PCI_DEVICE_ID_INTEL_IOAT_IVB0 0x0e20
+#define PCI_DEVICE_ID_INTEL_IOAT_IVB1 0x0e21
+#define PCI_DEVICE_ID_INTEL_IOAT_IVB2 0x0e22
+#define PCI_DEVICE_ID_INTEL_IOAT_IVB3 0x0e23
+#define PCI_DEVICE_ID_INTEL_IOAT_IVB4 0x0e24
+#define PCI_DEVICE_ID_INTEL_IOAT_IVB5 0x0e25
+#define PCI_DEVICE_ID_INTEL_IOAT_IVB6 0x0e26
+#define PCI_DEVICE_ID_INTEL_IOAT_IVB7 0x0e27
+#define PCI_DEVICE_ID_INTEL_IOAT_IVB8 0x0e2e
+#define PCI_DEVICE_ID_INTEL_IOAT_IVB9 0x0e2f
+
static struct pci_device_id ioat_pci_tbl[] = {
/* I/OAT v1 platforms */
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT) },
@@ -83,6 +94,17 @@ static struct pci_device_id ioat_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB8) },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB9) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB0) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB1) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB2) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB3) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB4) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB5) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB6) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB7) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB8) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB9) },
+
{ 0, }
};
MODULE_DEVICE_TABLE(pci, ioat_pci_tbl);
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
new file mode 100644
index 0000000..14da1f4
--- /dev/null
+++ b/drivers/dma/mmp_pdma.c
@@ -0,0 +1,875 @@
+/*
+ * Copyright 2012 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/dmaengine.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/platform_data/mmp_dma.h>
+#include <linux/dmapool.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+
+#include "dmaengine.h"
+
+#define DCSR 0x0000
+#define DALGN 0x00a0
+#define DINT 0x00f0
+#define DDADR 0x0200
+#define DSADR 0x0204
+#define DTADR 0x0208
+#define DCMD 0x020c
+
+#define DCSR_RUN (1 << 31) /* Run Bit (read / write) */
+#define DCSR_NODESC (1 << 30) /* No-Descriptor Fetch (read / write) */
+#define DCSR_STOPIRQEN (1 << 29) /* Stop Interrupt Enable (read / write) */
+#define DCSR_REQPEND (1 << 8) /* Request Pending (read-only) */
+#define DCSR_STOPSTATE (1 << 3) /* Stop State (read-only) */
+#define DCSR_ENDINTR (1 << 2) /* End Interrupt (read / write) */
+#define DCSR_STARTINTR (1 << 1) /* Start Interrupt (read / write) */
+#define DCSR_BUSERR (1 << 0) /* Bus Error Interrupt (read / write) */
+
+#define DCSR_EORIRQEN (1 << 28) /* End of Receive Interrupt Enable (R/W) */
+#define DCSR_EORJMPEN (1 << 27) /* Jump to next descriptor on EOR */
+#define DCSR_EORSTOPEN (1 << 26) /* STOP on an EOR */
+#define DCSR_SETCMPST (1 << 25) /* Set Descriptor Compare Status */
+#define DCSR_CLRCMPST (1 << 24) /* Clear Descriptor Compare Status */
+#define DCSR_CMPST (1 << 10) /* The Descriptor Compare Status */
+#define DCSR_EORINTR (1 << 9) /* The end of Receive */
+
+#define DRCMR_MAPVLD (1 << 7) /* Map Valid (read / write) */
+#define DRCMR_CHLNUM 0x1f /* mask for Channel Number (read / write) */
+
+#define DDADR_DESCADDR 0xfffffff0 /* Address of next descriptor (mask) */
+#define DDADR_STOP (1 << 0) /* Stop (read / write) */
+
+#define DCMD_INCSRCADDR (1 << 31) /* Source Address Increment Setting. */
+#define DCMD_INCTRGADDR (1 << 30) /* Target Address Increment Setting. */
+#define DCMD_FLOWSRC (1 << 29) /* Flow Control by the source. */
+#define DCMD_FLOWTRG (1 << 28) /* Flow Control by the target. */
+#define DCMD_STARTIRQEN (1 << 22) /* Start Interrupt Enable */
+#define DCMD_ENDIRQEN (1 << 21) /* End Interrupt Enable */
+#define DCMD_ENDIAN (1 << 18) /* Device Endian-ness. */
+#define DCMD_BURST8 (1 << 16) /* 8 byte burst */
+#define DCMD_BURST16 (2 << 16) /* 16 byte burst */
+#define DCMD_BURST32 (3 << 16) /* 32 byte burst */
+#define DCMD_WIDTH1 (1 << 14) /* 1 byte width */
+#define DCMD_WIDTH2 (2 << 14) /* 2 byte width (HalfWord) */
+#define DCMD_WIDTH4 (3 << 14) /* 4 byte width (Word) */
+#define DCMD_LENGTH 0x01fff /* length mask (max = 8K - 1) */
+
+#define PDMA_ALIGNMENT 3
+#define PDMA_MAX_DESC_BYTES 0x1000
+
+struct mmp_pdma_desc_hw {
+ u32 ddadr; /* Points to the next descriptor + flags */
+ u32 dsadr; /* DSADR value for the current transfer */
+ u32 dtadr; /* DTADR value for the current transfer */
+ u32 dcmd; /* DCMD value for the current transfer */
+} __aligned(32);
+
+struct mmp_pdma_desc_sw {
+ struct mmp_pdma_desc_hw desc;
+ struct list_head node;
+ struct list_head tx_list;
+ struct dma_async_tx_descriptor async_tx;
+};
+
+struct mmp_pdma_phy;
+
+struct mmp_pdma_chan {
+ struct device *dev;
+ struct dma_chan chan;
+ struct dma_async_tx_descriptor desc;
+ struct mmp_pdma_phy *phy;
+ enum dma_transfer_direction dir;
+
+ /* channel's basic info */
+ struct tasklet_struct tasklet;
+ u32 dcmd;
+ u32 drcmr;
+ u32 dev_addr;
+
+ /* list for desc */
+ spinlock_t desc_lock; /* Descriptor list lock */
+ struct list_head chain_pending; /* Link descriptors queue for pending */
+ struct list_head chain_running; /* Link descriptors queue for running */
+	bool idle;			/* channel state machine */
+
+ struct dma_pool *desc_pool; /* Descriptors pool */
+};
+
+struct mmp_pdma_phy {
+ int idx;
+ void __iomem *base;
+ struct mmp_pdma_chan *vchan;
+};
+
+struct mmp_pdma_device {
+ int dma_channels;
+ void __iomem *base;
+ struct device *dev;
+ struct dma_device device;
+ struct mmp_pdma_phy *phy;
+};
+
+#define tx_to_mmp_pdma_desc(tx) container_of(tx, struct mmp_pdma_desc_sw, async_tx)
+#define to_mmp_pdma_desc(lh) container_of(lh, struct mmp_pdma_desc_sw, node)
+#define to_mmp_pdma_chan(dchan) container_of(dchan, struct mmp_pdma_chan, chan)
+#define to_mmp_pdma_dev(dmadev) container_of(dmadev, struct mmp_pdma_device, device)
+
+static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr)
+{
+ u32 reg = (phy->idx << 4) + DDADR;
+
+ writel(addr, phy->base + reg);
+}
+
+static void enable_chan(struct mmp_pdma_phy *phy)
+{
+ u32 reg;
+
+ if (!phy->vchan)
+ return;
+
+ reg = phy->vchan->drcmr;
+ reg = (((reg) < 64) ? 0x0100 : 0x1100) + (((reg) & 0x3f) << 2);
+ writel(DRCMR_MAPVLD | phy->idx, phy->base + reg);
+
+ reg = (phy->idx << 2) + DCSR;
+ writel(readl(phy->base + reg) | DCSR_RUN,
+ phy->base + reg);
+}
+
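+/*
+ * The mapping-register math in enable_chan(), worked through (assuming
+ * the usual two-bank DRCMR layout implied above): request line 2 lands
+ * at 0x0100 + (2 << 2) = 0x0108 in the first bank, while line 64 wraps
+ * into the second bank at 0x1100 + (0 << 2) = 0x1100.
+ */
+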
+static void disable_chan(struct mmp_pdma_phy *phy)
+{
+ u32 reg;
+
+ if (phy) {
+ reg = (phy->idx << 2) + DCSR;
+ writel(readl(phy->base + reg) & ~DCSR_RUN,
+ phy->base + reg);
+ }
+}
+
+static int clear_chan_irq(struct mmp_pdma_phy *phy)
+{
+ u32 dcsr;
+ u32 dint = readl(phy->base + DINT);
+ u32 reg = (phy->idx << 2) + DCSR;
+
+ if (dint & BIT(phy->idx)) {
+ /* clear irq */
+ dcsr = readl(phy->base + reg);
+ writel(dcsr, phy->base + reg);
+ if ((dcsr & DCSR_BUSERR) && (phy->vchan))
+ dev_warn(phy->vchan->dev, "DCSR_BUSERR\n");
+ return 0;
+ }
+ return -EAGAIN;
+}
+
+static irqreturn_t mmp_pdma_chan_handler(int irq, void *dev_id)
+{
+ struct mmp_pdma_phy *phy = dev_id;
+
+ if (clear_chan_irq(phy) == 0) {
+ tasklet_schedule(&phy->vchan->tasklet);
+ return IRQ_HANDLED;
+ } else
+ return IRQ_NONE;
+}
+
+static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id)
+{
+ struct mmp_pdma_device *pdev = dev_id;
+ struct mmp_pdma_phy *phy;
+ u32 dint = readl(pdev->base + DINT);
+ int i, ret;
+ int irq_num = 0;
+
+ while (dint) {
+ i = __ffs(dint);
+ dint &= (dint - 1);
+ phy = &pdev->phy[i];
+ ret = mmp_pdma_chan_handler(irq, phy);
+ if (ret == IRQ_HANDLED)
+ irq_num++;
+ }
+
+ if (irq_num)
+ return IRQ_HANDLED;
+ else
+ return IRQ_NONE;
+}
+
+/* look up a free phy channel, in descending priority order */
+static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan)
+{
+ int prio, i;
+ struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
+ struct mmp_pdma_phy *phy;
+
+ /*
+ * dma channel priorities
+ * ch 0 - 3, 16 - 19 <--> (0)
+ * ch 4 - 7, 20 - 23 <--> (1)
+ * ch 8 - 11, 24 - 27 <--> (2)
+ * ch 12 - 15, 28 - 31 <--> (3)
+ */
+ for (prio = 0; prio <= (((pdev->dma_channels - 1) & 0xf) >> 2); prio++) {
+ for (i = 0; i < pdev->dma_channels; i++) {
+ if (prio != ((i & 0xf) >> 2))
+ continue;
+ phy = &pdev->phy[i];
+ if (!phy->vchan) {
+ phy->vchan = pchan;
+ return phy;
+ }
+ }
+ }
+
+ return NULL;
+}
+
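+/*
+ * Example of the priority table above: with 32 channels, channel 5
+ * maps to priority (5 & 0xf) >> 2 = 1, so lookup_phy() only considers
+ * it on the second pass of the prio loop.
+ */
+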
+/* desc->tx_list ==> pending list */
+static void append_pending_queue(struct mmp_pdma_chan *chan,
+ struct mmp_pdma_desc_sw *desc)
+{
+ struct mmp_pdma_desc_sw *tail =
+ to_mmp_pdma_desc(chan->chain_pending.prev);
+
+ if (list_empty(&chan->chain_pending))
+ goto out_splice;
+
+	/* one irq for the whole queue, even when descriptors are appended */
+ tail->desc.ddadr = desc->async_tx.phys;
+ tail->desc.dcmd &= ~DCMD_ENDIRQEN;
+
+ /* softly link to pending list */
+out_splice:
+ list_splice_tail_init(&desc->tx_list, &chan->chain_pending);
+}
+
+/**
+ * start_pending_queue - transfer any pending transactions
+ * pending list ==> running list
+ */
+static void start_pending_queue(struct mmp_pdma_chan *chan)
+{
+ struct mmp_pdma_desc_sw *desc;
+
+	/* still running; the irq handler will start the pending list */
+ if (!chan->idle) {
+ dev_dbg(chan->dev, "DMA controller still busy\n");
+ return;
+ }
+
+ if (list_empty(&chan->chain_pending)) {
+ /* chance to re-fetch phy channel with higher prio */
+ if (chan->phy) {
+ chan->phy->vchan = NULL;
+ chan->phy = NULL;
+ }
+ dev_dbg(chan->dev, "no pending list\n");
+ return;
+ }
+
+ if (!chan->phy) {
+ chan->phy = lookup_phy(chan);
+ if (!chan->phy) {
+ dev_dbg(chan->dev, "no free dma channel\n");
+ return;
+ }
+ }
+
+ /*
+ * pending -> running
+	 * reinitialize the pending list
+ */
+ desc = list_first_entry(&chan->chain_pending,
+ struct mmp_pdma_desc_sw, node);
+ list_splice_tail_init(&chan->chain_pending, &chan->chain_running);
+
+ /*
+ * Program the descriptor's address into the DMA controller,
+ * then start the DMA transaction
+ */
+ set_desc(chan->phy, desc->async_tx.phys);
+ enable_chan(chan->phy);
+ chan->idle = false;
+}
+
+/* desc->tx_list ==> pending list */
+static dma_cookie_t mmp_pdma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct mmp_pdma_chan *chan = to_mmp_pdma_chan(tx->chan);
+ struct mmp_pdma_desc_sw *desc = tx_to_mmp_pdma_desc(tx);
+ struct mmp_pdma_desc_sw *child;
+ unsigned long flags;
+ dma_cookie_t cookie = -EBUSY;
+
+ spin_lock_irqsave(&chan->desc_lock, flags);
+
+ list_for_each_entry(child, &desc->tx_list, node) {
+ cookie = dma_cookie_assign(&child->async_tx);
+ }
+
+ append_pending_queue(chan, desc);
+
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
+
+ return cookie;
+}
+
+static struct mmp_pdma_desc_sw *
+mmp_pdma_alloc_descriptor(struct mmp_pdma_chan *chan)
+{
+ struct mmp_pdma_desc_sw *desc;
+ dma_addr_t pdesc;
+
+ desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
+ if (!desc) {
+ dev_err(chan->dev, "out of memory for link descriptor\n");
+ return NULL;
+ }
+
+ memset(desc, 0, sizeof(*desc));
+ INIT_LIST_HEAD(&desc->tx_list);
+ dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);
+	/* each desc supplies its own tx_submit hook */
+ desc->async_tx.tx_submit = mmp_pdma_tx_submit;
+ desc->async_tx.phys = pdesc;
+
+ return desc;
+}
+
+/**
+ * mmp_pdma_alloc_chan_resources - Allocate resources for DMA channel.
+ *
+ * This function will create a dma pool for descriptor allocation.
+ * Request irq only when channel is requested
+ * Return - The number of allocated descriptors.
+ */
+
+static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan)
+{
+ struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
+
+ if (chan->desc_pool)
+ return 1;
+
+ chan->desc_pool =
+ dma_pool_create(dev_name(&dchan->dev->device), chan->dev,
+ sizeof(struct mmp_pdma_desc_sw),
+ __alignof__(struct mmp_pdma_desc_sw), 0);
+ if (!chan->desc_pool) {
+ dev_err(chan->dev, "unable to allocate descriptor pool\n");
+ return -ENOMEM;
+ }
+ if (chan->phy) {
+ chan->phy->vchan = NULL;
+ chan->phy = NULL;
+ }
+ chan->idle = true;
+ chan->dev_addr = 0;
+ return 1;
+}
+
+static void mmp_pdma_free_desc_list(struct mmp_pdma_chan *chan,
+ struct list_head *list)
+{
+ struct mmp_pdma_desc_sw *desc, *_desc;
+
+ list_for_each_entry_safe(desc, _desc, list, node) {
+ list_del(&desc->node);
+ dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
+ }
+}
+
+static void mmp_pdma_free_chan_resources(struct dma_chan *dchan)
+{
+ struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->desc_lock, flags);
+ mmp_pdma_free_desc_list(chan, &chan->chain_pending);
+ mmp_pdma_free_desc_list(chan, &chan->chain_running);
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
+
+ dma_pool_destroy(chan->desc_pool);
+ chan->desc_pool = NULL;
+ chan->idle = true;
+ chan->dev_addr = 0;
+ if (chan->phy) {
+ chan->phy->vchan = NULL;
+ chan->phy = NULL;
+ }
+}
+
+static struct dma_async_tx_descriptor *
+mmp_pdma_prep_memcpy(struct dma_chan *dchan,
+ dma_addr_t dma_dst, dma_addr_t dma_src,
+ size_t len, unsigned long flags)
+{
+ struct mmp_pdma_chan *chan;
+ struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
+ size_t copy = 0;
+
+ if (!dchan)
+ return NULL;
+
+ if (!len)
+ return NULL;
+
+ chan = to_mmp_pdma_chan(dchan);
+
+ if (!chan->dir) {
+ chan->dir = DMA_MEM_TO_MEM;
+ chan->dcmd = DCMD_INCTRGADDR | DCMD_INCSRCADDR;
+ chan->dcmd |= DCMD_BURST32;
+ }
+
+ do {
+ /* Allocate the link descriptor from DMA pool */
+ new = mmp_pdma_alloc_descriptor(chan);
+ if (!new) {
+ dev_err(chan->dev, "no memory for desc\n");
+ goto fail;
+ }
+
+ copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES);
+
+ new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & copy);
+ new->desc.dsadr = dma_src;
+ new->desc.dtadr = dma_dst;
+
+ if (!first)
+ first = new;
+ else
+ prev->desc.ddadr = new->async_tx.phys;
+
+ new->async_tx.cookie = 0;
+ async_tx_ack(&new->async_tx);
+
+ prev = new;
+ len -= copy;
+
+ if (chan->dir == DMA_MEM_TO_DEV) {
+ dma_src += copy;
+ } else if (chan->dir == DMA_DEV_TO_MEM) {
+ dma_dst += copy;
+ } else if (chan->dir == DMA_MEM_TO_MEM) {
+ dma_src += copy;
+ dma_dst += copy;
+ }
+
+ /* Insert the link descriptor to the LD ring */
+ list_add_tail(&new->node, &first->tx_list);
+ } while (len);
+
+ first->async_tx.flags = flags; /* client is in control of this ack */
+ first->async_tx.cookie = -EBUSY;
+
+ /* last desc and fire IRQ */
+ new->desc.ddadr = DDADR_STOP;
+ new->desc.dcmd |= DCMD_ENDIRQEN;
+
+ return &first->async_tx;
+
+fail:
+ if (first)
+ mmp_pdma_free_desc_list(chan, &first->tx_list);
+ return NULL;
+}
+
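+/*
+ * Splitting example (follows from PDMA_MAX_DESC_BYTES above): a
+ * 10000-byte memcpy is chained as descriptors of 4096, 4096 and 1808
+ * bytes; only the final descriptor gets DDADR_STOP and DCMD_ENDIRQEN.
+ */
+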
+static struct dma_async_tx_descriptor *
+mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction dir,
+ unsigned long flags, void *context)
+{
+ struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
+ struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new = NULL;
+ size_t len, avail;
+ struct scatterlist *sg;
+ dma_addr_t addr;
+ int i;
+
+ if ((sgl == NULL) || (sg_len == 0))
+ return NULL;
+
+ for_each_sg(sgl, sg, sg_len, i) {
+ addr = sg_dma_address(sg);
+		avail = sg_dma_len(sg);
+
+ do {
+ len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES);
+
+ /* allocate and populate the descriptor */
+ new = mmp_pdma_alloc_descriptor(chan);
+ if (!new) {
+ dev_err(chan->dev, "no memory for desc\n");
+ goto fail;
+ }
+
+ new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & len);
+ if (dir == DMA_MEM_TO_DEV) {
+ new->desc.dsadr = addr;
+ new->desc.dtadr = chan->dev_addr;
+ } else {
+ new->desc.dsadr = chan->dev_addr;
+ new->desc.dtadr = addr;
+ }
+
+ if (!first)
+ first = new;
+ else
+ prev->desc.ddadr = new->async_tx.phys;
+
+ new->async_tx.cookie = 0;
+ async_tx_ack(&new->async_tx);
+ prev = new;
+
+ /* Insert the link descriptor to the LD ring */
+ list_add_tail(&new->node, &first->tx_list);
+
+ /* update metadata */
+ addr += len;
+ avail -= len;
+ } while (avail);
+ }
+
+ first->async_tx.cookie = -EBUSY;
+ first->async_tx.flags = flags;
+
+ /* last desc and fire IRQ */
+ new->desc.ddadr = DDADR_STOP;
+ new->desc.dcmd |= DCMD_ENDIRQEN;
+
+ return &first->async_tx;
+
+fail:
+ if (first)
+ mmp_pdma_free_desc_list(chan, &first->tx_list);
+ return NULL;
+}
+
+static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
+ struct dma_slave_config *cfg = (void *)arg;
+ unsigned long flags;
+ int ret = 0;
+ u32 maxburst = 0, addr = 0;
+ enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
+
+ if (!dchan)
+ return -EINVAL;
+
+ switch (cmd) {
+ case DMA_TERMINATE_ALL:
+ disable_chan(chan->phy);
+ if (chan->phy) {
+ chan->phy->vchan = NULL;
+ chan->phy = NULL;
+ }
+ spin_lock_irqsave(&chan->desc_lock, flags);
+ mmp_pdma_free_desc_list(chan, &chan->chain_pending);
+ mmp_pdma_free_desc_list(chan, &chan->chain_running);
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
+ chan->idle = true;
+ break;
+ case DMA_SLAVE_CONFIG:
+ if (cfg->direction == DMA_DEV_TO_MEM) {
+ chan->dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC;
+ maxburst = cfg->src_maxburst;
+ width = cfg->src_addr_width;
+ addr = cfg->src_addr;
+ } else if (cfg->direction == DMA_MEM_TO_DEV) {
+ chan->dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG;
+ maxburst = cfg->dst_maxburst;
+ width = cfg->dst_addr_width;
+ addr = cfg->dst_addr;
+ }
+
+ if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
+ chan->dcmd |= DCMD_WIDTH1;
+ else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
+ chan->dcmd |= DCMD_WIDTH2;
+ else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES)
+ chan->dcmd |= DCMD_WIDTH4;
+
+ if (maxburst == 8)
+ chan->dcmd |= DCMD_BURST8;
+ else if (maxburst == 16)
+ chan->dcmd |= DCMD_BURST16;
+ else if (maxburst == 32)
+ chan->dcmd |= DCMD_BURST32;
+
+ if (cfg) {
+ chan->dir = cfg->direction;
+ chan->drcmr = cfg->slave_id;
+ }
+ chan->dev_addr = addr;
+ break;
+ default:
+ return -ENOSYS;
+ }
+
+ return ret;
+}
+
+static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan,
+ dma_cookie_t cookie, struct dma_tx_state *txstate)
+{
+ struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
+ enum dma_status ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->desc_lock, flags);
+ ret = dma_cookie_status(dchan, cookie, txstate);
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
+
+ return ret;
+}
+
+/**
+ * mmp_pdma_issue_pending - Issue the DMA start command
+ * pending list ==> running list
+ */
+static void mmp_pdma_issue_pending(struct dma_chan *dchan)
+{
+ struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->desc_lock, flags);
+ start_pending_queue(chan);
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
+}
+
+/*
+ * dma_do_tasklet
+ * Run the callback for each completed descriptor, then start the
+ * pending list.
+ */
+static void dma_do_tasklet(unsigned long data)
+{
+ struct mmp_pdma_chan *chan = (struct mmp_pdma_chan *)data;
+ struct mmp_pdma_desc_sw *desc, *_desc;
+ LIST_HEAD(chain_cleanup);
+ unsigned long flags;
+
+ /* submit pending list; callback for each desc; free desc */
+
+ spin_lock_irqsave(&chan->desc_lock, flags);
+
+	/* update the cookie if we have some descriptors to clean up */
+ if (!list_empty(&chan->chain_running)) {
+ dma_cookie_t cookie;
+
+ desc = to_mmp_pdma_desc(chan->chain_running.prev);
+ cookie = desc->async_tx.cookie;
+ dma_cookie_complete(&desc->async_tx);
+
+ dev_dbg(chan->dev, "completed_cookie=%d\n", cookie);
+ }
+
+ /*
+ * move the descriptors to a temporary list so we can drop the lock
+ * during the entire cleanup operation
+ */
+ list_splice_tail_init(&chan->chain_running, &chain_cleanup);
+
+ /* the hardware is now idle and ready for more */
+ chan->idle = true;
+
+ /* Start any pending transactions automatically */
+ start_pending_queue(chan);
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
+
+ /* Run the callback for each descriptor, in order */
+ list_for_each_entry_safe(desc, _desc, &chain_cleanup, node) {
+ struct dma_async_tx_descriptor *txd = &desc->async_tx;
+
+ /* Remove from the list of transactions */
+ list_del(&desc->node);
+ /* Run the link descriptor callback function */
+ if (txd->callback)
+ txd->callback(txd->callback_param);
+
+ dma_pool_free(chan->desc_pool, desc, txd->phys);
+ }
+}
+
+static int __devexit mmp_pdma_remove(struct platform_device *op)
+{
+ struct mmp_pdma_device *pdev = platform_get_drvdata(op);
+
+ dma_async_device_unregister(&pdev->device);
+ return 0;
+}
+
+static int __devinit mmp_pdma_chan_init(struct mmp_pdma_device *pdev,
+ int idx, int irq)
+{
+ struct mmp_pdma_phy *phy = &pdev->phy[idx];
+ struct mmp_pdma_chan *chan;
+ int ret;
+
+ chan = devm_kzalloc(pdev->dev,
+ sizeof(struct mmp_pdma_chan), GFP_KERNEL);
+ if (chan == NULL)
+ return -ENOMEM;
+
+ phy->idx = idx;
+ phy->base = pdev->base;
+
+ if (irq) {
+ ret = devm_request_irq(pdev->dev, irq,
+ mmp_pdma_chan_handler, IRQF_DISABLED, "pdma", phy);
+ if (ret) {
+ dev_err(pdev->dev, "channel request irq fail!\n");
+ return ret;
+ }
+ }
+
+ spin_lock_init(&chan->desc_lock);
+ chan->dev = pdev->dev;
+ chan->chan.device = &pdev->device;
+ tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);
+ INIT_LIST_HEAD(&chan->chain_pending);
+ INIT_LIST_HEAD(&chan->chain_running);
+
+ /* register virt channel to dma engine */
+ list_add_tail(&chan->chan.device_node,
+ &pdev->device.channels);
+
+ return 0;
+}
+
+static struct of_device_id mmp_pdma_dt_ids[] = {
+ { .compatible = "marvell,pdma-1.0", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mmp_pdma_dt_ids);
+
+static int __devinit mmp_pdma_probe(struct platform_device *op)
+{
+ struct mmp_pdma_device *pdev;
+ const struct of_device_id *of_id;
+ struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
+ struct resource *iores;
+ int i, ret, irq = 0;
+ int dma_channels = 0, irq_num = 0;
+
+ pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL);
+ if (!pdev)
+ return -ENOMEM;
+ pdev->dev = &op->dev;
+
+ iores = platform_get_resource(op, IORESOURCE_MEM, 0);
+ if (!iores)
+ return -EINVAL;
+
+ pdev->base = devm_request_and_ioremap(pdev->dev, iores);
+ if (!pdev->base)
+ return -EADDRNOTAVAIL;
+
+ of_id = of_match_device(mmp_pdma_dt_ids, pdev->dev);
+ if (of_id)
+ of_property_read_u32(pdev->dev->of_node,
+ "#dma-channels", &dma_channels);
+ else if (pdata && pdata->dma_channels)
+ dma_channels = pdata->dma_channels;
+ else
+		dma_channels = 32;	/* default to 32 channels */
+ pdev->dma_channels = dma_channels;
+
+ for (i = 0; i < dma_channels; i++) {
+ if (platform_get_irq(op, i) > 0)
+ irq_num++;
+ }
+
+ pdev->phy = devm_kzalloc(pdev->dev,
+ dma_channels * sizeof(struct mmp_pdma_chan), GFP_KERNEL);
+ if (pdev->phy == NULL)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&pdev->device.channels);
+
+ if (irq_num != dma_channels) {
+ /* all chan share one irq, demux inside */
+ irq = platform_get_irq(op, 0);
+ ret = devm_request_irq(pdev->dev, irq,
+ mmp_pdma_int_handler, IRQF_DISABLED, "pdma", pdev);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < dma_channels; i++) {
+ irq = (irq_num != dma_channels) ? 0 : platform_get_irq(op, i);
+ ret = mmp_pdma_chan_init(pdev, i, irq);
+ if (ret)
+ return ret;
+ }
+
+	dma_cap_set(DMA_SLAVE, pdev->device.cap_mask);
+	dma_cap_set(DMA_MEMCPY, pdev->device.cap_mask);
+ pdev->device.dev = &op->dev;
+ pdev->device.device_alloc_chan_resources = mmp_pdma_alloc_chan_resources;
+ pdev->device.device_free_chan_resources = mmp_pdma_free_chan_resources;
+ pdev->device.device_tx_status = mmp_pdma_tx_status;
+ pdev->device.device_prep_dma_memcpy = mmp_pdma_prep_memcpy;
+ pdev->device.device_prep_slave_sg = mmp_pdma_prep_slave_sg;
+ pdev->device.device_issue_pending = mmp_pdma_issue_pending;
+ pdev->device.device_control = mmp_pdma_control;
+ pdev->device.copy_align = PDMA_ALIGNMENT;
+
+ if (pdev->dev->coherent_dma_mask)
+ dma_set_mask(pdev->dev, pdev->dev->coherent_dma_mask);
+ else
+ dma_set_mask(pdev->dev, DMA_BIT_MASK(64));
+
+ ret = dma_async_device_register(&pdev->device);
+ if (ret) {
+ dev_err(pdev->device.dev, "unable to register\n");
+ return ret;
+ }
+
+ dev_info(pdev->device.dev, "initialized\n");
+ return 0;
+}
+
+static const struct platform_device_id mmp_pdma_id_table[] = {
+ { "mmp-pdma", },
+ { },
+};
+
+static struct platform_driver mmp_pdma_driver = {
+ .driver = {
+ .name = "mmp-pdma",
+ .owner = THIS_MODULE,
+ .of_match_table = mmp_pdma_dt_ids,
+ },
+ .id_table = mmp_pdma_id_table,
+ .probe = mmp_pdma_probe,
+ .remove = __devexit_p(mmp_pdma_remove),
+};
+
+module_platform_driver(mmp_pdma_driver);
+
+MODULE_DESCRIPTION("MARVELL MMP Peripheral DMA Driver");
+MODULE_AUTHOR("Marvell International Ltd.");
+MODULE_LICENSE("GPL v2");
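The probe above only wires the engine up; consumers drive it through the generic dmaengine client API. A minimal sketch of that side (illustrative only, not part of this patch; the completion wait is elided):

#include <linux/dmaengine.h>

static int demo_copy(dma_addr_t dst, dma_addr_t src, size_t len)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	/* grab any channel advertising DMA_MEMCPY, e.g. one of ours */
	chan = dma_request_channel(mask, NULL, NULL);
	if (!chan)
		return -ENODEV;

	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	cookie = dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	/* ... wait for completion via tx->callback, then: */
	dma_release_channel(chan);
	return dma_submit_error(cookie) ? -EIO : 0;
}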
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index 07fa486..f3e8d71 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -20,6 +20,7 @@
#include <linux/device.h>
#include <mach/regs-icu.h>
#include <linux/platform_data/dma-mmp_tdma.h>
+#include <linux/of_device.h>
#include "dmaengine.h"
@@ -127,7 +128,6 @@ struct mmp_tdma_device {
void __iomem *base;
struct dma_device device;
struct mmp_tdma_chan *tdmac[TDMA_CHANNEL_NUM];
- int irq;
};
#define to_mmp_tdma_chan(dchan) container_of(dchan, struct mmp_tdma_chan, chan)
@@ -358,7 +358,7 @@ struct mmp_tdma_desc *mmp_tdma_alloc_descriptor(struct mmp_tdma_chan *tdmac)
static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
size_t period_len, enum dma_transfer_direction direction,
- void *context)
+ unsigned long flags, void *context)
{
struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
struct mmp_tdma_desc *desc;
@@ -492,7 +492,7 @@ static int __devinit mmp_tdma_chan_init(struct mmp_tdma_device *tdev,
return -ENOMEM;
}
if (irq)
- tdmac->irq = irq + idx;
+ tdmac->irq = irq;
tdmac->dev = tdev->dev;
tdmac->chan.device = &tdev->device;
tdmac->idx = idx;
@@ -505,34 +505,43 @@ static int __devinit mmp_tdma_chan_init(struct mmp_tdma_device *tdev,
/* add the channel to tdma_chan list */
list_add_tail(&tdmac->chan.device_node,
&tdev->device.channels);
-
return 0;
}
+static struct of_device_id mmp_tdma_dt_ids[] = {
+ { .compatible = "marvell,adma-1.0", .data = (void *)MMP_AUD_TDMA},
+ { .compatible = "marvell,pxa910-squ", .data = (void *)PXA910_SQU},
+ {}
+};
+MODULE_DEVICE_TABLE(of, mmp_tdma_dt_ids);
+
static int __devinit mmp_tdma_probe(struct platform_device *pdev)
{
- const struct platform_device_id *id = platform_get_device_id(pdev);
- enum mmp_tdma_type type = id->driver_data;
+ enum mmp_tdma_type type;
+ const struct of_device_id *of_id;
struct mmp_tdma_device *tdev;
struct resource *iores;
int i, ret;
- int irq = 0;
+ int irq = 0, irq_num = 0;
int chan_num = TDMA_CHANNEL_NUM;
+ of_id = of_match_device(mmp_tdma_dt_ids, &pdev->dev);
+ if (of_id)
+ type = (enum mmp_tdma_type) of_id->data;
+ else
+ type = platform_get_device_id(pdev)->driver_data;
+
/* always have couple channels */
tdev = devm_kzalloc(&pdev->dev, sizeof(*tdev), GFP_KERNEL);
if (!tdev)
return -ENOMEM;
tdev->dev = &pdev->dev;
- iores = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!iores)
- return -EINVAL;
- if (resource_size(iores) != chan_num)
- tdev->irq = iores->start;
- else
- irq = iores->start;
+ for (i = 0; i < chan_num; i++) {
+ if (platform_get_irq(pdev, i) > 0)
+ irq_num++;
+ }
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!iores)
@@ -542,25 +551,26 @@ static int __devinit mmp_tdma_probe(struct platform_device *pdev)
if (!tdev->base)
return -EADDRNOTAVAIL;
- if (tdev->irq) {
- ret = devm_request_irq(&pdev->dev, tdev->irq,
+ INIT_LIST_HEAD(&tdev->device.channels);
+
+ if (irq_num != chan_num) {
+ irq = platform_get_irq(pdev, 0);
+ ret = devm_request_irq(&pdev->dev, irq,
mmp_tdma_int_handler, IRQF_DISABLED, "tdma", tdev);
if (ret)
return ret;
}
- dma_cap_set(DMA_SLAVE, tdev->device.cap_mask);
- dma_cap_set(DMA_CYCLIC, tdev->device.cap_mask);
-
- INIT_LIST_HEAD(&tdev->device.channels);
-
/* initialize channel parameters */
for (i = 0; i < chan_num; i++) {
+ irq = (irq_num != chan_num) ? 0 : platform_get_irq(pdev, i);
ret = mmp_tdma_chan_init(tdev, i, irq, type);
if (ret)
return ret;
}
+ dma_cap_set(DMA_SLAVE, tdev->device.cap_mask);
+ dma_cap_set(DMA_CYCLIC, tdev->device.cap_mask);
tdev->device.dev = &pdev->dev;
tdev->device.device_alloc_chan_resources =
mmp_tdma_alloc_chan_resources;
@@ -595,6 +605,7 @@ static struct platform_driver mmp_tdma_driver = {
.driver = {
.name = "mmp-tdma",
.owner = THIS_MODULE,
+ .of_match_table = mmp_tdma_dt_ids,
},
.id_table = mmp_tdma_id_table,
.probe = mmp_tdma_probe,
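The probe change above is the standard dual-probe pattern: prefer of_match_device() data when a device tree is present, otherwise fall back to the platform id table for legacy board files. A stand-alone sketch with invented compatible strings:

#include <linux/of_device.h>
#include <linux/platform_device.h>

enum demo_type { DEMO_TYPE_A, DEMO_TYPE_B };

static const struct of_device_id demo_dt_ids[] = {
	{ .compatible = "acme,demo-a", .data = (void *)DEMO_TYPE_A },
	{ .compatible = "acme,demo-b", .data = (void *)DEMO_TYPE_B },
	{ /* sentinel */ }
};

static enum demo_type demo_get_type(struct platform_device *pdev)
{
	const struct of_device_id *of_id =
		of_match_device(demo_dt_ids, &pdev->dev);

	if (of_id)	/* booted from a device tree */
		return (enum demo_type)(unsigned long)of_id->data;

	/* legacy board file: fall back to the platform id table */
	return (enum demo_type)platform_get_device_id(pdev)->driver_data;
}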
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index 7f41b25..9f02e79 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -101,7 +101,8 @@ struct mxs_dma_ccw {
u32 pio_words[MXS_PIO_WORDS];
};
-#define NUM_CCW (int)(PAGE_SIZE / sizeof(struct mxs_dma_ccw))
+#define CCW_BLOCK_SIZE (4 * PAGE_SIZE)
+#define NUM_CCW (int)(CCW_BLOCK_SIZE / sizeof(struct mxs_dma_ccw))
struct mxs_dma_chan {
struct mxs_dma_engine *mxs_dma;
@@ -354,14 +355,15 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
mxs_chan->chan_irq = data->chan_irq;
- mxs_chan->ccw = dma_alloc_coherent(mxs_dma->dma_device.dev, PAGE_SIZE,
- &mxs_chan->ccw_phys, GFP_KERNEL);
+ mxs_chan->ccw = dma_alloc_coherent(mxs_dma->dma_device.dev,
+ CCW_BLOCK_SIZE, &mxs_chan->ccw_phys,
+ GFP_KERNEL);
if (!mxs_chan->ccw) {
ret = -ENOMEM;
goto err_alloc;
}
- memset(mxs_chan->ccw, 0, PAGE_SIZE);
+ memset(mxs_chan->ccw, 0, CCW_BLOCK_SIZE);
if (mxs_chan->chan_irq != NO_IRQ) {
ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler,
@@ -387,7 +389,7 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
err_clk:
free_irq(mxs_chan->chan_irq, mxs_dma);
err_irq:
- dma_free_coherent(mxs_dma->dma_device.dev, PAGE_SIZE,
+ dma_free_coherent(mxs_dma->dma_device.dev, CCW_BLOCK_SIZE,
mxs_chan->ccw, mxs_chan->ccw_phys);
err_alloc:
return ret;
@@ -402,7 +404,7 @@ static void mxs_dma_free_chan_resources(struct dma_chan *chan)
free_irq(mxs_chan->chan_irq, mxs_dma);
- dma_free_coherent(mxs_dma->dma_device.dev, PAGE_SIZE,
+ dma_free_coherent(mxs_dma->dma_device.dev, CCW_BLOCK_SIZE,
mxs_chan->ccw, mxs_chan->ccw_phys);
clk_disable_unprepare(mxs_dma->clk);
@@ -531,7 +533,7 @@ err_out:
static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
size_t period_len, enum dma_transfer_direction direction,
- void *context)
+ unsigned long flags, void *context)
{
struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
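The mxs-dma change quadruples the per-channel descriptor block, and it works safely because every dma_alloc_coherent()/dma_free_coherent() call and the memset() now reference one named constant, so the sizes cannot drift apart. The idiom in isolation, with hypothetical names:

#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/string.h>

#define DEMO_BLOCK_SIZE	(4 * PAGE_SIZE)

struct demo_chan {
	void		*ccw;
	dma_addr_t	ccw_phys;
};

static int demo_alloc(struct device *dev, struct demo_chan *c)
{
	c->ccw = dma_alloc_coherent(dev, DEMO_BLOCK_SIZE, &c->ccw_phys,
				    GFP_KERNEL);
	if (!c->ccw)
		return -ENOMEM;
	memset(c->ccw, 0, DEMO_BLOCK_SIZE);	/* same constant everywhere */
	return 0;
}

static void demo_free(struct device *dev, struct demo_chan *c)
{
	dma_free_coherent(dev, DEMO_BLOCK_SIZE, c->ccw, c->ccw_phys);
}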
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index 2e16627..bb2d8e7 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -36,6 +36,7 @@ struct omap_chan {
struct dma_slave_config cfg;
unsigned dma_sig;
bool cyclic;
+ bool paused;
int dma_ch;
struct omap_desc *desc;
@@ -367,7 +368,8 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
- size_t period_len, enum dma_transfer_direction dir, void *context)
+ size_t period_len, enum dma_transfer_direction dir, unsigned long flags,
+ void *context)
{
struct omap_chan *c = to_omap_dma_chan(chan);
enum dma_slave_buswidth dev_width;
@@ -415,7 +417,10 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
d->dev_addr = dev_addr;
d->fi = burst;
d->es = es;
- d->sync_mode = OMAP_DMA_SYNC_PACKET;
+ if (burst)
+ d->sync_mode = OMAP_DMA_SYNC_PACKET;
+ else
+ d->sync_mode = OMAP_DMA_SYNC_ELEMENT;
d->sync_type = sync_type;
d->periph_port = OMAP_DMA_PORT_MPUI;
d->sg[0].addr = buf_addr;
@@ -426,7 +431,10 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
if (!c->cyclic) {
c->cyclic = true;
omap_dma_link_lch(c->dma_ch, c->dma_ch);
- omap_enable_dma_irq(c->dma_ch, OMAP_DMA_FRAME_IRQ);
+
+ if (flags & DMA_PREP_INTERRUPT)
+ omap_enable_dma_irq(c->dma_ch, OMAP_DMA_FRAME_IRQ);
+
omap_disable_dma_irq(c->dma_ch, OMAP_DMA_BLOCK_IRQ);
}
@@ -435,7 +443,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
omap_set_dma_dest_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16);
}
- return vchan_tx_prep(&c->vc, &d->vd, DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
+ return vchan_tx_prep(&c->vc, &d->vd, flags);
}
static int omap_dma_slave_config(struct omap_chan *c, struct dma_slave_config *cfg)
@@ -469,11 +477,14 @@ static int omap_dma_terminate_all(struct omap_chan *c)
*/
if (c->desc) {
c->desc = NULL;
- omap_stop_dma(c->dma_ch);
+ /* Avoid stopping the dma twice */
+ if (!c->paused)
+ omap_stop_dma(c->dma_ch);
}
if (c->cyclic) {
c->cyclic = false;
+ c->paused = false;
omap_dma_unlink_lch(c->dma_ch, c->dma_ch);
}
@@ -486,14 +497,30 @@ static int omap_dma_terminate_all(struct omap_chan *c)
static int omap_dma_pause(struct omap_chan *c)
{
- /* FIXME: not supported by platform private API */
- return -EINVAL;
+ /* Pause/Resume only allowed with cyclic mode */
+ if (!c->cyclic)
+ return -EINVAL;
+
+ if (!c->paused) {
+ omap_stop_dma(c->dma_ch);
+ c->paused = true;
+ }
+
+ return 0;
}
static int omap_dma_resume(struct omap_chan *c)
{
- /* FIXME: not supported by platform private API */
- return -EINVAL;
+ /* Pause/Resume only allowed with cyclic mode */
+ if (!c->cyclic)
+ return -EINVAL;
+
+ if (c->paused) {
+ omap_start_dma(c->dma_ch);
+ c->paused = false;
+ }
+
+ return 0;
}
static int omap_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
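From a client's point of view, the new pause/resume support is reached through the standard dmaengine wrappers, which route device_control(DMA_PAUSE/DMA_RESUME) into omap_dma_pause()/omap_dma_resume() above instead of getting -EINVAL back. A one-line sketch (chan is any channel on this controller):

#include <linux/dmaengine.h>

static int demo_pause_resume(struct dma_chan *chan, bool pause)
{
	/* wrappers around device_control(DMA_PAUSE / DMA_RESUME) */
	return pause ? dmaengine_pause(chan) : dmaengine_resume(chan);
}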
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 5d3bbcd..665668b 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -23,7 +23,6 @@
#include <linux/dmaengine.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl330.h>
-#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/of.h>
@@ -586,8 +585,6 @@ struct dma_pl330_dmac {
/* Peripheral channels connected to this DMAC */
struct dma_pl330_chan *peripherals; /* keep at end */
-
- struct clk *clk;
};
struct dma_pl330_desc {
@@ -2395,7 +2392,7 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan)
pch->pl330_chid = pl330_request_channel(&pdmac->pif);
if (!pch->pl330_chid) {
spin_unlock_irqrestore(&pch->lock, flags);
- return 0;
+ return -ENOMEM;
}
tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);
@@ -2685,7 +2682,7 @@ static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t dma_addr, size_t len,
size_t period_len, enum dma_transfer_direction direction,
- void *context)
+ unsigned long flags, void *context)
{
struct dma_pl330_desc *desc;
struct dma_pl330_chan *pch = to_pchan(chan);
@@ -2889,29 +2886,17 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
goto probe_err1;
}
- pdmac->clk = clk_get(&adev->dev, "dma");
- if (IS_ERR(pdmac->clk)) {
- dev_err(&adev->dev, "Cannot get operation clock.\n");
- ret = -EINVAL;
- goto probe_err2;
- }
-
amba_set_drvdata(adev, pdmac);
-#ifndef CONFIG_PM_RUNTIME
- /* enable dma clk */
- clk_enable(pdmac->clk);
-#endif
-
irq = adev->irq[0];
ret = request_irq(irq, pl330_irq_handler, 0,
dev_name(&adev->dev), pi);
if (ret)
- goto probe_err3;
+ goto probe_err2;
ret = pl330_add(pi);
if (ret)
- goto probe_err4;
+ goto probe_err3;
INIT_LIST_HEAD(&pdmac->desc_pool);
spin_lock_init(&pdmac->pool_lock);
@@ -2933,7 +2918,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
if (!pdmac->peripherals) {
ret = -ENOMEM;
dev_err(&adev->dev, "unable to allocate pdmac->peripherals\n");
- goto probe_err5;
+ goto probe_err4;
}
for (i = 0; i < num_chan; i++) {
@@ -2961,6 +2946,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
if (pi->pcfg.num_peri) {
dma_cap_set(DMA_SLAVE, pd->cap_mask);
dma_cap_set(DMA_CYCLIC, pd->cap_mask);
+ dma_cap_set(DMA_PRIVATE, pd->cap_mask);
}
}
@@ -2976,7 +2962,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
ret = dma_async_device_register(pd);
if (ret) {
dev_err(&adev->dev, "unable to register DMAC\n");
- goto probe_err5;
+ goto probe_err4;
}
dev_info(&adev->dev,
@@ -2989,15 +2975,10 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
return 0;
-probe_err5:
- pl330_del(pi);
probe_err4:
- free_irq(irq, pi);
+ pl330_del(pi);
probe_err3:
-#ifndef CONFIG_PM_RUNTIME
- clk_disable(pdmac->clk);
-#endif
- clk_put(pdmac->clk);
+ free_irq(irq, pi);
probe_err2:
iounmap(pi->base);
probe_err1:
@@ -3044,10 +3025,6 @@ static int __devexit pl330_remove(struct amba_device *adev)
res = &adev->res;
release_mem_region(res->start, resource_size(res));
-#ifndef CONFIG_PM_RUNTIME
- clk_disable(pdmac->clk);
-#endif
-
kfree(pdmac);
return 0;
@@ -3063,49 +3040,10 @@ static struct amba_id pl330_ids[] = {
MODULE_DEVICE_TABLE(amba, pl330_ids);
-#ifdef CONFIG_PM_RUNTIME
-static int pl330_runtime_suspend(struct device *dev)
-{
- struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);
-
- if (!pdmac) {
- dev_err(dev, "failed to get dmac\n");
- return -ENODEV;
- }
-
- clk_disable(pdmac->clk);
-
- return 0;
-}
-
-static int pl330_runtime_resume(struct device *dev)
-{
- struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);
-
- if (!pdmac) {
- dev_err(dev, "failed to get dmac\n");
- return -ENODEV;
- }
-
- clk_enable(pdmac->clk);
-
- return 0;
-}
-#else
-#define pl330_runtime_suspend NULL
-#define pl330_runtime_resume NULL
-#endif /* CONFIG_PM_RUNTIME */
-
-static const struct dev_pm_ops pl330_pm_ops = {
- .runtime_suspend = pl330_runtime_suspend,
- .runtime_resume = pl330_runtime_resume,
-};
-
static struct amba_driver pl330_driver = {
.drv = {
.owner = THIS_MODULE,
.name = "dma-pl330",
- .pm = &pl330_pm_ops,
},
.id_table = pl330_ids,
.probe = pl330_probe,
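The relabelling above keeps pl330_probe()'s error path in the canonical goto-unwind shape: each label releases exactly what was acquired before the failing step, in reverse order of acquisition. A self-contained skeleton of the idiom (the stub names stand in for real steps such as request_irq() and pl330_add()):

static int acquire_a(void) { return 0; }	/* placeholder steps */
static void release_a(void) { }
static int acquire_b(void) { return 0; }
static void release_b(void) { }
static int acquire_c(void) { return 0; }

static int demo_probe(void)
{
	int ret;

	ret = acquire_a();
	if (ret)
		return ret;
	ret = acquire_b();
	if (ret)
		goto err_a;
	ret = acquire_c();
	if (ret)
		goto err_b;
	return 0;

err_b:
	release_b();	/* undo in reverse order of acquisition */
err_a:
	release_a();
	return ret;
}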
diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c
index f5a7360..b893159 100644
--- a/drivers/dma/sa11x0-dma.c
+++ b/drivers/dma/sa11x0-dma.c
@@ -614,7 +614,7 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
static struct dma_async_tx_descriptor *sa11x0_dma_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t addr, size_t size, size_t period,
- enum dma_transfer_direction dir, void *context)
+ enum dma_transfer_direction dir, unsigned long flags, void *context)
{
struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
struct sa11x0_dma_desc *txd;
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
index 434ad31..64385cd 100644
--- a/drivers/dma/sirf-dma.c
+++ b/drivers/dma/sirf-dma.c
@@ -489,7 +489,7 @@ err_dir:
static struct dma_async_tx_descriptor *
sirfsoc_dma_prep_cyclic(struct dma_chan *chan, dma_addr_t addr,
size_t buf_len, size_t period_len,
- enum dma_transfer_direction direction, void *context)
+ enum dma_transfer_direction direction, unsigned long flags, void *context)
{
struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
struct sirfsoc_dma_desc *sdesc = NULL;
@@ -570,21 +570,19 @@ static int __devinit sirfsoc_dma_probe(struct platform_device *op)
if (of_property_read_u32(dn, "cell-index", &id)) {
dev_err(dev, "Fail to get DMAC index\n");
- ret = -ENODEV;
- goto free_mem;
+ return -ENODEV;
}
sdma->irq = irq_of_parse_and_map(dn, 0);
if (sdma->irq == NO_IRQ) {
dev_err(dev, "Error mapping IRQ!\n");
- ret = -EINVAL;
- goto free_mem;
+ return -EINVAL;
}
ret = of_address_to_resource(dn, 0, &res);
if (ret) {
dev_err(dev, "Error parsing memory region!\n");
- goto free_mem;
+ goto irq_dispose;
}
regs_start = res.start;
@@ -597,12 +595,11 @@ static int __devinit sirfsoc_dma_probe(struct platform_device *op)
goto irq_dispose;
}
- ret = devm_request_irq(dev, sdma->irq, &sirfsoc_dma_irq, 0, DRV_NAME,
- sdma);
+ ret = request_irq(sdma->irq, &sirfsoc_dma_irq, 0, DRV_NAME, sdma);
if (ret) {
dev_err(dev, "Error requesting IRQ!\n");
ret = -EINVAL;
- goto unmap_mem;
+ goto irq_dispose;
}
dma = &sdma->dma;
@@ -652,13 +649,9 @@ static int __devinit sirfsoc_dma_probe(struct platform_device *op)
return 0;
free_irq:
- devm_free_irq(dev, sdma->irq, sdma);
+ free_irq(sdma->irq, sdma);
irq_dispose:
irq_dispose_mapping(sdma->irq);
-unmap_mem:
- iounmap(sdma->base);
-free_mem:
- devm_kfree(dev, sdma);
return ret;
}
@@ -668,10 +661,8 @@ static int __devexit sirfsoc_dma_remove(struct platform_device *op)
struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
dma_async_device_unregister(&sdma->dma);
- devm_free_irq(dev, sdma->irq, sdma);
+ free_irq(sdma->irq, sdma);
irq_dispose_mapping(sdma->irq);
- iounmap(sdma->base);
- devm_kfree(dev, sdma);
return 0;
}
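The sirf-dma switch away from devm_request_irq() is about teardown order: a devm-managed IRQ would only be released after remove() returns, i.e. after irq_dispose_mapping() has already destroyed the mapping. Requesting and freeing the IRQ by hand restores the required order, sketched here with an invented struct:

#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/platform_device.h>

struct demo_dma {
	unsigned int irq;		/* from irq_of_parse_and_map() */
};

static int demo_remove(struct platform_device *op)
{
	struct demo_dma *d = platform_get_drvdata(op);

	free_irq(d->irq, d);		/* detach the handler first... */
	irq_dispose_mapping(d->irq);	/* ...then drop the mapping */
	return 0;
}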
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 000d309..ae55091 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -2347,7 +2347,8 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
static struct dma_async_tx_descriptor *
dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
size_t buf_len, size_t period_len,
- enum dma_transfer_direction direction, void *context)
+ enum dma_transfer_direction direction, unsigned long flags,
+ void *context)
{
unsigned int periods = buf_len / period_len;
struct dma_async_tx_descriptor *txd;
@@ -2920,19 +2921,23 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
struct d40_base *base = NULL;
int num_log_chans = 0;
int num_phy_chans;
+ int clk_ret = -EINVAL;
int i;
u32 pid;
u32 cid;
u8 rev;
clk = clk_get(&pdev->dev, NULL);
-
if (IS_ERR(clk)) {
d40_err(&pdev->dev, "No matching clock found\n");
goto failure;
}
- clk_enable(clk);
+ clk_ret = clk_prepare_enable(clk);
+ if (clk_ret) {
+ d40_err(&pdev->dev, "Failed to prepare/enable clock\n");
+ goto failure;
+ }
/* Get IO for DMAC base address */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
@@ -3062,10 +3067,10 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
return base;
failure:
- if (!IS_ERR(clk)) {
- clk_disable(clk);
+ if (!clk_ret)
+ clk_disable_unprepare(clk);
+ if (!IS_ERR(clk))
clk_put(clk);
- }
if (virtbase)
iounmap(virtbase);
if (res)
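clk_prepare_enable() is the common-clock-framework pairing of clk_prepare() and clk_enable(), and the new clk_ret tracking makes the failure path unwind only a clock that was actually enabled. The bring-up/teardown pair reduced to a sketch:

#include <linux/clk.h>
#include <linux/err.h>

static struct clk *demo_clk_up(struct device *dev)
{
	struct clk *clk = clk_get(dev, NULL);
	int ret;

	if (IS_ERR(clk))
		return clk;

	ret = clk_prepare_enable(clk);	/* prepare + enable in one call */
	if (ret) {
		clk_put(clk);		/* unwind only the successful step */
		return ERR_PTR(ret);
	}
	return clk;
}

static void demo_clk_down(struct clk *clk)
{
	clk_disable_unprepare(clk);
	clk_put(clk);
}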
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index 4708467..528c62d 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -169,6 +169,7 @@ typedef void (*dma_isr_handler)(struct tegra_dma_channel *tdc,
/* tegra_dma_channel: Channel specific information */
struct tegra_dma_channel {
struct dma_chan dma_chan;
+ char name[30];
bool config_init;
int id;
int irq;
@@ -475,8 +476,7 @@ static void tegra_dma_abort_all(struct tegra_dma_channel *tdc)
while (!list_empty(&tdc->pending_sg_req)) {
sgreq = list_first_entry(&tdc->pending_sg_req,
typeof(*sgreq), node);
- list_del(&sgreq->node);
- list_add_tail(&sgreq->node, &tdc->free_sg_req);
+ list_move_tail(&sgreq->node, &tdc->free_sg_req);
if (sgreq->last_sg) {
dma_desc = sgreq->dma_desc;
dma_desc->dma_status = DMA_ERROR;
@@ -570,8 +570,7 @@ static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc,
/* If not last req then put at end of pending list */
if (!list_is_last(&sgreq->node, &tdc->pending_sg_req)) {
- list_del(&sgreq->node);
- list_add_tail(&sgreq->node, &tdc->pending_sg_req);
+ list_move_tail(&sgreq->node, &tdc->pending_sg_req);
sgreq->configured = false;
st = handle_continuous_head_request(tdc, sgreq, to_terminate);
if (!st)
@@ -990,7 +989,7 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len,
size_t period_len, enum dma_transfer_direction direction,
- void *context)
+ unsigned long flags, void *context)
{
struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
struct tegra_dma_desc *dma_desc = NULL;
@@ -1284,7 +1283,6 @@ static int __devinit tegra_dma_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&tdma->dma_dev.channels);
for (i = 0; i < cdata->nr_channels; i++) {
struct tegra_dma_channel *tdc = &tdma->channels[i];
- char irq_name[30];
tdc->chan_base_offset = TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET +
i * TEGRA_APBDMA_CHANNEL_REGISTER_SIZE;
@@ -1296,9 +1294,9 @@ static int __devinit tegra_dma_probe(struct platform_device *pdev)
goto err_irq;
}
tdc->irq = res->start;
- snprintf(irq_name, sizeof(irq_name), "apbdma.%d", i);
+ snprintf(tdc->name, sizeof(tdc->name), "apbdma.%d", i);
ret = devm_request_irq(&pdev->dev, tdc->irq,
- tegra_dma_isr, 0, irq_name, tdc);
+ tegra_dma_isr, 0, tdc->name, tdc);
if (ret) {
dev_err(&pdev->dev,
"request_irq failed with err %d channel %d\n",
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index aa73ef3..d055cee 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -150,6 +150,12 @@ config GPIO_MSM_V2
Qualcomm MSM chips. Most of the pins on the MSM can be
selected for GPIO, and are controlled by this driver.
+config GPIO_MVEBU
+ def_bool y
+ depends on ARCH_MVEBU
+ select GPIO_GENERIC
+ select GENERIC_IRQ_CHIP
+
config GPIO_MXC
def_bool y
depends on ARCH_MXC
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index b2c109d..9aeed67 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -42,6 +42,7 @@ obj-$(CONFIG_GPIO_MPC8XXX) += gpio-mpc8xxx.o
obj-$(CONFIG_GPIO_MSIC) += gpio-msic.o
obj-$(CONFIG_GPIO_MSM_V1) += gpio-msm-v1.o
obj-$(CONFIG_GPIO_MSM_V2) += gpio-msm-v2.o
+obj-$(CONFIG_GPIO_MVEBU) += gpio-mvebu.o
obj-$(CONFIG_GPIO_MXC) += gpio-mxc.o
obj-$(CONFIG_GPIO_MXS) += gpio-mxs.o
obj-$(CONFIG_ARCH_OMAP) += gpio-omap.o
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
new file mode 100644
index 0000000..902af43
--- /dev/null
+++ b/drivers/gpio/gpio-mvebu.c
@@ -0,0 +1,679 @@
+/*
+ * GPIO driver for Marvell SoCs
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ * Andrew Lunn <andrew@lunn.ch>
+ * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * This driver is a fairly straightforward GPIO driver for the
+ * complete family of Marvell EBU SoC platforms (Orion, Dove,
+ * Kirkwood, Discovery, Armada 370/XP). The only complexity of this
+ * driver is the different register layout that exists between the
+ * non-SMP platforms (Orion, Dove, Kirkwood, Armada 370) and the SMP
+ * platforms (MV78200 from the Discovery family and the Armada
+ * XP). Therefore, this driver handles three variants of the GPIO
+ * block:
+ * - the basic variant, called "orion-gpio", with the simplest
+ * register set. Used on Orion, Dove, Kirkwood, Armada 370 and
+ * non-SMP Discovery systems
+ * - the mv78200 variant for MV78200 Discovery systems. This variant
+ * turns the edge mask and level mask registers into CPU0 edge
+ * mask/level mask registers, and adds CPU1 edge mask/level mask
+ * registers.
+ * - the armadaxp variant for Armada XP systems. This variant keeps
+ * the normal cause/edge mask/level mask registers when the global
+ * interrupts are used, but adds per-CPU cause/edge mask/level mask
+ * registers in a separate memory area for the per-CPU GPIO
+ * interrupts.
+ */
+
+#include <linux/module.h>
+#include <linux/gpio.h>
+#include <linux/irq.h>
+#include <linux/slab.h>
+#include <linux/irqdomain.h>
+#include <linux/io.h>
+#include <linux/of_irq.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/consumer.h>
+
+/*
+ * GPIO unit register offsets.
+ */
+#define GPIO_OUT_OFF 0x0000
+#define GPIO_IO_CONF_OFF 0x0004
+#define GPIO_BLINK_EN_OFF 0x0008
+#define GPIO_IN_POL_OFF 0x000c
+#define GPIO_DATA_IN_OFF 0x0010
+#define GPIO_EDGE_CAUSE_OFF 0x0014
+#define GPIO_EDGE_MASK_OFF 0x0018
+#define GPIO_LEVEL_MASK_OFF 0x001c
+
+/* The MV78200 has per-CPU registers for edge mask and level mask */
+#define GPIO_EDGE_MASK_MV78200_OFF(cpu) ((cpu) ? 0x30 : 0x18)
+#define GPIO_LEVEL_MASK_MV78200_OFF(cpu) ((cpu) ? 0x34 : 0x1C)
+
+/* The Armada XP has per-CPU registers for interrupt cause, interrupt
+ * mask and interrupt level mask. Those are relative to the
+ * percpu_membase. */
+#define GPIO_EDGE_CAUSE_ARMADAXP_OFF(cpu) ((cpu) * 0x4)
+#define GPIO_EDGE_MASK_ARMADAXP_OFF(cpu) (0x10 + (cpu) * 0x4)
+#define GPIO_LEVEL_MASK_ARMADAXP_OFF(cpu) (0x20 + (cpu) * 0x4)
+
+#define MVEBU_GPIO_SOC_VARIANT_ORION 0x1
+#define MVEBU_GPIO_SOC_VARIANT_MV78200 0x2
+#define MVEBU_GPIO_SOC_VARIANT_ARMADAXP 0x3
+
+#define MVEBU_MAX_GPIO_PER_BANK 32
+
+struct mvebu_gpio_chip {
+ struct gpio_chip chip;
+ spinlock_t lock;
+ void __iomem *membase;
+ void __iomem *percpu_membase;
+ int irqbase; /* signed: irq_alloc_descs() returns a negative errno */
+ struct irq_domain *domain;
+ int soc_variant;
+};
+
+/*
+ * Functions returning addresses of individual registers for a given
+ * GPIO controller.
+ */
+static inline void __iomem *mvebu_gpioreg_out(struct mvebu_gpio_chip *mvchip)
+{
+ return mvchip->membase + GPIO_OUT_OFF;
+}
+
+static inline void __iomem *mvebu_gpioreg_io_conf(struct mvebu_gpio_chip *mvchip)
+{
+ return mvchip->membase + GPIO_IO_CONF_OFF;
+}
+
+static inline void __iomem *mvebu_gpioreg_in_pol(struct mvebu_gpio_chip *mvchip)
+{
+ return mvchip->membase + GPIO_IN_POL_OFF;
+}
+
+static inline void __iomem *mvebu_gpioreg_data_in(struct mvebu_gpio_chip *mvchip)
+{
+ return mvchip->membase + GPIO_DATA_IN_OFF;
+}
+
+static inline void __iomem *mvebu_gpioreg_edge_cause(struct mvebu_gpio_chip *mvchip)
+{
+ int cpu;
+
+ switch (mvchip->soc_variant) {
+ case MVEBU_GPIO_SOC_VARIANT_ORION:
+ case MVEBU_GPIO_SOC_VARIANT_MV78200:
+ return mvchip->membase + GPIO_EDGE_CAUSE_OFF;
+ case MVEBU_GPIO_SOC_VARIANT_ARMADAXP:
+ cpu = smp_processor_id();
+ return mvchip->percpu_membase + GPIO_EDGE_CAUSE_ARMADAXP_OFF(cpu);
+ default:
+ BUG();
+ }
+}
+
+static inline void __iomem *mvebu_gpioreg_edge_mask(struct mvebu_gpio_chip *mvchip)
+{
+ int cpu;
+
+ switch (mvchip->soc_variant) {
+ case MVEBU_GPIO_SOC_VARIANT_ORION:
+ return mvchip->membase + GPIO_EDGE_MASK_OFF;
+ case MVEBU_GPIO_SOC_VARIANT_MV78200:
+ cpu = smp_processor_id();
+ return mvchip->membase + GPIO_EDGE_MASK_MV78200_OFF(cpu);
+ case MVEBU_GPIO_SOC_VARIANT_ARMADAXP:
+ cpu = smp_processor_id();
+ return mvchip->percpu_membase + GPIO_EDGE_MASK_ARMADAXP_OFF(cpu);
+ default:
+ BUG();
+ }
+}
+
+static void __iomem *mvebu_gpioreg_level_mask(struct mvebu_gpio_chip *mvchip)
+{
+ int cpu;
+
+ switch (mvchip->soc_variant) {
+ case MVEBU_GPIO_SOC_VARIANT_ORION:
+ return mvchip->membase + GPIO_LEVEL_MASK_OFF;
+ case MVEBU_GPIO_SOC_VARIANT_MV78200:
+ cpu = smp_processor_id();
+ return mvchip->membase + GPIO_LEVEL_MASK_MV78200_OFF(cpu);
+ case MVEBU_GPIO_SOC_VARIANT_ARMADAXP:
+ cpu = smp_processor_id();
+ return mvchip->percpu_membase + GPIO_LEVEL_MASK_ARMADAXP_OFF(cpu);
+ default:
+ BUG();
+ }
+}
+
+/*
+ * Functions implementing the gpio_chip methods
+ */
+
+int mvebu_gpio_request(struct gpio_chip *chip, unsigned pin)
+{
+ return pinctrl_request_gpio(chip->base + pin);
+}
+
+void mvebu_gpio_free(struct gpio_chip *chip, unsigned pin)
+{
+ pinctrl_free_gpio(chip->base + pin);
+}
+
+static void mvebu_gpio_set(struct gpio_chip *chip, unsigned pin, int value)
+{
+ struct mvebu_gpio_chip *mvchip =
+ container_of(chip, struct mvebu_gpio_chip, chip);
+ unsigned long flags;
+ u32 u;
+
+ spin_lock_irqsave(&mvchip->lock, flags);
+ u = readl_relaxed(mvebu_gpioreg_out(mvchip));
+ if (value)
+ u |= 1 << pin;
+ else
+ u &= ~(1 << pin);
+ writel_relaxed(u, mvebu_gpioreg_out(mvchip));
+ spin_unlock_irqrestore(&mvchip->lock, flags);
+}
+
+static int mvebu_gpio_get(struct gpio_chip *chip, unsigned pin)
+{
+ struct mvebu_gpio_chip *mvchip =
+ container_of(chip, struct mvebu_gpio_chip, chip);
+ u32 u;
+
+ if (readl_relaxed(mvebu_gpioreg_io_conf(mvchip)) & (1 << pin)) {
+ u = readl_relaxed(mvebu_gpioreg_data_in(mvchip)) ^
+ readl_relaxed(mvebu_gpioreg_in_pol(mvchip));
+ } else {
+ u = readl_relaxed(mvebu_gpioreg_out(mvchip));
+ }
+
+ return (u >> pin) & 1;
+}
+
+static int mvebu_gpio_direction_input(struct gpio_chip *chip, unsigned pin)
+{
+ struct mvebu_gpio_chip *mvchip =
+ container_of(chip, struct mvebu_gpio_chip, chip);
+ unsigned long flags;
+ int ret;
+ u32 u;
+
+ /* Check with the pinctrl driver whether this pin is usable as
+ * an input GPIO */
+ ret = pinctrl_gpio_direction_input(chip->base + pin);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&mvchip->lock, flags);
+ u = readl_relaxed(mvebu_gpioreg_io_conf(mvchip));
+ u |= 1 << pin;
+ writel_relaxed(u, mvebu_gpioreg_io_conf(mvchip));
+ spin_unlock_irqrestore(&mvchip->lock, flags);
+
+ return 0;
+}
+
+static int mvebu_gpio_direction_output(struct gpio_chip *chip, unsigned pin,
+ int value)
+{
+ struct mvebu_gpio_chip *mvchip =
+ container_of(chip, struct mvebu_gpio_chip, chip);
+ unsigned long flags;
+ int ret;
+ u32 u;
+
+ /* Check with the pinctrl driver whether this pin is usable as
+ * an output GPIO */
+ ret = pinctrl_gpio_direction_output(chip->base + pin);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&mvchip->lock, flags);
+ u = readl_relaxed(mvebu_gpioreg_io_conf(mvchip));
+ u &= ~(1 << pin);
+ writel_relaxed(u, mvebu_gpioreg_io_conf(mvchip));
+ spin_unlock_irqrestore(&mvchip->lock, flags);
+
+ return 0;
+}
+
+static int mvebu_gpio_to_irq(struct gpio_chip *chip, unsigned pin)
+{
+ struct mvebu_gpio_chip *mvchip =
+ container_of(chip, struct mvebu_gpio_chip, chip);
+ return irq_create_mapping(mvchip->domain, pin);
+}
+
+/*
+ * Functions implementing the irq_chip methods
+ */
+static void mvebu_gpio_irq_ack(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct mvebu_gpio_chip *mvchip = gc->private;
+ u32 mask = ~(1 << (d->irq - gc->irq_base));
+
+ irq_gc_lock(gc);
+ writel_relaxed(mask, mvebu_gpioreg_edge_cause(mvchip));
+ irq_gc_unlock(gc);
+}
+
+static void mvebu_gpio_edge_irq_mask(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct mvebu_gpio_chip *mvchip = gc->private;
+ u32 mask = 1 << (d->irq - gc->irq_base);
+
+ irq_gc_lock(gc);
+ gc->mask_cache &= ~mask;
+ writel_relaxed(gc->mask_cache, mvebu_gpioreg_edge_mask(mvchip));
+ irq_gc_unlock(gc);
+}
+
+static void mvebu_gpio_edge_irq_unmask(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct mvebu_gpio_chip *mvchip = gc->private;
+ u32 mask = 1 << (d->irq - gc->irq_base);
+
+ irq_gc_lock(gc);
+ gc->mask_cache |= mask;
+ writel_relaxed(gc->mask_cache, mvebu_gpioreg_edge_mask(mvchip));
+ irq_gc_unlock(gc);
+}
+
+static void mvebu_gpio_level_irq_mask(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct mvebu_gpio_chip *mvchip = gc->private;
+ u32 mask = 1 << (d->irq - gc->irq_base);
+
+ irq_gc_lock(gc);
+ gc->mask_cache &= ~mask;
+ writel_relaxed(gc->mask_cache, mvebu_gpioreg_level_mask(mvchip));
+ irq_gc_unlock(gc);
+}
+
+static void mvebu_gpio_level_irq_unmask(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct mvebu_gpio_chip *mvchip = gc->private;
+ u32 mask = 1 << (d->irq - gc->irq_base);
+
+ irq_gc_lock(gc);
+ gc->mask_cache |= mask;
+ writel_relaxed(gc->mask_cache, mvebu_gpioreg_level_mask(mvchip));
+ irq_gc_unlock(gc);
+}
+
+/*****************************************************************************
+ * MVEBU GPIO IRQ
+ *
+ * GPIO_IN_POL register controls whether GPIO_DATA_IN will hold the same
+ * value of the line or the opposite value.
+ *
+ * Level IRQ handlers: DATA_IN is used directly as the cause register.
+ * Interrupts are masked by the LEVEL_MASK registers.
+ * Edge IRQ handlers: Changes in DATA_IN are latched in EDGE_CAUSE.
+ * Interrupts are masked by the EDGE_MASK registers.
+ * Both-edge handlers: Similar to regular edge handlers, but also swap
+ * the polarity to catch the next line transition.
+ * This is inherently racy and may not work
+ * reliably in some use cases.
+ *
+ * Every eight GPIO lines are grouped (OR'ed) before going up to the
+ * main cause register.
+ *
+ * EDGE cause mask
+ * data-in /--------| |-----| |----\
+ * -----| |----- ---- to main cause reg
+ * X \----------------| |----/
+ * polarity LEVEL mask
+ *
+ ****************************************************************************/
+
+static int mvebu_gpio_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
+ struct mvebu_gpio_chip *mvchip = gc->private;
+ int pin;
+ u32 u;
+
+ pin = d->hwirq;
+
+ u = readl_relaxed(mvebu_gpioreg_io_conf(mvchip)) & (1 << pin);
+ if (!u)
+ return -EINVAL;
+
+ type &= IRQ_TYPE_SENSE_MASK;
+ if (type == IRQ_TYPE_NONE)
+ return -EINVAL;
+
+ /* Check if we need to change chip and handler */
+ if (!(ct->type & type))
+ if (irq_setup_alt_chip(d, type))
+ return -EINVAL;
+
+ /*
+ * Configure interrupt polarity.
+ */
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ case IRQ_TYPE_LEVEL_HIGH:
+ u = readl_relaxed(mvebu_gpioreg_in_pol(mvchip));
+ u &= ~(1 << pin);
+ writel_relaxed(u, mvebu_gpioreg_in_pol(mvchip));
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ case IRQ_TYPE_LEVEL_LOW:
+ u = readl_relaxed(mvebu_gpioreg_in_pol(mvchip));
+ u |= 1 << pin;
+ writel_relaxed(u, mvebu_gpioreg_in_pol(mvchip));
+ break;
+ case IRQ_TYPE_EDGE_BOTH: {
+ u32 v;
+
+ v = readl_relaxed(mvebu_gpioreg_in_pol(mvchip)) ^
+ readl_relaxed(mvebu_gpioreg_data_in(mvchip));
+
+ /*
+ * set initial polarity based on current input level
+ */
+ u = readl_relaxed(mvebu_gpioreg_in_pol(mvchip));
+ if (v & (1 << pin))
+ u |= 1 << pin; /* falling */
+ else
+ u &= ~(1 << pin); /* rising */
+ writel_relaxed(u, mvebu_gpioreg_in_pol(mvchip));
+ }
+ }
+ return 0;
+}
+
+static void mvebu_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+ struct mvebu_gpio_chip *mvchip = irq_get_handler_data(irq);
+ u32 cause, type;
+ int i;
+
+ if (mvchip == NULL)
+ return;
+
+ cause = readl_relaxed(mvebu_gpioreg_data_in(mvchip)) &
+ readl_relaxed(mvebu_gpioreg_level_mask(mvchip));
+ cause |= readl_relaxed(mvebu_gpioreg_edge_cause(mvchip)) &
+ readl_relaxed(mvebu_gpioreg_edge_mask(mvchip));
+
+ for (i = 0; i < mvchip->chip.ngpio; i++) {
+ int irq;
+
+ irq = mvchip->irqbase + i;
+
+ if (!(cause & (1 << i)))
+ continue;
+
+ type = irqd_get_trigger_type(irq_get_irq_data(irq));
+ if ((type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) {
+ /* Swap polarity (race with GPIO line) */
+ u32 polarity;
+
+ polarity = readl_relaxed(mvebu_gpioreg_in_pol(mvchip));
+ polarity ^= 1 << i;
+ writel_relaxed(polarity, mvebu_gpioreg_in_pol(mvchip));
+ }
+ generic_handle_irq(irq);
+ }
+}
+
+static struct platform_device_id mvebu_gpio_ids[] = {
+ {
+ .name = "orion-gpio",
+ }, {
+ .name = "mv78200-gpio",
+ }, {
+ .name = "armadaxp-gpio",
+ }, {
+ /* sentinel */
+ },
+};
+MODULE_DEVICE_TABLE(platform, mvebu_gpio_ids);
+
+static struct of_device_id mvebu_gpio_of_match[] __devinitdata = {
+ {
+ .compatible = "marvell,orion-gpio",
+ .data = (void *)MVEBU_GPIO_SOC_VARIANT_ORION,
+ },
+ {
+ .compatible = "marvell,mv78200-gpio",
+ .data = (void *)MVEBU_GPIO_SOC_VARIANT_MV78200,
+ },
+ {
+ .compatible = "marvell,armadaxp-gpio",
+ .data = (void *)MVEBU_GPIO_SOC_VARIANT_ARMADAXP,
+ },
+ {
+ /* sentinel */
+ },
+};
+MODULE_DEVICE_TABLE(of, mvebu_gpio_of_match);
+
+static int __devinit mvebu_gpio_probe(struct platform_device *pdev)
+{
+ struct mvebu_gpio_chip *mvchip;
+ const struct of_device_id *match;
+ struct device_node *np = pdev->dev.of_node;
+ struct resource *res;
+ struct irq_chip_generic *gc;
+ struct irq_chip_type *ct;
+ unsigned int ngpios;
+ int soc_variant;
+ int i, cpu, id;
+
+ match = of_match_device(mvebu_gpio_of_match, &pdev->dev);
+ if (match)
+ soc_variant = (int) match->data;
+ else
+ soc_variant = MVEBU_GPIO_SOC_VARIANT_ORION;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Cannot get memory resource\n");
+ return -ENODEV;
+ }
+
+ mvchip = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_gpio_chip), GFP_KERNEL);
+ if (!mvchip) {
+ dev_err(&pdev->dev, "Cannot allocate memory\n");
+ return -ENOMEM;
+ }
+
+ if (of_property_read_u32(pdev->dev.of_node, "ngpios", &ngpios)) {
+ dev_err(&pdev->dev, "Missing ngpios OF property\n");
+ return -ENODEV;
+ }
+
+ id = of_alias_get_id(pdev->dev.of_node, "gpio");
+ if (id < 0) {
+ dev_err(&pdev->dev, "Couldn't get OF id\n");
+ return id;
+ }
+
+ mvchip->soc_variant = soc_variant;
+ mvchip->chip.label = dev_name(&pdev->dev);
+ mvchip->chip.dev = &pdev->dev;
+ mvchip->chip.request = mvebu_gpio_request;
+ mvchip->chip.direction_input = mvebu_gpio_direction_input;
+ mvchip->chip.get = mvebu_gpio_get;
+ mvchip->chip.direction_output = mvebu_gpio_direction_output;
+ mvchip->chip.set = mvebu_gpio_set;
+ mvchip->chip.to_irq = mvebu_gpio_to_irq;
+ mvchip->chip.base = id * MVEBU_MAX_GPIO_PER_BANK;
+ mvchip->chip.ngpio = ngpios;
+ mvchip->chip.can_sleep = 0;
+#ifdef CONFIG_OF
+ mvchip->chip.of_node = np;
+#endif
+
+ spin_lock_init(&mvchip->lock);
+ mvchip->membase = devm_request_and_ioremap(&pdev->dev, res);
+ if (!mvchip->membase) {
+ dev_err(&pdev->dev, "Cannot ioremap\n");
+ return -ENOMEM;
+ }
+
+ /* The Armada XP has a second range of registers for the
+ * per-CPU registers */
+ if (soc_variant == MVEBU_GPIO_SOC_VARIANT_ARMADAXP) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res) {
+ dev_err(&pdev->dev, "Cannot get memory resource\n");
+ return -ENODEV;
+ }
+
+ mvchip->percpu_membase = devm_request_and_ioremap(&pdev->dev, res);
+ if (!mvchip->percpu_membase) {
+ dev_err(&pdev->dev, "Cannot ioremap\n");
+ return -ENOMEM;
+ }
+ }
+
+ /*
+ * Mask and clear GPIO interrupts.
+ */
+ switch (soc_variant) {
+ case MVEBU_GPIO_SOC_VARIANT_ORION:
+ writel_relaxed(0, mvchip->membase + GPIO_EDGE_CAUSE_OFF);
+ writel_relaxed(0, mvchip->membase + GPIO_EDGE_MASK_OFF);
+ writel_relaxed(0, mvchip->membase + GPIO_LEVEL_MASK_OFF);
+ break;
+ case MVEBU_GPIO_SOC_VARIANT_MV78200:
+ writel_relaxed(0, mvchip->membase + GPIO_EDGE_CAUSE_OFF);
+ for (cpu = 0; cpu < 2; cpu++) {
+ writel_relaxed(0, mvchip->membase +
+ GPIO_EDGE_MASK_MV78200_OFF(cpu));
+ writel_relaxed(0, mvchip->membase +
+ GPIO_LEVEL_MASK_MV78200_OFF(cpu));
+ }
+ break;
+ case MVEBU_GPIO_SOC_VARIANT_ARMADAXP:
+ writel_relaxed(0, mvchip->membase + GPIO_EDGE_CAUSE_OFF);
+ writel_relaxed(0, mvchip->membase + GPIO_EDGE_MASK_OFF);
+ writel_relaxed(0, mvchip->membase + GPIO_LEVEL_MASK_OFF);
+ for (cpu = 0; cpu < 4; cpu++) {
+ writel_relaxed(0, mvchip->percpu_membase +
+ GPIO_EDGE_CAUSE_ARMADAXP_OFF(cpu));
+ writel_relaxed(0, mvchip->percpu_membase +
+ GPIO_EDGE_MASK_ARMADAXP_OFF(cpu));
+ writel_relaxed(0, mvchip->percpu_membase +
+ GPIO_LEVEL_MASK_ARMADAXP_OFF(cpu));
+ }
+ break;
+ default:
+ BUG();
+ }
+
+ gpiochip_add(&mvchip->chip);
+
+ /* Some gpio controllers do not provide irq support */
+ if (!of_irq_count(np))
+ return 0;
+
+ /* Setup the interrupt handlers. Each chip can have up to 4
+ * interrupt handlers, with each handler dealing with 8 GPIO
+ * pins. */
+ for (i = 0; i < 4; i++) {
+ int irq;
+ irq = platform_get_irq(pdev, i);
+ if (irq < 0)
+ continue;
+ irq_set_handler_data(irq, mvchip);
+ irq_set_chained_handler(irq, mvebu_gpio_irq_handler);
+ }
+
+ mvchip->irqbase = irq_alloc_descs(-1, 0, ngpios, -1);
+ if (mvchip->irqbase < 0) {
+ dev_err(&pdev->dev, "no irqs\n");
+ return mvchip->irqbase;
+ }
+
+ gc = irq_alloc_generic_chip("mvebu_gpio_irq", 2, mvchip->irqbase,
+ mvchip->membase, handle_level_irq);
+ if (!gc) {
+ dev_err(&pdev->dev, "Cannot allocate generic irq_chip\n");
+ return -ENOMEM;
+ }
+
+ gc->private = mvchip;
+ ct = &gc->chip_types[0];
+ ct->type = IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW;
+ ct->chip.irq_mask = mvebu_gpio_level_irq_mask;
+ ct->chip.irq_unmask = mvebu_gpio_level_irq_unmask;
+ ct->chip.irq_set_type = mvebu_gpio_irq_set_type;
+ ct->chip.name = mvchip->chip.label;
+
+ ct = &gc->chip_types[1];
+ ct->type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
+ ct->chip.irq_ack = mvebu_gpio_irq_ack;
+ ct->chip.irq_mask = mvebu_gpio_edge_irq_mask;
+ ct->chip.irq_unmask = mvebu_gpio_edge_irq_unmask;
+ ct->chip.irq_set_type = mvebu_gpio_irq_set_type;
+ ct->handler = handle_edge_irq;
+ ct->chip.name = mvchip->chip.label;
+
+ irq_setup_generic_chip(gc, IRQ_MSK(ngpios), IRQ_GC_INIT_MASK_CACHE,
+ IRQ_NOREQUEST, IRQ_LEVEL | IRQ_NOPROBE);
+
+ /* Setup irq domain on top of the generic chip. */
+ mvchip->domain = irq_domain_add_legacy(np, mvchip->chip.ngpio,
+ mvchip->irqbase, 0,
+ &irq_domain_simple_ops,
+ mvchip);
+ if (!mvchip->domain) {
+ dev_err(&pdev->dev, "couldn't allocate irq domain %s (DT).\n",
+ mvchip->chip.label);
+ irq_remove_generic_chip(gc, IRQ_MSK(ngpios), IRQ_NOREQUEST,
+ IRQ_LEVEL | IRQ_NOPROBE);
+ kfree(gc);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static struct platform_driver mvebu_gpio_driver = {
+ .driver = {
+ .name = "mvebu-gpio",
+ .owner = THIS_MODULE,
+ .of_match_table = mvebu_gpio_of_match,
+ },
+ .probe = mvebu_gpio_probe,
+ .id_table = mvebu_gpio_ids,
+};
+
+static int __init mvebu_gpio_init(void)
+{
+ return platform_driver_register(&mvebu_gpio_driver);
+}
+postcore_initcall(mvebu_gpio_init);
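From a consumer's perspective the bank behaves like any other gpiolib/irqdomain provider: gpio_to_irq() resolves through the legacy domain registered above, and the requested trigger ends up in mvebu_gpio_irq_set_type(). A board-level sketch (the GPIO number and names are invented):

#include <linux/gpio.h>
#include <linux/interrupt.h>

static irqreturn_t demo_isr(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int demo_hook_line(void)
{
	int gpio = 42;			/* invented line number */
	int irq, ret;

	ret = gpio_request_one(gpio, GPIOF_IN, "demo");
	if (ret)
		return ret;

	irq = gpio_to_irq(gpio);	/* resolves via the irq domain */
	if (irq < 0) {
		gpio_free(gpio);
		return irq;
	}

	/* the trigger request lands in mvebu_gpio_irq_set_type() */
	return request_irq(irq, demo_isr, IRQF_TRIGGER_RISING, "demo", NULL);
}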
diff --git a/drivers/gpio/gpio-stp-xway.c b/drivers/gpio/gpio-stp-xway.c
index e35096b..8bead0b 100644
--- a/drivers/gpio/gpio-stp-xway.c
+++ b/drivers/gpio/gpio-stp-xway.c
@@ -82,7 +82,7 @@ struct xway_stp {
struct gpio_chip gc;
void __iomem *virt;
u32 edge; /* rising or falling edge triggered shift register */
- u16 shadow; /* shadow the shift registers state */
+ u32 shadow; /* shadow the shift registers state */
u8 groups; /* we can drive 1-3 groups of 8bit each */
u8 dsl; /* the 2 LSBs can be driven by the dsl core */
u8 phy1; /* 3 bits can be driven by phy1 */
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 92177d5..24efae4 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -706,7 +706,7 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
goto out_unlock;
}
- vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
+ vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_ops = obj->dev->driver->gem_vm_ops;
vma->vm_private_data = map->handle;
vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 23a824e..db7bd29 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -514,8 +514,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
vma->vm_ops = &drm_vm_dma_ops;
- vma->vm_flags |= VM_RESERVED; /* Don't swap */
- vma->vm_flags |= VM_DONTEXPAND;
+ vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
drm_vm_open_locked(dev, vma);
return 0;
@@ -643,21 +642,16 @@ int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
case _DRM_SHM:
vma->vm_ops = &drm_vm_shm_ops;
vma->vm_private_data = (void *)map;
- /* Don't let this area swap. Change when
- DRM_KERNEL advisory is supported. */
- vma->vm_flags |= VM_RESERVED;
break;
case _DRM_SCATTER_GATHER:
vma->vm_ops = &drm_vm_sg_ops;
vma->vm_private_data = (void *)map;
- vma->vm_flags |= VM_RESERVED;
vma->vm_page_prot = drm_dma_prot(map->type, vma);
break;
default:
return -EINVAL; /* This should never happen. */
}
- vma->vm_flags |= VM_RESERVED; /* Don't swap */
- vma->vm_flags |= VM_DONTEXPAND;
+ vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
drm_vm_open_locked(dev, vma);
return 0;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index fcdbe46..d254556 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -500,7 +500,7 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
DRM_DEBUG_KMS("%s\n", __FILE__);
- vma->vm_flags |= (VM_IO | VM_RESERVED);
+ vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
update_vm_cache_attr(exynos_gem_obj, vma);
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 884ba73..afded54 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -178,8 +178,7 @@ static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
*/
vma->vm_ops = &psbfb_vm_ops;
vma->vm_private_data = (void *)psbfb;
- vma->vm_flags |= VM_RESERVED | VM_IO |
- VM_MIXEDMAP | VM_DONTEXPAND;
+ vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
return 0;
}
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index a877813..3ba72db 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -285,7 +285,7 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
*/
vma->vm_private_data = bo;
- vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
+ vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
return 0;
out_unref:
ttm_bo_unref(&bo);
@@ -300,7 +300,7 @@ int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
vma->vm_ops = &ttm_bo_vm_ops;
vma->vm_private_data = ttm_bo_reference(bo);
- vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
+ vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
return 0;
}
EXPORT_SYMBOL(ttm_fbdev_mmap);
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index 67df842..69a2b16 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -243,7 +243,7 @@ static int udl_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
size = 0;
}
- vma->vm_flags |= VM_RESERVED; /* avoid to swap out this VMA */
+ /* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */
return 0;
}
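All of the DRM hunks above apply the same substitution: the retired catch-all VM_RESERVED becomes the explicit pair VM_DONTEXPAND (the mapping may not grow via mremap) plus VM_DONTDUMP (skip it in core dumps), with VM_IO kept where it already applied. The shape of a converted mmap handler, as a minimal sketch:

#include <linux/fs.h>
#include <linux/mm.h>

static int demo_mmap(struct file *filp, struct vm_area_struct *vma)
{
	/* old: vma->vm_flags |= VM_RESERVED | VM_IO; */
	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
	return 0;
}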
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index 2f4b01b..36509ae 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -31,6 +31,8 @@
#include <linux/hwmon.h>
#include <linux/gpio.h>
#include <linux/gpio-fan.h>
+#include <linux/of_platform.h>
+#include <linux/of_gpio.h>
struct gpio_fan_data {
struct platform_device *pdev;
@@ -400,14 +402,131 @@ static ssize_t show_name(struct device *dev,
static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
+
+#ifdef CONFIG_OF_GPIO
+/*
+ * Translate OpenFirmware node properties into platform_data
+ */
+static int gpio_fan_get_of_pdata(struct device *dev,
+ struct gpio_fan_platform_data *pdata)
+{
+ struct device_node *node;
+ struct gpio_fan_speed *speed;
+ unsigned *ctrl;
+ unsigned i;
+ u32 u;
+ struct property *prop;
+ const __be32 *p;
+
+ node = dev->of_node;
+
+ /* Fill GPIO pin array */
+ pdata->num_ctrl = of_gpio_count(node);
+ if (!pdata->num_ctrl) {
+ dev_err(dev, "gpios DT property empty / missing");
+ return -ENODEV;
+ }
+ ctrl = devm_kzalloc(dev, pdata->num_ctrl * sizeof(unsigned),
+ GFP_KERNEL);
+ if (!ctrl)
+ return -ENOMEM;
+ for (i = 0; i < pdata->num_ctrl; i++) {
+ int val;
+
+ val = of_get_gpio(node, i);
+ if (val < 0)
+ return val;
+ ctrl[i] = val;
+ }
+ pdata->ctrl = ctrl;
+
+ /* Get number of RPM/ctrl_val pairs in speed map */
+ prop = of_find_property(node, "gpio-fan,speed-map", &i);
+ if (!prop) {
+ dev_err(dev, "gpio-fan,speed-map DT property missing");
+ return -ENODEV;
+ }
+ i = i / sizeof(u32);
+ if (i == 0 || i & 1) {
+ dev_err(dev, "gpio-fan,speed-map contains zero/odd number of entries");
+ return -ENODEV;
+ }
+ pdata->num_speed = i / 2;
+
+ /*
+ * Populate speed map
+ * Speed map is in the form <RPM ctrl_val RPM ctrl_val ...>
+ * this needs splitting into pairs to create gpio_fan_speed structs
+ */
+ speed = devm_kzalloc(dev,
+ pdata->num_speed * sizeof(struct gpio_fan_speed),
+ GFP_KERNEL);
+ if (!speed)
+ return -ENOMEM;
+ p = NULL;
+ for (i = 0; i < pdata->num_speed; i++) {
+ p = of_prop_next_u32(prop, p, &u);
+ if (!p)
+ return -ENODEV;
+ speed[i].rpm = u;
+ p = of_prop_next_u32(prop, p, &u);
+ if (!p)
+ return -ENODEV;
+ speed[i].ctrl_val = u;
+ }
+ pdata->speed = speed;
+
+ /* Alarm GPIO if one exists */
+ if (of_gpio_named_count(node, "alarm-gpios")) {
+ struct gpio_fan_alarm *alarm;
+ int val;
+ enum of_gpio_flags flags;
+
+ alarm = devm_kzalloc(dev, sizeof(struct gpio_fan_alarm),
+ GFP_KERNEL);
+ if (!alarm)
+ return -ENOMEM;
+
+ val = of_get_named_gpio_flags(node, "alarm-gpios", 0, &flags);
+ if (val < 0)
+ return val;
+ alarm->gpio = val;
+ alarm->active_low = flags & OF_GPIO_ACTIVE_LOW;
+
+ pdata->alarm = alarm;
+ }
+
+ return 0;
+}
+
+static struct of_device_id of_gpio_fan_match[] __devinitdata = {
+ { .compatible = "gpio-fan", },
+ {},
+};
+#endif /* CONFIG_OF_GPIO */
+
static int __devinit gpio_fan_probe(struct platform_device *pdev)
{
int err;
struct gpio_fan_data *fan_data;
struct gpio_fan_platform_data *pdata = pdev->dev.platform_data;
+#ifdef CONFIG_OF_GPIO
+ if (!pdata) {
+ pdata = devm_kzalloc(&pdev->dev,
+ sizeof(struct gpio_fan_platform_data),
+ GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ err = gpio_fan_get_of_pdata(&pdev->dev, pdata);
+ if (err)
+ return err;
+ }
+#else /* CONFIG_OF_GPIO */
if (!pdata)
return -EINVAL;
+#endif /* CONFIG_OF_GPIO */
fan_data = devm_kzalloc(&pdev->dev, sizeof(struct gpio_fan_data),
GFP_KERNEL);
@@ -511,6 +630,7 @@ static struct platform_driver gpio_fan_driver = {
.driver = {
.name = "gpio-fan",
.pm = GPIO_FAN_PM,
+ .of_match_table = of_match_ptr(of_gpio_fan_match),
},
};
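The speed-map parser above leans on of_prop_next_u32(), which walks a u32 array property one cell at a time; calling it twice per loop iteration consumes one <RPM ctrl_val> pair. The idiom reduced to essentials (the property name is the driver's, everything else is illustrative):

#include <linux/kernel.h>
#include <linux/of.h>

static int demo_read_pairs(struct device_node *np)
{
	struct property *prop;
	const __be32 *p = NULL;
	u32 rpm, ctrl;
	int len;

	prop = of_find_property(np, "gpio-fan,speed-map", &len);
	if (!prop || !len || (len / sizeof(u32)) & 1)
		return -ENODEV;		/* missing, empty or odd-sized */

	while ((p = of_prop_next_u32(prop, p, &rpm)) &&
	       (p = of_prop_next_u32(prop, p, &ctrl)))
		pr_info("pair: %u RPM -> ctrl 0x%x\n", rpm, ctrl);

	return 0;
}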
diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig
index 5a3bb3d..2f8c76b 100644
--- a/drivers/i2c/Kconfig
+++ b/drivers/i2c/Kconfig
@@ -4,7 +4,7 @@
menuconfig I2C
tristate "I2C support"
- depends on HAS_IOMEM
+ depends on !S390
select RT_MUTEXES
---help---
I2C (pronounce: I-squared-C) is a slow serial bus protocol used in
@@ -49,6 +49,7 @@ config I2C_CHARDEV
config I2C_MUX
tristate "I2C bus multiplexing support"
+ depends on HAS_IOMEM
help
Say Y here if you want the I2C core to support the ability to
handle multiplexed I2C bus topologies, by presenting each
@@ -86,6 +87,19 @@ config I2C_SMBUS
source drivers/i2c/algos/Kconfig
source drivers/i2c/busses/Kconfig
+config I2C_STUB
+ tristate "I2C/SMBus Test Stub"
+ depends on EXPERIMENTAL && m
+ default n
+ help
+ This module may be useful to developers of SMBus client drivers,
+ especially for certain kinds of sensor chips.
+
+ If you do build this module, be sure to read the notes and warnings
+ in <file:Documentation/i2c/i2c-stub>.
+
+ If you don't know what to do here, definitely say N.
+
config I2C_DEBUG_CORE
bool "I2C Core debugging messages"
help
@@ -103,6 +117,7 @@ config I2C_DEBUG_ALGO
config I2C_DEBUG_BUS
bool "I2C Bus debugging messages"
+ depends on HAS_IOMEM
help
Say Y here if you want the I2C bus drivers to produce a bunch of
debug messages to the system log. Select this if you are having
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 42d9fdd..ff01c38 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -3,6 +3,7 @@
#
menu "I2C Hardware Bus support"
+ depends on HAS_IOMEM
comment "PC SMBus host controller drivers"
depends on PCI
@@ -80,6 +81,7 @@ config I2C_I801
tristate "Intel 82801 (ICH/PCH)"
depends on PCI
select CHECK_SIGNATURE if X86 && DMI
+ select GPIOLIB if I2C_MUX
help
If you say yes to this option, support will be included for the Intel
801 family of mainboard I2C interfaces. Specifically, the following
@@ -224,7 +226,7 @@ config I2C_VIA
will be called i2c-via.
config I2C_VIAPRO
- tristate "VIA VT82C596/82C686/82xx and CX700/VX8xx"
+ tristate "VIA VT82C596/82C686/82xx and CX700/VX8xx/VX900"
depends on PCI
help
If you say yes to this option, support will be included for the VIA
@@ -240,6 +242,7 @@ config I2C_VIAPRO
CX700
VX800/VX820
VX855/VX875
+ VX900
This driver can also be built as a module. If so, the module
will be called i2c-viapro.
@@ -849,19 +852,6 @@ config I2C_SIBYTE
help
Supports the SiByte SOC on-chip I2C interfaces (2 channels).
-config I2C_STUB
- tristate "I2C/SMBus Test Stub"
- depends on EXPERIMENTAL && m
- default 'n'
- help
- This module may be useful to developers of SMBus client drivers,
- especially for certain kinds of sensor chips.
-
- If you do build this module, be sure to read the notes and warnings
- in <file:Documentation/i2c/i2c-stub>.
-
- If you don't know what to do here, definitely say N.
-
config SCx200_I2C
tristate "NatSemi SCx200 I2C using GPIO pins (DEPRECATED)"
depends on SCx200_GPIO
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index 7b8ebbe..cbba7db 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -370,7 +370,7 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
* messages into the tx buffer. Even if the size of i2c_msg data is
* longer than the size of the tx buffer, it handles everything.
*/
-void
+static void
i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
{
struct i2c_msg *msgs = dev->msgs;
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 33e9b0c..3779315 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -80,6 +80,13 @@
#include <linux/dmi.h>
#include <linux/slab.h>
#include <linux/wait.h>
+#include <linux/err.h>
+
+#if defined CONFIG_I2C_MUX || defined CONFIG_I2C_MUX_MODULE
+#include <linux/gpio.h>
+#include <linux/i2c-mux-gpio.h>
+#include <linux/platform_device.h>
+#endif
/* I801 SMBus address offsets */
#define SMBHSTSTS(p) (0 + (p)->smba)
@@ -158,6 +165,15 @@
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_SMBUS 0x8c22
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_SMBUS 0x9c22
+struct i801_mux_config {
+ char *gpio_chip;
+ unsigned values[3];
+ int n_values;
+ unsigned classes[3];
+ unsigned gpios[2]; /* Relative to gpio_chip->base */
+ int n_gpios;
+};
+
struct i801_priv {
struct i2c_adapter adapter;
unsigned long smba;
@@ -175,6 +191,11 @@ struct i801_priv {
int count;
int len;
u8 *data;
+
+#if defined CONFIG_I2C_MUX || defined CONFIG_I2C_MUX_MODULE
+ const struct i801_mux_config *mux_drvdata;
+ struct platform_device *mux_pdev;
+#endif
};
static struct pci_driver i801_driver;
@@ -900,6 +921,165 @@ static void __init input_apanel_init(void) {}
static void __devinit i801_probe_optional_slaves(struct i801_priv *priv) {}
#endif /* CONFIG_X86 && CONFIG_DMI */
+#if defined CONFIG_I2C_MUX || defined CONFIG_I2C_MUX_MODULE
+static struct i801_mux_config i801_mux_config_asus_z8_d12 = {
+ .gpio_chip = "gpio_ich",
+ .values = { 0x02, 0x03 },
+ .n_values = 2,
+ .classes = { I2C_CLASS_SPD, I2C_CLASS_SPD },
+ .gpios = { 52, 53 },
+ .n_gpios = 2,
+};
+
+static struct i801_mux_config i801_mux_config_asus_z8_d18 = {
+ .gpio_chip = "gpio_ich",
+ .values = { 0x02, 0x03, 0x01 },
+ .n_values = 3,
+ .classes = { I2C_CLASS_SPD, I2C_CLASS_SPD, I2C_CLASS_SPD },
+ .gpios = { 52, 53 },
+ .n_gpios = 2,
+};
+
+static struct dmi_system_id __devinitdata mux_dmi_table[] = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "Z8NA-D6(C)"),
+ },
+ .driver_data = &i801_mux_config_asus_z8_d12,
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "Z8P(N)E-D12(X)"),
+ },
+ .driver_data = &i801_mux_config_asus_z8_d12,
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "Z8NH-D12"),
+ },
+ .driver_data = &i801_mux_config_asus_z8_d12,
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "Z8PH-D12/IFB"),
+ },
+ .driver_data = &i801_mux_config_asus_z8_d12,
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "Z8NR-D12"),
+ },
+ .driver_data = &i801_mux_config_asus_z8_d12,
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "Z8P(N)H-D12"),
+ },
+ .driver_data = &i801_mux_config_asus_z8_d12,
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "Z8PG-D18"),
+ },
+ .driver_data = &i801_mux_config_asus_z8_d18,
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "Z8PE-D18"),
+ },
+ .driver_data = &i801_mux_config_asus_z8_d18,
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "Z8PS-D12"),
+ },
+ .driver_data = &i801_mux_config_asus_z8_d12,
+ },
+ { }
+};
+
+/* Setup multiplexing if needed */
+static int __devinit i801_add_mux(struct i801_priv *priv)
+{
+ struct device *dev = &priv->adapter.dev;
+ const struct i801_mux_config *mux_config;
+ struct i2c_mux_gpio_platform_data gpio_data;
+ int err;
+
+ if (!priv->mux_drvdata)
+ return 0;
+ mux_config = priv->mux_drvdata;
+
+ /* Prepare the platform data */
+ memset(&gpio_data, 0, sizeof(struct i2c_mux_gpio_platform_data));
+ gpio_data.parent = priv->adapter.nr;
+ gpio_data.values = mux_config->values;
+ gpio_data.n_values = mux_config->n_values;
+ gpio_data.classes = mux_config->classes;
+ gpio_data.gpio_chip = mux_config->gpio_chip;
+ gpio_data.gpios = mux_config->gpios;
+ gpio_data.n_gpios = mux_config->n_gpios;
+ gpio_data.idle = I2C_MUX_GPIO_NO_IDLE;
+
+ /* Register the mux device */
+ priv->mux_pdev = platform_device_register_data(dev, "i2c-mux-gpio",
+ PLATFORM_DEVID_AUTO, &gpio_data,
+ sizeof(struct i2c_mux_gpio_platform_data));
+ if (IS_ERR(priv->mux_pdev)) {
+ err = PTR_ERR(priv->mux_pdev);
+ priv->mux_pdev = NULL;
+ dev_err(dev, "Failed to register i2c-mux-gpio device\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static void __devexit i801_del_mux(struct i801_priv *priv)
+{
+ if (priv->mux_pdev)
+ platform_device_unregister(priv->mux_pdev);
+}
+
+static unsigned int __devinit i801_get_adapter_class(struct i801_priv *priv)
+{
+ const struct dmi_system_id *id;
+ const struct i801_mux_config *mux_config;
+ unsigned int class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
+ int i;
+
+ id = dmi_first_match(mux_dmi_table);
+ if (id) {
+ /* Remove branch classes from trunk */
+ mux_config = id->driver_data;
+ for (i = 0; i < mux_config->n_values; i++)
+ class &= ~mux_config->classes[i];
+
+ /* Remember for later */
+ priv->mux_drvdata = mux_config;
+ }
+
+ return class;
+}
+#else
+static inline int i801_add_mux(struct i801_priv *priv) { return 0; }
+static inline void i801_del_mux(struct i801_priv *priv) { }
+
+static inline unsigned int i801_get_adapter_class(struct i801_priv *priv)
+{
+ return I2C_CLASS_HWMON | I2C_CLASS_SPD;
+}
+#endif
+
static int __devinit i801_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
@@ -913,7 +1093,7 @@ static int __devinit i801_probe(struct pci_dev *dev,
i2c_set_adapdata(&priv->adapter, priv);
priv->adapter.owner = THIS_MODULE;
- priv->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
+ priv->adapter.class = i801_get_adapter_class(priv);
priv->adapter.algo = &smbus_algorithm;
priv->pci_dev = dev;
@@ -1033,6 +1213,8 @@ static int __devinit i801_probe(struct pci_dev *dev,
}
i801_probe_optional_slaves(priv);
+ /* We ignore errors - multiplexing is optional */
+ i801_add_mux(priv);
pci_set_drvdata(dev, priv);
@@ -1052,6 +1234,7 @@ static void __devexit i801_remove(struct pci_dev *dev)
{
struct i801_priv *priv = pci_get_drvdata(dev);
+ i801_del_mux(priv);
i2c_del_adapter(&priv->adapter);
pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg);
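
The block above keys board-specific mux configuration off DMI firmware
strings. A minimal sketch of that lookup pattern; the table and config
value are hypothetical, but dmi_first_match() is the same kernel API the
driver calls:

	#include <linux/dmi.h>

	static const struct dmi_system_id board_table[] = {
		{
			.matches = {
				DMI_MATCH(DMI_BOARD_VENDOR, "ExampleVendor"),
				DMI_MATCH(DMI_BOARD_NAME, "ExampleBoard"),
			},
			.driver_data = (void *)42,	/* board-specific config */
		},
		{ }	/* terminator; dmi_first_match() stops here */
	};

	static long lookup_board_config(void)
	{
		const struct dmi_system_id *id = dmi_first_match(board_table);

		return id ? (long)id->driver_data : 0;	/* 0 if no match */
	}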
diff --git a/drivers/i2c/busses/i2c-parport.c b/drivers/i2c/busses/i2c-parport.c
index 2456568..81d8878 100644
--- a/drivers/i2c/busses/i2c-parport.c
+++ b/drivers/i2c/busses/i2c-parport.c
@@ -151,7 +151,7 @@ static const struct i2c_algo_bit_data parport_algo_data = {
/* ----- I2c and parallel port call-back functions and structures --------- */
-void i2c_parport_irq(void *data)
+static void i2c_parport_irq(void *data)
{
struct i2c_par *adapter = data;
struct i2c_client *ara = adapter->ara;
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index ef511df..8bbd6ec 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -37,6 +37,7 @@
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/i2c.h>
+#include <linux/slab.h>
#include <linux/init.h>
#include <linux/dmi.h>
#include <linux/acpi.h>
diff --git a/drivers/i2c/busses/i2c-scmi.c b/drivers/i2c/busses/i2c-scmi.c
index 388cbdc..6aafa3d 100644
--- a/drivers/i2c/busses/i2c-scmi.c
+++ b/drivers/i2c/busses/i2c-scmi.c
@@ -426,19 +426,7 @@ static struct acpi_driver acpi_smbus_cmi_driver = {
.remove = acpi_smbus_cmi_remove,
},
};
-
-static int __init acpi_smbus_cmi_init(void)
-{
- return acpi_bus_register_driver(&acpi_smbus_cmi_driver);
-}
-
-static void __exit acpi_smbus_cmi_exit(void)
-{
- acpi_bus_unregister_driver(&acpi_smbus_cmi_driver);
-}
-
-module_init(acpi_smbus_cmi_init);
-module_exit(acpi_smbus_cmi_exit);
+module_acpi_driver(acpi_smbus_cmi_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Crane Cai <crane.cai@amd.com>");
diff --git a/drivers/i2c/busses/i2c-viapro.c b/drivers/i2c/busses/i2c-viapro.c
index 333011c..271c9a2 100644
--- a/drivers/i2c/busses/i2c-viapro.c
+++ b/drivers/i2c/busses/i2c-viapro.c
@@ -401,6 +401,7 @@ found:
case PCI_DEVICE_ID_VIA_CX700:
case PCI_DEVICE_ID_VIA_VX800:
case PCI_DEVICE_ID_VIA_VX855:
+ case PCI_DEVICE_ID_VIA_VX900:
case PCI_DEVICE_ID_VIA_8251:
case PCI_DEVICE_ID_VIA_8237:
case PCI_DEVICE_ID_VIA_8237A:
@@ -470,6 +471,8 @@ static DEFINE_PCI_DEVICE_TABLE(vt596_ids) = {
.driver_data = SMBBA3 },
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VX855),
.driver_data = SMBBA3 },
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VX900),
+ .driver_data = SMBBA3 },
{ 0, }
};
diff --git a/drivers/i2c/busses/scx200_acb.c b/drivers/i2c/busses/scx200_acb.c
index 2eacb77..08aab57 100644
--- a/drivers/i2c/busses/scx200_acb.c
+++ b/drivers/i2c/busses/scx200_acb.c
@@ -23,6 +23,8 @@
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
@@ -37,8 +39,6 @@
#include <linux/scx200.h>
-#define NAME "scx200_acb"
-
MODULE_AUTHOR("Christer Weinigel <wingel@nano-system.com>");
MODULE_DESCRIPTION("NatSemi SCx200 ACCESS.bus Driver");
MODULE_ALIAS("platform:cs5535-smb");
@@ -398,7 +398,7 @@ static __devinit int scx200_acb_probe(struct scx200_acb_iface *iface)
outb(0x70, ACBCTL2);
if (inb(ACBCTL2) != 0x70) {
- pr_debug(NAME ": ACBCTL2 readback failed\n");
+ pr_debug("ACBCTL2 readback failed\n");
return -ENXIO;
}
@@ -406,8 +406,7 @@ static __devinit int scx200_acb_probe(struct scx200_acb_iface *iface)
val = inb(ACBCTL1);
if (val) {
- pr_debug(NAME ": disabled, but ACBCTL1=0x%02x\n",
- val);
+ pr_debug("disabled, but ACBCTL1=0x%02x\n", val);
return -ENXIO;
}
@@ -417,8 +416,8 @@ static __devinit int scx200_acb_probe(struct scx200_acb_iface *iface)
val = inb(ACBCTL1);
if ((val & ACBCTL1_NMINTE) != ACBCTL1_NMINTE) {
- pr_debug(NAME ": enabled, but NMINTE won't be set, "
- "ACBCTL1=0x%02x\n", val);
+ pr_debug("enabled, but NMINTE won't be set, ACBCTL1=0x%02x\n",
+ val);
return -ENXIO;
}
@@ -433,7 +432,7 @@ static __devinit struct scx200_acb_iface *scx200_create_iface(const char *text,
iface = kzalloc(sizeof(*iface), GFP_KERNEL);
if (!iface) {
- printk(KERN_ERR NAME ": can't allocate memory\n");
+ pr_err("can't allocate memory\n");
return NULL;
}
@@ -459,14 +458,14 @@ static int __devinit scx200_acb_create(struct scx200_acb_iface *iface)
rc = scx200_acb_probe(iface);
if (rc) {
- printk(KERN_WARNING NAME ": probe failed\n");
+ pr_warn("probe failed\n");
return rc;
}
scx200_acb_reset(iface);
if (i2c_add_adapter(adapter) < 0) {
- printk(KERN_ERR NAME ": failed to register\n");
+ pr_err("failed to register\n");
return -ENODEV;
}
@@ -493,8 +492,7 @@ static struct scx200_acb_iface * __devinit scx200_create_dev(const char *text,
return NULL;
if (!request_region(base, 8, iface->adapter.name)) {
- printk(KERN_ERR NAME ": can't allocate io 0x%lx-0x%lx\n",
- base, base + 8 - 1);
+ pr_err("can't allocate io 0x%lx-0x%lx\n", base, base + 8 - 1);
goto errout_free;
}
@@ -583,7 +581,7 @@ static __init void scx200_scan_isa(void)
static int __init scx200_acb_init(void)
{
- pr_debug(NAME ": NatSemi SCx200 ACCESS.bus Driver\n");
+ pr_debug("NatSemi SCx200 ACCESS.bus Driver\n");
/* First scan for ISA-based devices */
scx200_scan_isa(); /* XXX: should we care about errors? */
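
The NAME prefix can be dropped because every pr_*() macro wraps its format
string in pr_fmt() at expansion time. A sketch of the mechanism;
KBUILD_MODNAME is supplied per object file by kbuild, and the define must
precede the printk.h include (pulled in via module.h here):

	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
	#include <linux/printk.h>

	/* pr_err("probe failed\n") now prints "scx200_acb: probe failed" */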
diff --git a/drivers/i2c/busses/scx200_i2c.c b/drivers/i2c/busses/scx200_i2c.c
index 7ee0d50..ae1258b 100644
--- a/drivers/i2c/busses/scx200_i2c.c
+++ b/drivers/i2c/busses/scx200_i2c.c
@@ -21,6 +21,8 @@
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
@@ -31,8 +33,6 @@
#include <linux/scx200_gpio.h>
-#define NAME "scx200_i2c"
-
MODULE_AUTHOR("Christer Weinigel <wingel@nano-system.com>");
MODULE_DESCRIPTION("NatSemi SCx200 I2C Driver");
MODULE_LICENSE("GPL");
@@ -88,17 +88,17 @@ static struct i2c_adapter scx200_i2c_ops = {
static int scx200_i2c_init(void)
{
- pr_debug(NAME ": NatSemi SCx200 I2C Driver\n");
+ pr_debug("NatSemi SCx200 I2C Driver\n");
if (!scx200_gpio_present()) {
- printk(KERN_ERR NAME ": no SCx200 gpio pins available\n");
+ pr_err("no SCx200 gpio pins available\n");
return -ENODEV;
}
- pr_debug(NAME ": SCL=GPIO%02u, SDA=GPIO%02u\n", scl, sda);
+ pr_debug("SCL=GPIO%02u, SDA=GPIO%02u\n", scl, sda);
if (scl == -1 || sda == -1 || scl == sda) {
- printk(KERN_ERR NAME ": scl and sda must be specified\n");
+ pr_err("scl and sda must be specified\n");
return -EINVAL;
}
@@ -107,8 +107,7 @@ static int scx200_i2c_init(void)
scx200_gpio_configure(sda, ~2, 5);
if (i2c_bit_add_bus(&scx200_i2c_ops) < 0) {
- printk(KERN_ERR NAME ": adapter %s registration failed\n",
- scx200_i2c_ops.name);
+ pr_err("adapter %s registration failed\n", scx200_i2c_ops.name);
return -ENODEV;
}
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 2421d95..a7edf98 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -1989,12 +1989,22 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr,
unsigned char msgbuf0[I2C_SMBUS_BLOCK_MAX+3];
unsigned char msgbuf1[I2C_SMBUS_BLOCK_MAX+2];
int num = read_write == I2C_SMBUS_READ ? 2 : 1;
- struct i2c_msg msg[2] = { { addr, flags, 1, msgbuf0 },
- { addr, flags | I2C_M_RD, 0, msgbuf1 }
- };
int i;
u8 partial_pec = 0;
int status;
+ struct i2c_msg msg[2] = {
+ {
+ .addr = addr,
+ .flags = flags,
+ .len = 1,
+ .buf = msgbuf0,
+ }, {
+ .addr = addr,
+ .flags = flags | I2C_M_RD,
+ .len = 0,
+ .buf = msgbuf1,
+ },
+ };
msgbuf0[0] = command;
switch (size) {
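
The conversion is behavior-neutral: the same write-then-read message pair,
just with C99 designated initializers, which remain correct if the fields
of struct i2c_msg are ever reordered. Side by side, for one message:

	struct i2c_msg positional = { addr, flags, 1, msgbuf0 };
	struct i2c_msg designated = {
		.addr  = addr,
		.flags = flags,
		.len   = 1,
		.buf   = msgbuf0,
	};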
diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
index 1038c38..d94e0ce 100644
--- a/drivers/i2c/i2c-mux.c
+++ b/drivers/i2c/i2c-mux.c
@@ -88,9 +88,23 @@ static u32 i2c_mux_functionality(struct i2c_adapter *adap)
return parent->algo->functionality(parent);
}
+/* Return all parent classes, merged */
+static unsigned int i2c_mux_parent_classes(struct i2c_adapter *parent)
+{
+ unsigned int class = 0;
+
+ do {
+ class |= parent->class;
+ parent = i2c_parent_is_i2c_adapter(parent);
+ } while (parent);
+
+ return class;
+}
+
struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent,
struct device *mux_dev,
void *mux_priv, u32 force_nr, u32 chan_id,
+ unsigned int class,
int (*select) (struct i2c_adapter *,
void *, u32),
int (*deselect) (struct i2c_adapter *,
@@ -127,6 +141,14 @@ struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent,
priv->adap.algo_data = priv;
priv->adap.dev.parent = &parent->dev;
+ /* Sanity check on class */
+ if (i2c_mux_parent_classes(parent) & class)
+ dev_err(&parent->dev,
+ "Segment %d behind mux can't share classes with ancestors\n",
+ chan_id);
+ else
+ priv->adap.class = class;
+
/*
* Try to populate the mux adapter's of_node, expands to
* nothing if !CONFIG_OF.
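
The class check matters because i2c_detect() legacy probing runs on every
adapter whose class overlaps a driver's class mask; if a trunk and one of
its mux branches both advertised I2C_CLASS_SPD, the same physical device
could be instantiated twice. A hedged usage sketch of the extended
i2c_add_mux_adapter() signature, with my_select as a hypothetical
channel-select callback:

	struct i2c_adapter *branch;

	branch = i2c_add_mux_adapter(parent, &pdev->dev, mux_priv,
				     0 /* dynamic nr */, 0 /* chan_id */,
				     I2C_CLASS_SPD, my_select, NULL);
	if (!branch)
		return -ENODEV;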
diff --git a/drivers/i2c/i2c-smbus.c b/drivers/i2c/i2c-smbus.c
index df3e0bf..92cdd23 100644
--- a/drivers/i2c/i2c-smbus.c
+++ b/drivers/i2c/i2c-smbus.c
@@ -142,7 +142,8 @@ static int smbalert_probe(struct i2c_client *ara,
struct i2c_adapter *adapter = ara->adapter;
int res;
- alert = kzalloc(sizeof(struct i2c_smbus_alert), GFP_KERNEL);
+ alert = devm_kzalloc(&ara->dev, sizeof(struct i2c_smbus_alert),
+ GFP_KERNEL);
if (!alert)
return -ENOMEM;
@@ -154,10 +155,8 @@ static int smbalert_probe(struct i2c_client *ara,
if (setup->irq > 0) {
res = devm_request_irq(&ara->dev, setup->irq, smbalert_irq,
0, "smbus_alert", alert);
- if (res) {
- kfree(alert);
+ if (res)
return res;
- }
}
i2c_set_clientdata(ara, alert);
@@ -167,14 +166,12 @@ static int smbalert_probe(struct i2c_client *ara,
return 0;
}
-/* IRQ resource is managed so it is freed automatically */
+/* IRQ and memory resources are managed so they are freed automatically */
static int smbalert_remove(struct i2c_client *ara)
{
struct i2c_smbus_alert *alert = i2c_get_clientdata(ara);
cancel_work_sync(&alert->alert);
-
- kfree(alert);
return 0;
}
diff --git a/drivers/i2c/muxes/i2c-mux-gpio.c b/drivers/i2c/muxes/i2c-mux-gpio.c
index 68b1f8e..566a675 100644
--- a/drivers/i2c/muxes/i2c-mux-gpio.c
+++ b/drivers/i2c/muxes/i2c-mux-gpio.c
@@ -21,6 +21,7 @@ struct gpiomux {
struct i2c_adapter *parent;
struct i2c_adapter **adap; /* child busses */
struct i2c_mux_gpio_platform_data data;
+ unsigned gpio_base;
};
static void i2c_mux_gpio_set(const struct gpiomux *mux, unsigned val)
@@ -28,7 +29,8 @@ static void i2c_mux_gpio_set(const struct gpiomux *mux, unsigned val)
int i;
for (i = 0; i < mux->data.n_gpios; i++)
- gpio_set_value(mux->data.gpios[i], val & (1 << i));
+ gpio_set_value(mux->gpio_base + mux->data.gpios[i],
+ val & (1 << i));
}
static int i2c_mux_gpio_select(struct i2c_adapter *adap, void *data, u32 chan)
@@ -49,13 +51,19 @@ static int i2c_mux_gpio_deselect(struct i2c_adapter *adap, void *data, u32 chan)
return 0;
}
+static int __devinit match_gpio_chip_by_label(struct gpio_chip *chip,
+ void *data)
+{
+ return !strcmp(chip->label, data);
+}
+
static int __devinit i2c_mux_gpio_probe(struct platform_device *pdev)
{
struct gpiomux *mux;
struct i2c_mux_gpio_platform_data *pdata;
struct i2c_adapter *parent;
int (*deselect) (struct i2c_adapter *, void *, u32);
- unsigned initial_state;
+ unsigned initial_state, gpio_base;
int i, ret;
pdata = pdev->dev.platform_data;
@@ -64,6 +72,23 @@ static int __devinit i2c_mux_gpio_probe(struct platform_device *pdev)
return -ENODEV;
}
+ /*
+ * If a GPIO chip name is provided, the GPIO pin numbers provided are
+ * relative to its base GPIO number. Otherwise they are absolute.
+ */
+ if (pdata->gpio_chip) {
+ struct gpio_chip *gpio;
+
+ gpio = gpiochip_find(pdata->gpio_chip,
+ match_gpio_chip_by_label);
+ if (!gpio)
+ return -EPROBE_DEFER;
+
+ gpio_base = gpio->base;
+ } else {
+ gpio_base = 0;
+ }
+
parent = i2c_get_adapter(pdata->parent);
if (!parent) {
dev_err(&pdev->dev, "Parent adapter (%d) not found\n",
@@ -71,7 +96,7 @@ static int __devinit i2c_mux_gpio_probe(struct platform_device *pdev)
return -ENODEV;
}
- mux = kzalloc(sizeof(*mux), GFP_KERNEL);
+ mux = devm_kzalloc(&pdev->dev, sizeof(*mux), GFP_KERNEL);
if (!mux) {
ret = -ENOMEM;
goto alloc_failed;
@@ -79,11 +104,13 @@ static int __devinit i2c_mux_gpio_probe(struct platform_device *pdev)
mux->parent = parent;
mux->data = *pdata;
- mux->adap = kzalloc(sizeof(struct i2c_adapter *) * pdata->n_values,
- GFP_KERNEL);
+ mux->gpio_base = gpio_base;
+ mux->adap = devm_kzalloc(&pdev->dev,
+ sizeof(*mux->adap) * pdata->n_values,
+ GFP_KERNEL);
if (!mux->adap) {
ret = -ENOMEM;
- goto alloc_failed2;
+ goto alloc_failed;
}
if (pdata->idle != I2C_MUX_GPIO_NO_IDLE) {
@@ -95,17 +122,19 @@ static int __devinit i2c_mux_gpio_probe(struct platform_device *pdev)
}
for (i = 0; i < pdata->n_gpios; i++) {
- ret = gpio_request(pdata->gpios[i], "i2c-mux-gpio");
+ ret = gpio_request(gpio_base + pdata->gpios[i], "i2c-mux-gpio");
if (ret)
goto err_request_gpio;
- gpio_direction_output(pdata->gpios[i],
+ gpio_direction_output(gpio_base + pdata->gpios[i],
initial_state & (1 << i));
}
for (i = 0; i < pdata->n_values; i++) {
u32 nr = pdata->base_nr ? (pdata->base_nr + i) : 0;
+ unsigned int class = pdata->classes ? pdata->classes[i] : 0;
- mux->adap[i] = i2c_add_mux_adapter(parent, &pdev->dev, mux, nr, i,
+ mux->adap[i] = i2c_add_mux_adapter(parent, &pdev->dev, mux, nr,
+ i, class,
i2c_mux_gpio_select, deselect);
if (!mux->adap[i]) {
ret = -ENODEV;
@@ -127,10 +156,7 @@ add_adapter_failed:
i = pdata->n_gpios;
err_request_gpio:
for (; i > 0; i--)
- gpio_free(pdata->gpios[i - 1]);
- kfree(mux->adap);
-alloc_failed2:
- kfree(mux);
+ gpio_free(gpio_base + pdata->gpios[i - 1]);
alloc_failed:
i2c_put_adapter(parent);
@@ -146,12 +172,10 @@ static int __devexit i2c_mux_gpio_remove(struct platform_device *pdev)
i2c_del_mux_adapter(mux->adap[i]);
for (i = 0; i < mux->data.n_gpios; i++)
- gpio_free(mux->data.gpios[i]);
+ gpio_free(mux->gpio_base + mux->data.gpios[i]);
platform_set_drvdata(pdev, NULL);
i2c_put_adapter(mux->parent);
- kfree(mux->adap);
- kfree(mux);
return 0;
}
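
The chip-relative addressing introduced above first resolves a gpio_chip
by label, then offsets every pin number from its base; returning
-EPROBE_DEFER lets probing retry after the GPIO driver binds. A standalone
sketch of the same lookup ("gpio_ich" and the offset mirror the i801 case
earlier in this series):

	static int match_label(struct gpio_chip *chip, void *data)
	{
		return !strcmp(chip->label, data);
	}

	static int resolve_gpio(unsigned offset)
	{
		struct gpio_chip *chip = gpiochip_find("gpio_ich", match_label);

		if (!chip)
			return -EPROBE_DEFER;	/* chip may not be bound yet */

		return chip->base + offset;	/* global number for gpio_request() */
	}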
diff --git a/drivers/i2c/muxes/i2c-mux-pca9541.c b/drivers/i2c/muxes/i2c-mux-pca9541.c
index f8f72f3..f3b8f9a 100644
--- a/drivers/i2c/muxes/i2c-mux-pca9541.c
+++ b/drivers/i2c/muxes/i2c-mux-pca9541.c
@@ -354,7 +354,7 @@ static int pca9541_probe(struct i2c_client *client,
if (pdata)
force = pdata->modes[0].adap_id;
data->mux_adap = i2c_add_mux_adapter(adap, &client->dev, client,
- force, 0,
+ force, 0, 0,
pca9541_select_chan,
pca9541_release_chan);
diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
index f2dfe0d..8e43872 100644
--- a/drivers/i2c/muxes/i2c-mux-pca954x.c
+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
@@ -186,7 +186,7 @@ static int pca954x_probe(struct i2c_client *client,
{
struct i2c_adapter *adap = to_i2c_adapter(client->dev.parent);
struct pca954x_platform_data *pdata = client->dev.platform_data;
- int num, force;
+ int num, force, class;
struct pca954x *data;
int ret = -ENODEV;
@@ -216,18 +216,20 @@ static int pca954x_probe(struct i2c_client *client,
/* Now create an adapter for each channel */
for (num = 0; num < chips[data->type].nchans; num++) {
force = 0; /* dynamic adap number */
+ class = 0; /* no class by default */
if (pdata) {
- if (num < pdata->num_modes)
+ if (num < pdata->num_modes) {
/* force static number */
force = pdata->modes[num].adap_id;
- else
+ class = pdata->modes[num].class;
+ } else
/* discard unconfigured channels */
break;
}
data->virt_adaps[num] =
i2c_add_mux_adapter(adap, &client->dev, client,
- force, num, pca954x_select_chan,
+ force, num, class, pca954x_select_chan,
(pdata && pdata->modes[num].deselect_on_exit)
? pca954x_deselect_mux : NULL);
diff --git a/drivers/i2c/muxes/i2c-mux-pinctrl.c b/drivers/i2c/muxes/i2c-mux-pinctrl.c
index 46a6697..5f097f3 100644
--- a/drivers/i2c/muxes/i2c-mux-pinctrl.c
+++ b/drivers/i2c/muxes/i2c-mux-pinctrl.c
@@ -221,7 +221,7 @@ static int __devinit i2c_mux_pinctrl_probe(struct platform_device *pdev)
(mux->pdata->base_bus_num + i) : 0;
mux->busses[i] = i2c_add_mux_adapter(mux->parent, &pdev->dev,
- mux, bus, i,
+ mux, bus, i, 0,
i2c_mux_pinctrl_select,
deselect);
if (!mux->busses[i]) {
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index e872617..b0f6b4c 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -413,6 +413,7 @@ static const struct x86_cpu_id intel_idle_ids[] = {
ICPU(0x2a, idle_cpu_snb),
ICPU(0x2d, idle_cpu_snb),
ICPU(0x3a, idle_cpu_ivb),
+ ICPU(0x3e, idle_cpu_ivb),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids);
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 1983adc..a7568c3 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -3498,7 +3498,8 @@ out:
}
static const struct ibnl_client_cbs cma_cb_table[] = {
- [RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats },
+ [RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats,
+ .module = THIS_MODULE },
};
static int __init cma_init(void)
diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
index fe10a94..da06abd 100644
--- a/drivers/infiniband/core/netlink.c
+++ b/drivers/infiniband/core/netlink.c
@@ -154,6 +154,7 @@ static int ibnl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
struct netlink_dump_control c = {
.dump = client->cb_table[op].dump,
+ .module = client->cb_table[op].module,
};
return netlink_dump_start(nls, skb, nlh, &c);
}
diff --git a/drivers/infiniband/hw/ehca/ehca_uverbs.c b/drivers/infiniband/hw/ehca/ehca_uverbs.c
index 45ee89b..1a1d5d9 100644
--- a/drivers/infiniband/hw/ehca/ehca_uverbs.c
+++ b/drivers/infiniband/hw/ehca/ehca_uverbs.c
@@ -117,7 +117,7 @@ static int ehca_mmap_fw(struct vm_area_struct *vma, struct h_galpas *galpas,
physical = galpas->user.fw_handle;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
ehca_gen_dbg("vsize=%llx physical=%llx", vsize, physical);
- /* VM_IO | VM_RESERVED are set by remap_pfn_range() */
+ /* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */
ret = remap_4k_pfn(vma, vma->vm_start, physical >> EHCA_PAGESHIFT,
vma->vm_page_prot);
if (unlikely(ret)) {
@@ -139,7 +139,7 @@ static int ehca_mmap_queue(struct vm_area_struct *vma, struct ipz_queue *queue,
u64 start, ofs;
struct page *page;
- vma->vm_flags |= VM_RESERVED;
+ vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
start = vma->vm_start;
for (ofs = 0; ofs < queue->queue_length; ofs += PAGE_SIZE) {
u64 virt_addr = (u64)ipz_qeit_calc(queue, ofs);
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
index 736d9ed..3eb7e45 100644
--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
+++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -1225,7 +1225,7 @@ static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT;
vma->vm_ops = &ipath_file_vm_ops;
- vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
+ vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
ret = 1;
bail:
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index faa44cb..959a5c4 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -971,7 +971,7 @@ static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT;
vma->vm_ops = &qib_file_vm_ops;
- vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
+ vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
ret = 1;
bail:
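
The ehca, ipath and qib hunks are one mechanical translation: VM_RESERVED
is being retired kernel-wide, and its two surviving effects, no expansion
via mremap() and exclusion from core dumps, are now named explicitly.
Every converted mmap handler ends up with the same line (needs
<linux/mm.h>):

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;	/* was VM_RESERVED */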
diff --git a/drivers/input/misc/atlas_btns.c b/drivers/input/misc/atlas_btns.c
index 601f737..26f1313 100644
--- a/drivers/input/misc/atlas_btns.c
+++ b/drivers/input/misc/atlas_btns.c
@@ -151,22 +151,7 @@ static struct acpi_driver atlas_acpi_driver = {
.remove = atlas_acpi_button_remove,
},
};
-
-static int __init atlas_acpi_init(void)
-{
- if (acpi_disabled)
- return -ENODEV;
-
- return acpi_bus_register_driver(&atlas_acpi_driver);
-}
-
-static void __exit atlas_acpi_exit(void)
-{
- acpi_bus_unregister_driver(&atlas_acpi_driver);
-}
-
-module_init(atlas_acpi_init);
-module_exit(atlas_acpi_exit);
+module_acpi_driver(atlas_acpi_driver);
MODULE_AUTHOR("Jaya Kumar");
MODULE_LICENSE("GPL");
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 9f69b56..e39f9db 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -42,7 +42,7 @@ config AMD_IOMMU
select PCI_PRI
select PCI_PASID
select IOMMU_API
- depends on X86_64 && PCI && ACPI
+ depends on X86_64 && PCI && ACPI && X86_IO_APIC
---help---
With this option you can enable support for AMD IOMMU hardware in
your system. An IOMMU is a hardware component which provides
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index e89daf1..55074cb 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -31,6 +31,12 @@
#include <linux/amd-iommu.h>
#include <linux/notifier.h>
#include <linux/export.h>
+#include <linux/irq.h>
+#include <linux/msi.h>
+#include <asm/irq_remapping.h>
+#include <asm/io_apic.h>
+#include <asm/apic.h>
+#include <asm/hw_irq.h>
#include <asm/msidef.h>
#include <asm/proto.h>
#include <asm/iommu.h>
@@ -39,6 +45,7 @@
#include "amd_iommu_proto.h"
#include "amd_iommu_types.h"
+#include "irq_remapping.h"
#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
@@ -72,6 +79,9 @@ static DEFINE_SPINLOCK(iommu_pd_list_lock);
static LIST_HEAD(dev_data_list);
static DEFINE_SPINLOCK(dev_data_list_lock);
+LIST_HEAD(ioapic_map);
+LIST_HEAD(hpet_map);
+
/*
* Domain for untranslated devices - only allocated
* if iommu=pt passed on kernel cmd line.
@@ -92,6 +102,8 @@ struct iommu_cmd {
u32 data[4];
};
+struct kmem_cache *amd_iommu_irq_cache;
+
static void update_domain(struct protection_domain *domain);
static int __init alloc_passthrough_domain(void);
@@ -686,7 +698,7 @@ static void iommu_poll_ppr_log(struct amd_iommu *iommu)
/*
* Release iommu->lock because ppr-handling might need to
- * re-aquire it
+ * re-acquire it
*/
spin_unlock_irqrestore(&iommu->lock, flags);
@@ -804,7 +816,7 @@ static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
if (s) /* size bit - we flush more than one 4kb page */
cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
- if (pde) /* PDE bit - we wan't flush everything not only the PTEs */
+ if (pde) /* PDE bit - we want to flush everything, not only the PTEs */
cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
}
@@ -899,6 +911,13 @@ static void build_inv_all(struct iommu_cmd *cmd)
CMD_SET_TYPE(cmd, CMD_INV_ALL);
}
+static void build_inv_irt(struct iommu_cmd *cmd, u16 devid)
+{
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->data[0] = devid;
+ CMD_SET_TYPE(cmd, CMD_INV_IRT);
+}
+
/*
* Writes the command to the IOMMUs command buffer and informs the
* hardware about the new command.
@@ -1020,12 +1039,32 @@ static void iommu_flush_all(struct amd_iommu *iommu)
iommu_completion_wait(iommu);
}
+static void iommu_flush_irt(struct amd_iommu *iommu, u16 devid)
+{
+ struct iommu_cmd cmd;
+
+ build_inv_irt(&cmd, devid);
+
+ iommu_queue_command(iommu, &cmd);
+}
+
+static void iommu_flush_irt_all(struct amd_iommu *iommu)
+{
+ u32 devid;
+
+ for (devid = 0; devid <= MAX_DEV_TABLE_ENTRIES; devid++)
+ iommu_flush_irt(iommu, devid);
+
+ iommu_completion_wait(iommu);
+}
+
void iommu_flush_all_caches(struct amd_iommu *iommu)
{
if (iommu_feature(iommu, FEATURE_IA)) {
iommu_flush_all(iommu);
} else {
iommu_flush_dte_all(iommu);
+ iommu_flush_irt_all(iommu);
iommu_flush_tlb_all(iommu);
}
}
@@ -2155,7 +2194,7 @@ static bool pci_pri_tlp_required(struct pci_dev *pdev)
}
/*
- * If a device is not yet associated with a domain, this function does
+ * If a device is not yet associated with a domain, this function
* assigns it visible for the hardware
*/
static int attach_device(struct device *dev,
@@ -2405,7 +2444,7 @@ static struct protection_domain *get_domain(struct device *dev)
if (domain != NULL)
return domain;
- /* Device not bount yet - bind it */
+ /* Device not bound yet - bind it */
dma_dom = find_protection_domain(devid);
if (!dma_dom)
dma_dom = amd_iommu_rlookup_table[devid]->default_dom;
@@ -2944,7 +2983,7 @@ static void __init prealloc_protection_domains(void)
alloc_passthrough_domain();
dev_data->passthrough = true;
attach_device(&dev->dev, pt_domain);
- pr_info("AMD-Vi: Using passthough domain for device %s\n",
+ pr_info("AMD-Vi: Using passthrough domain for device %s\n",
dev_name(&dev->dev));
}
@@ -3316,6 +3355,8 @@ static int amd_iommu_domain_has_cap(struct iommu_domain *domain,
switch (cap) {
case IOMMU_CAP_CACHE_COHERENCY:
return 1;
+ case IOMMU_CAP_INTR_REMAP:
+ return irq_remapping_enabled;
}
return 0;
@@ -3743,3 +3784,466 @@ int amd_iommu_device_info(struct pci_dev *pdev,
return 0;
}
EXPORT_SYMBOL(amd_iommu_device_info);
+
+#ifdef CONFIG_IRQ_REMAP
+
+/*****************************************************************************
+ *
+ * Interrupt Remapping Implementation
+ *
+ *****************************************************************************/
+
+union irte {
+ u32 val;
+ struct {
+ u32 valid : 1,
+ no_fault : 1,
+ int_type : 3,
+ rq_eoi : 1,
+ dm : 1,
+ rsvd_1 : 1,
+ destination : 8,
+ vector : 8,
+ rsvd_2 : 8;
+ } fields;
+};
+
+#define DTE_IRQ_PHYS_ADDR_MASK (((1ULL << 45)-1) << 6)
+#define DTE_IRQ_REMAP_INTCTL (2ULL << 60)
+#define DTE_IRQ_TABLE_LEN (8ULL << 1)
+#define DTE_IRQ_REMAP_ENABLE 1ULL
+
+static void set_dte_irq_entry(u16 devid, struct irq_remap_table *table)
+{
+ u64 dte;
+
+ dte = amd_iommu_dev_table[devid].data[2];
+ dte &= ~DTE_IRQ_PHYS_ADDR_MASK;
+ dte |= virt_to_phys(table->table);
+ dte |= DTE_IRQ_REMAP_INTCTL;
+ dte |= DTE_IRQ_TABLE_LEN;
+ dte |= DTE_IRQ_REMAP_ENABLE;
+
+ amd_iommu_dev_table[devid].data[2] = dte;
+}
+
+#define IRTE_ALLOCATED (~1U)
+
+static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)
+{
+ struct irq_remap_table *table = NULL;
+ struct amd_iommu *iommu;
+ unsigned long flags;
+ u16 alias;
+
+ write_lock_irqsave(&amd_iommu_devtable_lock, flags);
+
+ iommu = amd_iommu_rlookup_table[devid];
+ if (!iommu)
+ goto out_unlock;
+
+ table = irq_lookup_table[devid];
+ if (table)
+ goto out;
+
+ alias = amd_iommu_alias_table[devid];
+ table = irq_lookup_table[alias];
+ if (table) {
+ irq_lookup_table[devid] = table;
+ set_dte_irq_entry(devid, table);
+ iommu_flush_dte(iommu, devid);
+ goto out;
+ }
+
+ /* Nothing there yet, allocate new irq remapping table */
+ table = kzalloc(sizeof(*table), GFP_ATOMIC);
+ if (!table)
+ goto out;
+
+ if (ioapic)
+ /* Keep the first 32 indexes free for IOAPIC interrupts */
+ table->min_index = 32;
+
+ table->table = kmem_cache_alloc(amd_iommu_irq_cache, GFP_ATOMIC);
+ if (!table->table) {
+ kfree(table);
+ table = NULL;
+ goto out;
+ }
+
+ memset(table->table, 0, MAX_IRQS_PER_TABLE * sizeof(u32));
+
+ if (ioapic) {
+ int i;
+
+ for (i = 0; i < 32; ++i)
+ table->table[i] = IRTE_ALLOCATED;
+ }
+
+ irq_lookup_table[devid] = table;
+ set_dte_irq_entry(devid, table);
+ iommu_flush_dte(iommu, devid);
+ if (devid != alias) {
+ irq_lookup_table[alias] = table;
+ set_dte_irq_entry(alias, table);
+ iommu_flush_dte(iommu, alias);
+ }
+
+out:
+ iommu_completion_wait(iommu);
+
+out_unlock:
+ write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
+
+ return table;
+}
+
+static int alloc_irq_index(struct irq_cfg *cfg, u16 devid, int count)
+{
+ struct irq_remap_table *table;
+ unsigned long flags;
+ int index, c;
+
+ table = get_irq_table(devid, false);
+ if (!table)
+ return -ENODEV;
+
+ spin_lock_irqsave(&table->lock, flags);
+
+ /* Scan table for free entries */
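+ /*
+ * c counts consecutive free slots and resets on a used one; once
+ * c == count, slots [index - count + 1, index] are all free, so
+ * they are marked allocated and index is rewound to the first.
+ */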
+ for (c = 0, index = table->min_index;
+ index < MAX_IRQS_PER_TABLE;
+ ++index) {
+ if (table->table[index] == 0)
+ c += 1;
+ else
+ c = 0;
+
+ if (c == count) {
+ struct irq_2_iommu *irte_info;
+
+ for (; c != 0; --c)
+ table->table[index - c + 1] = IRTE_ALLOCATED;
+
+ index -= count - 1;
+
+ irte_info = &cfg->irq_2_iommu;
+ irte_info->sub_handle = devid;
+ irte_info->irte_index = index;
+ irte_info->iommu = (void *)cfg;
+
+ goto out;
+ }
+ }
+
+ index = -ENOSPC;
+
+out:
+ spin_unlock_irqrestore(&table->lock, flags);
+
+ return index;
+}
+
+static int get_irte(u16 devid, int index, union irte *irte)
+{
+ struct irq_remap_table *table;
+ unsigned long flags;
+
+ table = get_irq_table(devid, false);
+ if (!table)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&table->lock, flags);
+ irte->val = table->table[index];
+ spin_unlock_irqrestore(&table->lock, flags);
+
+ return 0;
+}
+
+static int modify_irte(u16 devid, int index, union irte irte)
+{
+ struct irq_remap_table *table;
+ struct amd_iommu *iommu;
+ unsigned long flags;
+
+ iommu = amd_iommu_rlookup_table[devid];
+ if (iommu == NULL)
+ return -EINVAL;
+
+ table = get_irq_table(devid, false);
+ if (!table)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&table->lock, flags);
+ table->table[index] = irte.val;
+ spin_unlock_irqrestore(&table->lock, flags);
+
+ iommu_flush_irt(iommu, devid);
+ iommu_completion_wait(iommu);
+
+ return 0;
+}
+
+static void free_irte(u16 devid, int index)
+{
+ struct irq_remap_table *table;
+ struct amd_iommu *iommu;
+ unsigned long flags;
+
+ iommu = amd_iommu_rlookup_table[devid];
+ if (iommu == NULL)
+ return;
+
+ table = get_irq_table(devid, false);
+ if (!table)
+ return;
+
+ spin_lock_irqsave(&table->lock, flags);
+ table->table[index] = 0;
+ spin_unlock_irqrestore(&table->lock, flags);
+
+ iommu_flush_irt(iommu, devid);
+ iommu_completion_wait(iommu);
+}
+
+static int setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry,
+ unsigned int destination, int vector,
+ struct io_apic_irq_attr *attr)
+{
+ struct irq_remap_table *table;
+ struct irq_2_iommu *irte_info;
+ struct irq_cfg *cfg;
+ union irte irte;
+ int ioapic_id;
+ int index;
+ int devid;
+ int ret;
+
+ cfg = irq_get_chip_data(irq);
+ if (!cfg)
+ return -EINVAL;
+
+ irte_info = &cfg->irq_2_iommu;
+ ioapic_id = mpc_ioapic_id(attr->ioapic);
+ devid = get_ioapic_devid(ioapic_id);
+
+ if (devid < 0)
+ return devid;
+
+ table = get_irq_table(devid, true);
+ if (table == NULL)
+ return -ENOMEM;
+
+ index = attr->ioapic_pin;
+
+ /* Setup IRQ remapping info */
+ irte_info->sub_handle = devid;
+ irte_info->irte_index = index;
+ irte_info->iommu = (void *)cfg;
+
+ /* Setup IRTE for IOMMU */
+ irte.val = 0;
+ irte.fields.vector = vector;
+ irte.fields.int_type = apic->irq_delivery_mode;
+ irte.fields.destination = destination;
+ irte.fields.dm = apic->irq_dest_mode;
+ irte.fields.valid = 1;
+
+ ret = modify_irte(devid, index, irte);
+ if (ret)
+ return ret;
+
+ /* Setup IOAPIC entry */
+ memset(entry, 0, sizeof(*entry));
+
+ entry->vector = index;
+ entry->mask = 0;
+ entry->trigger = attr->trigger;
+ entry->polarity = attr->polarity;
+
+ /*
+ * Mask level triggered irqs.
+ */
+ if (attr->trigger)
+ entry->mask = 1;
+
+ return 0;
+}
+
+static int set_affinity(struct irq_data *data, const struct cpumask *mask,
+ bool force)
+{
+ struct irq_2_iommu *irte_info;
+ unsigned int dest, irq;
+ struct irq_cfg *cfg;
+ union irte irte;
+ int err;
+
+ if (!config_enabled(CONFIG_SMP))
+ return -1;
+
+ cfg = data->chip_data;
+ irq = data->irq;
+ irte_info = &cfg->irq_2_iommu;
+
+ if (!cpumask_intersects(mask, cpu_online_mask))
+ return -EINVAL;
+
+ if (get_irte(irte_info->sub_handle, irte_info->irte_index, &irte))
+ return -EBUSY;
+
+ if (assign_irq_vector(irq, cfg, mask))
+ return -EBUSY;
+
+ err = apic->cpu_mask_to_apicid_and(cfg->domain, mask, &dest);
+ if (err) {
+ if (assign_irq_vector(irq, cfg, data->affinity))
+ pr_err("AMD-Vi: Failed to recover vector for irq %d\n", irq);
+ return err;
+ }
+
+ irte.fields.vector = cfg->vector;
+ irte.fields.destination = dest;
+
+ modify_irte(irte_info->sub_handle, irte_info->irte_index, irte);
+
+ if (cfg->move_in_progress)
+ send_cleanup_vector(cfg);
+
+ cpumask_copy(data->affinity, mask);
+
+ return 0;
+}
+
+static int free_irq(int irq)
+{
+ struct irq_2_iommu *irte_info;
+ struct irq_cfg *cfg;
+
+ cfg = irq_get_chip_data(irq);
+ if (!cfg)
+ return -EINVAL;
+
+ irte_info = &cfg->irq_2_iommu;
+
+ free_irte(irte_info->sub_handle, irte_info->irte_index);
+
+ return 0;
+}
+
+static void compose_msi_msg(struct pci_dev *pdev,
+ unsigned int irq, unsigned int dest,
+ struct msi_msg *msg, u8 hpet_id)
+{
+ struct irq_2_iommu *irte_info;
+ struct irq_cfg *cfg;
+ union irte irte;
+
+ cfg = irq_get_chip_data(irq);
+ if (!cfg)
+ return;
+
+ irte_info = &cfg->irq_2_iommu;
+
+ irte.val = 0;
+ irte.fields.vector = cfg->vector;
+ irte.fields.int_type = apic->irq_delivery_mode;
+ irte.fields.destination = dest;
+ irte.fields.dm = apic->irq_dest_mode;
+ irte.fields.valid = 1;
+
+ modify_irte(irte_info->sub_handle, irte_info->irte_index, irte);
+
+ msg->address_hi = MSI_ADDR_BASE_HI;
+ msg->address_lo = MSI_ADDR_BASE_LO;
+ msg->data = irte_info->irte_index;
+}
+
+static int msi_alloc_irq(struct pci_dev *pdev, int irq, int nvec)
+{
+ struct irq_cfg *cfg;
+ int index;
+ u16 devid;
+
+ if (!pdev)
+ return -EINVAL;
+
+ cfg = irq_get_chip_data(irq);
+ if (!cfg)
+ return -EINVAL;
+
+ devid = get_device_id(&pdev->dev);
+ index = alloc_irq_index(cfg, devid, nvec);
+
+ return index < 0 ? MAX_IRQS_PER_TABLE : index;
+}
+
+static int msi_setup_irq(struct pci_dev *pdev, unsigned int irq,
+ int index, int offset)
+{
+ struct irq_2_iommu *irte_info;
+ struct irq_cfg *cfg;
+ u16 devid;
+
+ if (!pdev)
+ return -EINVAL;
+
+ cfg = irq_get_chip_data(irq);
+ if (!cfg)
+ return -EINVAL;
+
+ if (index >= MAX_IRQS_PER_TABLE)
+ return 0;
+
+ devid = get_device_id(&pdev->dev);
+ irte_info = &cfg->irq_2_iommu;
+
+ irte_info->sub_handle = devid;
+ irte_info->irte_index = index + offset;
+ irte_info->iommu = (void *)cfg;
+
+ return 0;
+}
+
+static int setup_hpet_msi(unsigned int irq, unsigned int id)
+{
+ struct irq_2_iommu *irte_info;
+ struct irq_cfg *cfg;
+ int index, devid;
+
+ cfg = irq_get_chip_data(irq);
+ if (!cfg)
+ return -EINVAL;
+
+ irte_info = &cfg->irq_2_iommu;
+ devid = get_hpet_devid(id);
+ if (devid < 0)
+ return devid;
+
+ index = alloc_irq_index(cfg, devid, 1);
+ if (index < 0)
+ return index;
+
+ irte_info->sub_handle = devid;
+ irte_info->irte_index = index;
+ irte_info->iommu = (void *)cfg;
+
+ return 0;
+}
+
+struct irq_remap_ops amd_iommu_irq_ops = {
+ .supported = amd_iommu_supported,
+ .prepare = amd_iommu_prepare,
+ .enable = amd_iommu_enable,
+ .disable = amd_iommu_disable,
+ .reenable = amd_iommu_reenable,
+ .enable_faulting = amd_iommu_enable_faulting,
+ .setup_ioapic_entry = setup_ioapic_entry,
+ .set_affinity = set_affinity,
+ .free_irq = free_irq,
+ .compose_msi_msg = compose_msi_msg,
+ .msi_alloc_irq = msi_alloc_irq,
+ .msi_setup_irq = msi_setup_irq,
+ .setup_hpet_msi = setup_hpet_msi,
+};
+#endif
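
For orientation, a hedged fill of the 32-bit IRTE defined at the top of
this block; the values are illustrative only:

	union irte entry;

	entry.val = 0;
	entry.fields.vector      = 0x41;	/* CPU vector to deliver */
	entry.fields.int_type    = 0;		/* fixed delivery mode */
	entry.fields.destination = 3;		/* target APIC ID */
	entry.fields.dm          = 0;		/* physical destination mode */
	entry.fields.valid       = 1;		/* hardware may now use it */

	/* modify_irte(devid, index, entry) stores it and flushes the IRT */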
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 18a89b7..18b0d99 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -26,16 +26,18 @@
#include <linux/msi.h>
#include <linux/amd-iommu.h>
#include <linux/export.h>
-#include <linux/acpi.h>
#include <acpi/acpi.h>
#include <asm/pci-direct.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>
+#include <asm/io_apic.h>
+#include <asm/irq_remapping.h>
#include "amd_iommu_proto.h"
#include "amd_iommu_types.h"
+#include "irq_remapping.h"
/*
* definitions for the ACPI scanning code
@@ -55,6 +57,10 @@
#define IVHD_DEV_ALIAS_RANGE 0x43
#define IVHD_DEV_EXT_SELECT 0x46
#define IVHD_DEV_EXT_SELECT_RANGE 0x47
+#define IVHD_DEV_SPECIAL 0x48
+
+#define IVHD_SPECIAL_IOAPIC 1
+#define IVHD_SPECIAL_HPET 2
#define IVHD_FLAG_HT_TUN_EN_MASK 0x01
#define IVHD_FLAG_PASSPW_EN_MASK 0x02
@@ -123,6 +129,7 @@ struct ivmd_header {
} __attribute__((packed));
bool amd_iommu_dump;
+bool amd_iommu_irq_remap __read_mostly;
static bool amd_iommu_detected;
static bool __initdata amd_iommu_disabled;
@@ -178,7 +185,13 @@ u16 *amd_iommu_alias_table;
struct amd_iommu **amd_iommu_rlookup_table;
/*
- * AMD IOMMU allows up to 2^16 differend protection domains. This is a bitmap
+ * This table is used to find the irq remapping table for a given device id
+ * quickly.
+ */
+struct irq_remap_table **irq_lookup_table;
+
+/*
+ * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
* to know which ones are already in use.
*/
unsigned long *amd_iommu_pd_alloc_bitmap;
@@ -478,7 +491,7 @@ static int __init find_last_devid_acpi(struct acpi_table_header *table)
/****************************************************************************
*
- * The following functions belong the the code path which parses the ACPI table
+ * The following functions belong to the code path which parses the ACPI table
* the second time. In this ACPI parsing iteration we allocate IOMMU specific
* data structures, initialize the device/alias/rlookup table and also
* basically initialize the hardware.
@@ -690,8 +703,33 @@ static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
set_iommu_for_device(iommu, devid);
}
+static int add_special_device(u8 type, u8 id, u16 devid)
+{
+ struct devid_map *entry;
+ struct list_head *list;
+
+ if (type != IVHD_SPECIAL_IOAPIC && type != IVHD_SPECIAL_HPET)
+ return -EINVAL;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ entry->id = id;
+ entry->devid = devid;
+
+ if (type == IVHD_SPECIAL_IOAPIC)
+ list = &ioapic_map;
+ else
+ list = &hpet_map;
+
+ list_add_tail(&entry->list, list);
+
+ return 0;
+}
+
/*
- * Reads the device exclusion range from ACPI and initialize IOMMU with
+ * Reads the device exclusion range from ACPI and initializes the IOMMU with
* it
*/
static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
@@ -717,7 +755,7 @@ static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
* Takes a pointer to an AMD IOMMU entry in the ACPI table and
* initializes the hardware and our data structures with it.
*/
-static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
+static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
struct ivhd_header *h)
{
u8 *p = (u8 *)h;
@@ -867,12 +905,43 @@ static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
flags, ext_flags);
}
break;
+ case IVHD_DEV_SPECIAL: {
+ u8 handle, type;
+ const char *var;
+ u16 devid;
+ int ret;
+
+ handle = e->ext & 0xff;
+ devid = (e->ext >> 8) & 0xffff;
+ type = (e->ext >> 24) & 0xff;
+
+ if (type == IVHD_SPECIAL_IOAPIC)
+ var = "IOAPIC";
+ else if (type == IVHD_SPECIAL_HPET)
+ var = "HPET";
+ else
+ var = "UNKNOWN";
+
+ DUMP_printk(" DEV_SPECIAL(%s[%d])\t\tdevid: %02x:%02x.%x\n",
+ var, (int)handle,
+ PCI_BUS(devid),
+ PCI_SLOT(devid),
+ PCI_FUNC(devid));
+
+ set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
+ ret = add_special_device(type, handle, devid);
+ if (ret)
+ return ret;
+ break;
+ }
default:
break;
}
p += ivhd_entry_length(p);
}
+
+ return 0;
}
/* Initializes the device->iommu mapping for the driver */
@@ -912,6 +981,8 @@ static void __init free_iommu_all(void)
*/
static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
{
+ int ret;
+
spin_lock_init(&iommu->lock);
/* Add IOMMU to internal data structures */
@@ -947,7 +1018,16 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
iommu->int_enabled = false;
- init_iommu_from_acpi(iommu, h);
+ ret = init_iommu_from_acpi(iommu, h);
+ if (ret)
+ return ret;
+
+ /*
+ * Make sure IOMMU is not considered to translate itself. The IVRS
+ * table tells us so, but this is a lie!
+ */
+ amd_iommu_rlookup_table[iommu->devid] = NULL;
+
init_iommu_devices(iommu);
return 0;
@@ -1115,9 +1195,11 @@ static void print_iommu_info(void)
if (iommu_feature(iommu, (1ULL << i)))
pr_cont(" %s", feat_str[i]);
}
- }
pr_cont("\n");
+ }
}
+ if (irq_remapping_enabled)
+ pr_info("AMD-Vi: Interrupt remapping enabled\n");
}
static int __init amd_iommu_init_pci(void)
@@ -1141,7 +1223,7 @@ static int __init amd_iommu_init_pci(void)
/****************************************************************************
*
* The following functions initialize the MSI interrupts for all IOMMUs
- * in the system. Its a bit challenging because there could be multiple
+ * in the system. It's a bit challenging because there could be multiple
* IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per
* pci_dev.
*
@@ -1199,7 +1281,7 @@ enable_faults:
*
* The next functions belong to the third pass of parsing the ACPI
* table. In this last pass the memory mapping requirements are
- * gathered (like exclusion and unity mapping reanges).
+ * gathered (like exclusion and unity mapping ranges).
*
****************************************************************************/
@@ -1308,7 +1390,7 @@ static int __init init_memory_definitions(struct acpi_table_header *table)
* Init the device table to not allow DMA access for devices and
* suppress all page faults
*/
-static void init_device_table(void)
+static void init_device_table_dma(void)
{
u32 devid;
@@ -1318,6 +1400,27 @@ static void init_device_table(void)
}
}
+static void __init uninit_device_table_dma(void)
+{
+ u32 devid;
+
+ for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
+ amd_iommu_dev_table[devid].data[0] = 0ULL;
+ amd_iommu_dev_table[devid].data[1] = 0ULL;
+ }
+}
+
+static void init_device_table(void)
+{
+ u32 devid;
+
+ if (!amd_iommu_irq_remap)
+ return;
+
+ for (devid = 0; devid <= amd_iommu_last_bdf; ++devid)
+ set_dev_entry_bit(devid, DEV_ENTRY_IRQ_TBL_EN);
+}
+
static void iommu_init_flags(struct amd_iommu *iommu)
{
iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
@@ -1466,10 +1569,14 @@ static struct syscore_ops amd_iommu_syscore_ops = {
static void __init free_on_init_error(void)
{
- amd_iommu_uninit_devices();
+ free_pages((unsigned long)irq_lookup_table,
+ get_order(rlookup_table_size));
- free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
- get_order(MAX_DOMAIN_ID/8));
+ if (amd_iommu_irq_cache) {
+ kmem_cache_destroy(amd_iommu_irq_cache);
+ amd_iommu_irq_cache = NULL;
+ }
free_pages((unsigned long)amd_iommu_rlookup_table,
get_order(rlookup_table_size));
@@ -1482,8 +1589,6 @@ static void __init free_on_init_error(void)
free_iommu_all();
- free_unity_maps();
-
#ifdef CONFIG_GART_IOMMU
/*
* We failed to initialize the AMD IOMMU - try fallback to GART
@@ -1494,6 +1599,33 @@ static void __init free_on_init_error(void)
#endif
}
+static bool __init check_ioapic_information(void)
+{
+ int idx;
+
+ for (idx = 0; idx < nr_ioapics; idx++) {
+ int id = mpc_ioapic_id(idx);
+
+ if (get_ioapic_devid(id) < 0) {
+ pr_err(FW_BUG "AMD-Vi: IO-APIC[%d] not in IVRS table\n", id);
+ pr_err("AMD-Vi: Disabling interrupt remapping due to BIOS Bug\n");
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static void __init free_dma_resources(void)
+{
+ amd_iommu_uninit_devices();
+
+ free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
+ get_order(MAX_DOMAIN_ID/8));
+
+ free_unity_maps();
+}
+
/*
* This is the hardware init function for AMD IOMMU in the system.
* This function is called either from amd_iommu_init or from the interrupt
@@ -1580,9 +1712,6 @@ static int __init early_amd_iommu_init(void)
if (amd_iommu_pd_alloc_bitmap == NULL)
goto out;
- /* init the device table */
- init_device_table();
-
/*
* let all alias entries point to itself
*/
@@ -1605,10 +1734,35 @@ static int __init early_amd_iommu_init(void)
if (ret)
goto out;
+ if (amd_iommu_irq_remap)
+ amd_iommu_irq_remap = check_ioapic_information();
+
+ if (amd_iommu_irq_remap) {
+ /*
+ * Interrupt remapping enabled, create kmem_cache for the
+ * remapping tables.
+ */
+ amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache",
+ MAX_IRQS_PER_TABLE * sizeof(u32),
+ IRQ_TABLE_ALIGNMENT,
+ 0, NULL);
+ if (!amd_iommu_irq_cache)
+ goto out;
+
+ irq_lookup_table = (void *)__get_free_pages(
+ GFP_KERNEL | __GFP_ZERO,
+ get_order(rlookup_table_size));
+ if (!irq_lookup_table)
+ goto out;
+ }
+
ret = init_memory_definitions(ivrs_base);
if (ret)
goto out;
+ /* init the device table */
+ init_device_table();
+
out:
/* Don't leak any ACPI memory */
early_acpi_os_unmap_memory((char __iomem *)ivrs_base, ivrs_size);
@@ -1652,13 +1806,22 @@ static bool detect_ivrs(void)
/* Make sure ACS will be enabled during PCI probe */
pci_request_acs();
+ if (!disable_irq_remap)
+ amd_iommu_irq_remap = true;
+
return true;
}
static int amd_iommu_init_dma(void)
{
+ struct amd_iommu *iommu;
int ret;
+ init_device_table_dma();
+
+ for_each_iommu(iommu)
+ iommu_flush_all_caches(iommu);
+
if (iommu_pass_through)
ret = amd_iommu_init_passthrough();
else
@@ -1749,7 +1912,48 @@ static int __init iommu_go_to_state(enum iommu_init_state state)
return ret;
}
+#ifdef CONFIG_IRQ_REMAP
+int __init amd_iommu_prepare(void)
+{
+ return iommu_go_to_state(IOMMU_ACPI_FINISHED);
+}
+
+int __init amd_iommu_supported(void)
+{
+ return amd_iommu_irq_remap ? 1 : 0;
+}
+
+int __init amd_iommu_enable(void)
+{
+ int ret;
+
+ ret = iommu_go_to_state(IOMMU_ENABLED);
+ if (ret)
+ return ret;
+
+ irq_remapping_enabled = 1;
+
+ return 0;
+}
+
+void amd_iommu_disable(void)
+{
+ amd_iommu_suspend();
+}
+
+int amd_iommu_reenable(int mode)
+{
+ amd_iommu_resume();
+
+ return 0;
+}
+
+int __init amd_iommu_enable_faulting(void)
+{
+ /* We enable MSI later when PCI is initialized */
+ return 0;
+}
+#endif
/*
* This is the core init function for AMD IOMMU hardware in the system.
@@ -1762,8 +1966,17 @@ static int __init amd_iommu_init(void)
ret = iommu_go_to_state(IOMMU_INITIALIZED);
if (ret) {
- disable_iommus();
- free_on_init_error();
+ free_dma_resources();
+ if (!irq_remapping_enabled) {
+ disable_iommus();
+ free_on_init_error();
+ } else {
+ struct amd_iommu *iommu;
+
+ uninit_device_table_dma();
+ for_each_iommu(iommu)
+ iommu_flush_all_caches(iommu);
+ }
}
return ret;
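
A worked example of the IVHD_DEV_SPECIAL decoding added to
init_iommu_from_acpi() above; the value is illustrative. An entry for
IOAPIC id 9 behind PCI device 00:14.0 would carry ext = 0x0100a009:

	u32 ext    = 0x0100a009;
	u8  handle = ext & 0xff;		/* 0x09: IOAPIC id 9 */
	u16 devid  = (ext >> 8) & 0xffff;	/* 0x00a0: bus 0, slot 0x14, func 0 */
	u8  type   = (ext >> 24) & 0xff;	/* 0x01: IVHD_SPECIAL_IOAPIC */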
diff --git a/drivers/iommu/amd_iommu_proto.h b/drivers/iommu/amd_iommu_proto.h
index 1a7f41c..c294961 100644
--- a/drivers/iommu/amd_iommu_proto.h
+++ b/drivers/iommu/amd_iommu_proto.h
@@ -32,6 +32,14 @@ extern void amd_iommu_uninit_devices(void);
extern void amd_iommu_init_notifier(void);
extern void amd_iommu_init_api(void);
+/* Needed for interrupt remapping */
+extern int amd_iommu_supported(void);
+extern int amd_iommu_prepare(void);
+extern int amd_iommu_enable(void);
+extern void amd_iommu_disable(void);
+extern int amd_iommu_reenable(int);
+extern int amd_iommu_enable_faulting(void);
+
/* IOMMUv2 specific functions */
struct iommu_domain;
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index d0dab86..c9aa3d0 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -152,6 +152,7 @@
#define CMD_INV_DEV_ENTRY 0x02
#define CMD_INV_IOMMU_PAGES 0x03
#define CMD_INV_IOTLB_PAGES 0x04
+#define CMD_INV_IRT 0x05
#define CMD_COMPLETE_PPR 0x07
#define CMD_INV_ALL 0x08
@@ -175,6 +176,7 @@
#define DEV_ENTRY_EX 0x67
#define DEV_ENTRY_SYSMGT1 0x68
#define DEV_ENTRY_SYSMGT2 0x69
+#define DEV_ENTRY_IRQ_TBL_EN 0x80
#define DEV_ENTRY_INIT_PASS 0xb8
#define DEV_ENTRY_EINT_PASS 0xb9
#define DEV_ENTRY_NMI_PASS 0xba
@@ -183,6 +185,8 @@
#define DEV_ENTRY_MODE_MASK 0x07
#define DEV_ENTRY_MODE_SHIFT 0x09
+#define MAX_DEV_TABLE_ENTRIES 0xffff
+
/* constants to configure the command buffer */
#define CMD_BUFFER_SIZE 8192
#define CMD_BUFFER_UNINITIALIZED 1
@@ -255,7 +259,7 @@
#define PAGE_SIZE_ALIGN(address, pagesize) \
((address) & ~((pagesize) - 1))
/*
- * Creates an IOMMU PTE for an address an a given pagesize
+ * Creates an IOMMU PTE for an address and a given pagesize
* The PTE has no permission bits set
* Pagesize is expected to be a power-of-two larger than 4096
*/
@@ -334,6 +338,23 @@ extern bool amd_iommu_np_cache;
/* Only true if all IOMMUs support device IOTLBs */
extern bool amd_iommu_iotlb_sup;
+#define MAX_IRQS_PER_TABLE 256
+#define IRQ_TABLE_ALIGNMENT 128
+
+struct irq_remap_table {
+ spinlock_t lock;
+ unsigned min_index;
+ u32 *table;
+};
+
+extern struct irq_remap_table **irq_lookup_table;
+
+/* Interrupt remapping feature used? */
+extern bool amd_iommu_irq_remap;
+
+/* kmem_cache to get tables with 128 byte alignment */
+extern struct kmem_cache *amd_iommu_irq_cache;
+
/*
* Make iterating over all IOMMUs easier
*/
@@ -404,7 +425,7 @@ struct iommu_dev_data {
struct list_head dev_data_list; /* For global dev_data_list */
struct iommu_dev_data *alias_data;/* The alias dev_data */
struct protection_domain *domain; /* Domain the device is bound to */
- atomic_t bind; /* Domain attach reverent count */
+ atomic_t bind; /* Domain attach reference count */
u16 devid; /* PCI Device ID */
bool iommu_v2; /* Device can make use of IOMMUv2 */
bool passthrough; /* Default for device is pt_domain */
@@ -565,6 +586,16 @@ struct amd_iommu {
u32 stored_l2[0x83];
};
+struct devid_map {
+ struct list_head list;
+ u8 id;
+ u16 devid;
+};
+
+/* Map HPET and IOAPIC ids to the devid used by the IOMMU */
+extern struct list_head ioapic_map;
+extern struct list_head hpet_map;
+
/*
* List with all IOMMUs in the system. This list is not locked because it is
* only written and read at driver initialization or suspend time
@@ -678,6 +709,30 @@ static inline u16 calc_devid(u8 bus, u8 devfn)
return (((u16)bus) << 8) | devfn;
}
+static inline int get_ioapic_devid(int id)
+{
+ struct devid_map *entry;
+
+ list_for_each_entry(entry, &ioapic_map, list) {
+ if (entry->id == id)
+ return entry->devid;
+ }
+
+ return -EINVAL;
+}
+
+static inline int get_hpet_devid(int id)
+{
+ struct devid_map *entry;
+
+ list_for_each_entry(entry, &hpet_map, list) {
+ if (entry->id == id)
+ return entry->devid;
+ }
+
+ return -EINVAL;
+}
+
#ifdef CONFIG_AMD_IOMMU_STATS
struct __iommu_counter {
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 80bad32..7fe44f8 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -840,8 +840,7 @@ static void exynos_iommu_detach_device(struct iommu_domain *domain,
if (__exynos_sysmmu_disable(data)) {
dev_dbg(dev, "%s: Detached IOMMU with pgtable %#lx\n",
__func__, __pa(priv->pgtable));
- list_del(&data->node);
- INIT_LIST_HEAD(&data->node);
+ list_del_init(&data->node);
} else {
dev_dbg(dev, "%s: Detaching IOMMU with pgtable %#lx delayed",
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index db820d7..d4a4cd4 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -589,7 +589,9 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
int i;
- domain->iommu_coherency = 1;
+ i = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
+
+ domain->iommu_coherency = i < g_num_of_iommus ? 1 : 0;
for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
if (!ecap_coherent(g_iommus[i]->ecap)) {
diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
index 151690d..faf85d6 100644
--- a/drivers/iommu/irq_remapping.c
+++ b/drivers/iommu/irq_remapping.c
@@ -51,6 +51,11 @@ early_param("intremap", setup_irqremap);
void __init setup_irq_remapping_ops(void)
{
remap_ops = &intel_irq_remap_ops;
+
+#ifdef CONFIG_AMD_IOMMU
+ if (amd_iommu_irq_ops.prepare() == 0)
+ remap_ops = &amd_iommu_irq_ops;
+#endif
}
int irq_remapping_supported(void)
diff --git a/drivers/iommu/irq_remapping.h b/drivers/iommu/irq_remapping.h
index b12974c..95363ac 100644
--- a/drivers/iommu/irq_remapping.h
+++ b/drivers/iommu/irq_remapping.h
@@ -82,6 +82,12 @@ struct irq_remap_ops {
};
extern struct irq_remap_ops intel_irq_remap_ops;
+extern struct irq_remap_ops amd_iommu_irq_ops;
+
+#else /* CONFIG_IRQ_REMAP */
+
+#define irq_remapping_enabled 0
+#define disable_irq_remap 1
#endif /* CONFIG_IRQ_REMAP */
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 2a4bb36..0b4d62e 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -32,14 +32,55 @@
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <mach/iomap.h>
-#include <mach/smmu.h>
#include <mach/tegra-ahb.h>
+enum smmu_hwgrp {
+ HWGRP_AFI,
+ HWGRP_AVPC,
+ HWGRP_DC,
+ HWGRP_DCB,
+ HWGRP_EPP,
+ HWGRP_G2,
+ HWGRP_HC,
+ HWGRP_HDA,
+ HWGRP_ISP,
+ HWGRP_MPE,
+ HWGRP_NV,
+ HWGRP_NV2,
+ HWGRP_PPCS,
+ HWGRP_SATA,
+ HWGRP_VDE,
+ HWGRP_VI,
+
+ HWGRP_COUNT,
+
+ HWGRP_END = ~0,
+};
+
+#define HWG_AFI (1 << HWGRP_AFI)
+#define HWG_AVPC (1 << HWGRP_AVPC)
+#define HWG_DC (1 << HWGRP_DC)
+#define HWG_DCB (1 << HWGRP_DCB)
+#define HWG_EPP (1 << HWGRP_EPP)
+#define HWG_G2 (1 << HWGRP_G2)
+#define HWG_HC (1 << HWGRP_HC)
+#define HWG_HDA (1 << HWGRP_HDA)
+#define HWG_ISP (1 << HWGRP_ISP)
+#define HWG_MPE (1 << HWGRP_MPE)
+#define HWG_NV (1 << HWGRP_NV)
+#define HWG_NV2 (1 << HWGRP_NV2)
+#define HWG_PPCS (1 << HWGRP_PPCS)
+#define HWG_SATA (1 << HWGRP_SATA)
+#define HWG_VDE (1 << HWGRP_VDE)
+#define HWG_VI (1 << HWGRP_VI)
+
/* bitmap of the page sizes currently supported */
#define SMMU_IOMMU_PGSIZES (SZ_4K)
@@ -47,16 +88,29 @@
#define SMMU_CONFIG_DISABLE 0
#define SMMU_CONFIG_ENABLE 1
-#define SMMU_TLB_CONFIG 0x14
-#define SMMU_TLB_CONFIG_STATS__MASK (1 << 31)
-#define SMMU_TLB_CONFIG_STATS__ENABLE (1 << 31)
+/* REVISIT: To support multiple MCs */
+enum {
+ _MC = 0,
+};
+
+enum {
+ _TLB = 0,
+ _PTC,
+};
+
+#define SMMU_CACHE_CONFIG_BASE 0x14
+#define __SMMU_CACHE_CONFIG(mc, cache) (SMMU_CACHE_CONFIG_BASE + 4 * cache)
+#define SMMU_CACHE_CONFIG(cache) __SMMU_CACHE_CONFIG(_MC, cache)
+
+#define SMMU_CACHE_CONFIG_STATS_SHIFT 31
+#define SMMU_CACHE_CONFIG_STATS_ENABLE (1 << SMMU_CACHE_CONFIG_STATS_SHIFT)
+#define SMMU_CACHE_CONFIG_STATS_TEST_SHIFT 30
+#define SMMU_CACHE_CONFIG_STATS_TEST (1 << SMMU_CACHE_CONFIG_STATS_TEST_SHIFT)
+
#define SMMU_TLB_CONFIG_HIT_UNDER_MISS__ENABLE (1 << 29)
#define SMMU_TLB_CONFIG_ACTIVE_LINES__VALUE 0x10
#define SMMU_TLB_CONFIG_RESET_VAL 0x20000010
-#define SMMU_PTC_CONFIG 0x18
-#define SMMU_PTC_CONFIG_STATS__MASK (1 << 31)
-#define SMMU_PTC_CONFIG_STATS__ENABLE (1 << 31)
#define SMMU_PTC_CONFIG_CACHE__ENABLE (1 << 29)
#define SMMU_PTC_CONFIG_INDEX_MAP__PATTERN 0x3f
#define SMMU_PTC_CONFIG_RESET_VAL 0x2000003f
@@ -86,10 +140,10 @@
#define SMMU_ASID_SECURITY 0x38
-#define SMMU_STATS_TLB_HIT_COUNT 0x1f0
-#define SMMU_STATS_TLB_MISS_COUNT 0x1f4
-#define SMMU_STATS_PTC_HIT_COUNT 0x1f8
-#define SMMU_STATS_PTC_MISS_COUNT 0x1fc
+#define SMMU_STATS_CACHE_COUNT_BASE 0x1f0
+
+#define SMMU_STATS_CACHE_COUNT(mc, cache, hitmiss) \
+ (SMMU_STATS_CACHE_COUNT_BASE + 8 * cache + 4 * hitmiss)
#define SMMU_TRANSLATION_ENABLE_0 0x228
#define SMMU_TRANSLATION_ENABLE_1 0x22c
@@ -231,6 +285,12 @@ struct smmu_as {
spinlock_t client_lock; /* for client list */
};
+struct smmu_debugfs_info {
+ struct smmu_device *smmu;
+ int mc;
+ int cache;
+};
+
/*
* Per SMMU device - IOMMU device
*/
@@ -251,6 +311,9 @@ struct smmu_device {
unsigned long translation_enable_2;
unsigned long asid_security;
+ struct dentry *debugfs_root;
+ struct smmu_debugfs_info *debugfs_info;
+
struct device_node *ahb;
int num_as;
@@ -412,8 +475,8 @@ static int smmu_setup_regs(struct smmu_device *smmu)
smmu_write(smmu, smmu->translation_enable_1, SMMU_TRANSLATION_ENABLE_1);
smmu_write(smmu, smmu->translation_enable_2, SMMU_TRANSLATION_ENABLE_2);
smmu_write(smmu, smmu->asid_security, SMMU_ASID_SECURITY);
- smmu_write(smmu, SMMU_TLB_CONFIG_RESET_VAL, SMMU_TLB_CONFIG);
- smmu_write(smmu, SMMU_PTC_CONFIG_RESET_VAL, SMMU_PTC_CONFIG);
+ smmu_write(smmu, SMMU_TLB_CONFIG_RESET_VAL, SMMU_CACHE_CONFIG(_TLB));
+ smmu_write(smmu, SMMU_PTC_CONFIG_RESET_VAL, SMMU_CACHE_CONFIG(_PTC));
smmu_flush_regs(smmu, 1);
@@ -895,6 +958,175 @@ static struct iommu_ops smmu_iommu_ops = {
.pgsize_bitmap = SMMU_IOMMU_PGSIZES,
};
+/* Must be kept in the same order as the enums (_MC, _TLB/_PTC) above */
+static const char * const smmu_debugfs_mc[] = { "mc", };
+static const char * const smmu_debugfs_cache[] = { "tlb", "ptc", };
+
+static ssize_t smmu_debugfs_stats_write(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *pos)
+{
+ struct smmu_debugfs_info *info;
+ struct smmu_device *smmu;
+ struct dentry *dent;
+ int i;
+ enum {
+ _OFF = 0,
+ _ON,
+ _RESET,
+ };
+ const char * const command[] = {
+ [_OFF] = "off",
+ [_ON] = "on",
+ [_RESET] = "reset",
+ };
+ char str[] = "reset";
+ u32 val;
+ size_t offs;
+
+ count = min_t(size_t, count, sizeof(str));
+ if (copy_from_user(str, buffer, count))
+ return -EFAULT;
+
+ for (i = 0; i < ARRAY_SIZE(command); i++)
+ if (strncmp(str, command[i],
+ strlen(command[i])) == 0)
+ break;
+
+ if (i == ARRAY_SIZE(command))
+ return -EINVAL;
+
+ dent = file->f_dentry;
+ info = dent->d_inode->i_private;
+ smmu = info->smmu;
+
+ offs = SMMU_CACHE_CONFIG(info->cache);
+ val = smmu_read(smmu, offs);
+ switch (i) {
+ case _OFF:
+ val &= ~SMMU_CACHE_CONFIG_STATS_ENABLE;
+ val &= ~SMMU_CACHE_CONFIG_STATS_TEST;
+ smmu_write(smmu, val, offs);
+ break;
+ case _ON:
+ val |= SMMU_CACHE_CONFIG_STATS_ENABLE;
+ val &= ~SMMU_CACHE_CONFIG_STATS_TEST;
+ smmu_write(smmu, val, offs);
+ break;
+ case _RESET:
+ val |= SMMU_CACHE_CONFIG_STATS_TEST;
+ smmu_write(smmu, val, offs);
+ val &= ~SMMU_CACHE_CONFIG_STATS_TEST;
+ smmu_write(smmu, val, offs);
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ dev_dbg(smmu->dev, "%s() %08x, %08x @%08x\n", __func__,
+ val, smmu_read(smmu, offs), offs);
+
+ return count;
+}
+
+static int smmu_debugfs_stats_show(struct seq_file *s, void *v)
+{
+ struct smmu_debugfs_info *info;
+ struct smmu_device *smmu;
+ struct dentry *dent;
+ int i;
+ const char * const stats[] = { "hit", "miss", };
+
+ dent = d_find_alias(s->private);
+ info = dent->d_inode->i_private;
+ smmu = info->smmu;
+
+ for (i = 0; i < ARRAY_SIZE(stats); i++) {
+ u32 val;
+ size_t offs;
+
+ offs = SMMU_STATS_CACHE_COUNT(info->mc, info->cache, i);
+ val = smmu_read(smmu, offs);
+ seq_printf(s, "%s:%08x ", stats[i], val);
+
+ dev_dbg(smmu->dev, "%s() %s %08x @%08x\n", __func__,
+ stats[i], val, offs);
+ }
+ seq_printf(s, "\n");
+
+ return 0;
+}
+
+static int smmu_debugfs_stats_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, smmu_debugfs_stats_show, inode);
+}
+
+static const struct file_operations smmu_debugfs_stats_fops = {
+ .open = smmu_debugfs_stats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = smmu_debugfs_stats_write,
+};
+
+static void smmu_debugfs_delete(struct smmu_device *smmu)
+{
+ debugfs_remove_recursive(smmu->debugfs_root);
+ kfree(smmu->debugfs_info);
+}
+
+static void smmu_debugfs_create(struct smmu_device *smmu)
+{
+ int i;
+ size_t bytes;
+ struct dentry *root;
+
+ bytes = ARRAY_SIZE(smmu_debugfs_mc) * ARRAY_SIZE(smmu_debugfs_cache) *
+ sizeof(*smmu->debugfs_info);
+ smmu->debugfs_info = kmalloc(bytes, GFP_KERNEL);
+ if (!smmu->debugfs_info)
+ return;
+
+ root = debugfs_create_dir(dev_name(smmu->dev), NULL);
+ if (!root)
+ goto err_out;
+ smmu->debugfs_root = root;
+
+ for (i = 0; i < ARRAY_SIZE(smmu_debugfs_mc); i++) {
+ int j;
+ struct dentry *mc;
+
+ mc = debugfs_create_dir(smmu_debugfs_mc[i], root);
+ if (!mc)
+ goto err_out;
+
+ for (j = 0; j < ARRAY_SIZE(smmu_debugfs_cache); j++) {
+ struct dentry *cache;
+ struct smmu_debugfs_info *info;
+
+ info = smmu->debugfs_info;
+ info += i * ARRAY_SIZE(smmu_debugfs_cache) + j;
+ info->smmu = smmu;
+ info->mc = i;
+ info->cache = j;
+
+ cache = debugfs_create_file(smmu_debugfs_cache[j],
+ S_IWUGO | S_IRUGO, mc,
+ (void *)info,
+ &smmu_debugfs_stats_fops);
+ if (!cache)
+ goto err_out;
+ }
+ }
+
+ return;
+
+err_out:
+ smmu_debugfs_delete(smmu);
+}
+
static int tegra_smmu_suspend(struct device *dev)
{
struct smmu_device *smmu = dev_get_drvdata(dev);
@@ -999,6 +1231,7 @@ static int tegra_smmu_probe(struct platform_device *pdev)
if (!smmu->avp_vector_page)
return -ENOMEM;
+ smmu_debugfs_create(smmu);
smmu_handle = smmu;
return 0;
}
@@ -1008,6 +1241,8 @@ static int tegra_smmu_remove(struct platform_device *pdev)
struct smmu_device *smmu = platform_get_drvdata(pdev);
int i;
+ smmu_debugfs_delete(smmu);
+
smmu_write(smmu, SMMU_CONFIG_DISABLE, SMMU_CONFIG);
for (i = 0; i < smmu->num_as; i++)
free_pdir(&smmu->as[i]);
diff --git a/drivers/isdn/hisax/Kconfig b/drivers/isdn/hisax/Kconfig
index 452fde9..70ecd0c 100644
--- a/drivers/isdn/hisax/Kconfig
+++ b/drivers/isdn/hisax/Kconfig
@@ -109,7 +109,7 @@ config HISAX_16_3
config HISAX_TELESPCI
bool "Teles PCI"
- depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV))
+ depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV || (XTENSA && !CPU_LITTLE_ENDIAN)))
help
This enables HiSax support for the Teles PCI.
See <file:Documentation/isdn/README.HiSax> on how to configure it.
@@ -237,7 +237,7 @@ config HISAX_MIC
config HISAX_NETJET
bool "NETjet card"
- depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV))
+ depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV || (XTENSA && !CPU_LITTLE_ENDIAN)))
help
This enables HiSax support for the NetJet from Traverse
Technologies.
@@ -248,7 +248,7 @@ config HISAX_NETJET
config HISAX_NETJET_U
bool "NETspider U card"
- depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV))
+ depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV || (XTENSA && !CPU_LITTLE_ENDIAN)))
help
This enables HiSax support for the Netspider U interface ISDN card
from Traverse Technologies.
@@ -316,7 +316,7 @@ config HISAX_GAZEL
config HISAX_HFC_PCI
bool "HFC PCI-Bus cards"
- depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV))
+ depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV || (XTENSA && !CPU_LITTLE_ENDIAN)))
help
This enables HiSax support for the HFC-S PCI 2BDS0 based cards.
@@ -341,7 +341,7 @@ config HISAX_HFC_SX
config HISAX_ENTERNOW_PCI
bool "Formula-n enter:now PCI card"
- depends on HISAX_NETJET && PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV))
+ depends on HISAX_NETJET && PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV || (XTENSA && !CPU_LITTLE_ENDIAN)))
help
This enables HiSax support for the Formula-n enter:now PCI
ISDN card.
diff --git a/drivers/lguest/lguest_device.c b/drivers/lguest/lguest_device.c
index 9e8388e..fc92ccb 100644
--- a/drivers/lguest/lguest_device.c
+++ b/drivers/lguest/lguest_device.c
@@ -263,6 +263,9 @@ static struct virtqueue *lg_find_vq(struct virtio_device *vdev,
struct virtqueue *vq;
int err;
+ if (!name)
+ return NULL;
+
/* We must have this many virtqueues. */
if (index >= ldev->desc->num_vq)
return ERR_PTR(-ENOENT);
@@ -296,7 +299,7 @@ static struct virtqueue *lg_find_vq(struct virtio_device *vdev,
* to 'true': the host just a(nother) SMP CPU, so we only need inter-cpu
* barriers.
*/
- vq = vring_new_virtqueue(lvq->config.num, LGUEST_VRING_ALIGN, vdev,
+ vq = vring_new_virtqueue(index, lvq->config.num, LGUEST_VRING_ALIGN, vdev,
true, lvq->pages, lg_notify, callback, name);
if (!vq) {
err = -ENOMEM;
diff --git a/drivers/media/pci/meye/meye.c b/drivers/media/pci/meye/meye.c
index 7bc7752..e5a76da 100644
--- a/drivers/media/pci/meye/meye.c
+++ b/drivers/media/pci/meye/meye.c
@@ -1647,7 +1647,7 @@ static int meye_mmap(struct file *file, struct vm_area_struct *vma)
vma->vm_ops = &meye_vm_ops;
vma->vm_flags &= ~VM_IO; /* not I/O memory */
- vma->vm_flags |= VM_RESERVED; /* avoid to swap out this VMA */
+ vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_private_data = (void *) (offset / gbufsize);
meye_vm_open(vma);
diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
index 66ac21d..134016f 100644
--- a/drivers/media/platform/omap/omap_vout.c
+++ b/drivers/media/platform/omap/omap_vout.c
@@ -911,7 +911,7 @@ static int omap_vout_mmap(struct file *file, struct vm_area_struct *vma)
q->bufs[i]->baddr = vma->vm_start;
- vma->vm_flags |= VM_RESERVED;
+ vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
vma->vm_ops = &omap_vout_vm_ops;
vma->vm_private_data = (void *) vout;
diff --git a/drivers/media/platform/vino.c b/drivers/media/platform/vino.c
index 790d96c..70b0bf4 100644
--- a/drivers/media/platform/vino.c
+++ b/drivers/media/platform/vino.c
@@ -3950,7 +3950,7 @@ found:
fb->map_count = 1;
- vma->vm_flags |= VM_DONTEXPAND | VM_RESERVED;
+ vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_flags &= ~VM_IO;
vma->vm_private_data = fb;
vma->vm_file = file;
diff --git a/drivers/media/usb/sn9c102/sn9c102_core.c b/drivers/media/usb/sn9c102/sn9c102_core.c
index 19ea780..5bfc8e2 100644
--- a/drivers/media/usb/sn9c102/sn9c102_core.c
+++ b/drivers/media/usb/sn9c102/sn9c102_core.c
@@ -2126,8 +2126,7 @@ static int sn9c102_mmap(struct file* filp, struct vm_area_struct *vma)
return -EINVAL;
}
- vma->vm_flags |= VM_IO;
- vma->vm_flags |= VM_RESERVED;
+ vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
pos = cam->frame[i].bufmem;
while (size > 0) { /* size is page-aligned */
diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c
index f67018e..5c36a57 100644
--- a/drivers/media/usb/usbvision/usbvision-video.c
+++ b/drivers/media/usb/usbvision/usbvision-video.c
@@ -1108,8 +1108,7 @@ static int usbvision_mmap(struct file *file, struct vm_area_struct *vma)
}
/* VM_IO is eventually going to replace PageReserved altogether */
- vma->vm_flags |= VM_IO;
- vma->vm_flags |= VM_RESERVED; /* avoid to swap out this VMA */
+ vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
pos = usbvision->frame[i].data;
while (size > 0) {
diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c
index f300dea..828e7c1 100644
--- a/drivers/media/v4l2-core/videobuf-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf-dma-sg.c
@@ -582,7 +582,7 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q,
map->count = 1;
map->q = q;
vma->vm_ops = &videobuf_vm_ops;
- vma->vm_flags |= VM_DONTEXPAND | VM_RESERVED;
+ vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_flags &= ~VM_IO; /* using shared anonymous pages */
vma->vm_private_data = map;
dprintk(1, "mmap %p: q=%p %08lx-%08lx pgoff %08lx bufs %d-%d\n",
diff --git a/drivers/media/v4l2-core/videobuf-vmalloc.c b/drivers/media/v4l2-core/videobuf-vmalloc.c
index df14258..2ff7fcc 100644
--- a/drivers/media/v4l2-core/videobuf-vmalloc.c
+++ b/drivers/media/v4l2-core/videobuf-vmalloc.c
@@ -270,7 +270,7 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q,
}
vma->vm_ops = &videobuf_vm_ops;
- vma->vm_flags |= VM_DONTEXPAND | VM_RESERVED;
+ vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_private_data = map;
dprintk(1, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
diff --git a/drivers/media/v4l2-core/videobuf2-memops.c b/drivers/media/v4l2-core/videobuf2-memops.c
index 504cd4c..051ea35 100644
--- a/drivers/media/v4l2-core/videobuf2-memops.c
+++ b/drivers/media/v4l2-core/videobuf2-memops.c
@@ -163,7 +163,7 @@ int vb2_mmap_pfn_range(struct vm_area_struct *vma, unsigned long paddr,
return ret;
}
- vma->vm_flags |= VM_DONTEXPAND | VM_RESERVED;
+ vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_private_data = priv;
vma->vm_ops = vm_ops;
diff --git a/drivers/misc/carma/carma-fpga.c b/drivers/misc/carma/carma-fpga.c
index 0c43297..8835eab 100644
--- a/drivers/misc/carma/carma-fpga.c
+++ b/drivers/misc/carma/carma-fpga.c
@@ -1243,8 +1243,6 @@ static int data_mmap(struct file *filp, struct vm_area_struct *vma)
return -EINVAL;
}
- /* IO memory (stop cacheing) */
- vma->vm_flags |= VM_IO | VM_RESERVED;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
return io_remap_pfn_range(vma, vma->vm_start, addr, vsize,
diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c
index ecafa4b..492c8ca 100644
--- a/drivers/misc/sgi-gru/grufile.c
+++ b/drivers/misc/sgi-gru/grufile.c
@@ -108,9 +108,8 @@ static int gru_file_mmap(struct file *file, struct vm_area_struct *vma)
vma->vm_end & (GRU_GSEG_PAGESIZE - 1))
return -EINVAL;
- vma->vm_flags |=
- (VM_IO | VM_DONTCOPY | VM_LOCKED | VM_DONTEXPAND | VM_PFNMAP |
- VM_RESERVED);
+ vma->vm_flags |= VM_IO | VM_PFNMAP | VM_LOCKED |
+ VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_page_prot = PAGE_SHARED;
vma->vm_ops = &gru_vm_ops;
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 8ac5246..06c42cf 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -26,6 +26,7 @@
#include <linux/suspend.h>
#include <linux/fault-inject.h>
#include <linux/random.h>
+#include <linux/slab.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
@@ -41,6 +42,12 @@
#include "sd_ops.h"
#include "sdio_ops.h"
+/*
+ * Background operations can take a long time, depending on the housekeeping
+ * operations the card has to perform.
+ */
+#define MMC_BKOPS_MAX_TIMEOUT (4 * 60 * 1000) /* max time to wait in ms */
+
static struct workqueue_struct *workqueue;
static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
@@ -245,6 +252,70 @@ mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
host->ops->request(host, mrq);
}
+/**
+ * mmc_start_bkops - start BKOPS for supported cards
+ * @card: MMC card to start BKOPS
+ * @from_exception: A flag to indicate if this function was
+ * called due to an exception raised by the card
+ *
+ * Start background operations whenever requested.
+ * When the urgent BKOPS bit is set in an R1 command response
+ * then background operations should be started immediately.
+ */
+void mmc_start_bkops(struct mmc_card *card, bool from_exception)
+{
+ int err;
+ int timeout;
+ bool use_busy_signal;
+
+ BUG_ON(!card);
+
+ if (!card->ext_csd.bkops_en || mmc_card_doing_bkops(card))
+ return;
+
+ err = mmc_read_bkops_status(card);
+ if (err) {
+ pr_err("%s: Failed to read bkops status: %d\n",
+ mmc_hostname(card->host), err);
+ return;
+ }
+
+ if (!card->ext_csd.raw_bkops_status)
+ return;
+
+ if (card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2 &&
+ from_exception)
+ return;
+
+ mmc_claim_host(card->host);
+ if (card->ext_csd.raw_bkops_status >= EXT_CSD_BKOPS_LEVEL_2) {
+ timeout = MMC_BKOPS_MAX_TIMEOUT;
+ use_busy_signal = true;
+ } else {
+ timeout = 0;
+ use_busy_signal = false;
+ }
+
+ err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_BKOPS_START, 1, timeout, use_busy_signal);
+ if (err) {
+ pr_warn("%s: Error %d starting bkops\n",
+ mmc_hostname(card->host), err);
+ goto out;
+ }
+
+ /*
+ * For urgent BKOPS status (LEVEL_2 and higher) BKOPS is
+ * executed synchronously; otherwise the operation is still
+ * in progress when we return, so mark the card accordingly.
+ */
+ if (!use_busy_signal)
+ mmc_card_set_doing_bkops(card);
+out:
+ mmc_release_host(card->host);
+}
+EXPORT_SYMBOL(mmc_start_bkops);
+
static void mmc_wait_done(struct mmc_request *mrq)
{
complete(&mrq->completion);
@@ -354,6 +425,14 @@ struct mmc_async_req *mmc_start_req(struct mmc_host *host,
if (host->areq) {
mmc_wait_for_req_done(host, host->areq->mrq);
err = host->areq->err_check(host->card, host->areq);
+ /*
+ * Check BKOPS urgency for each R1 response
+ */
+ if (host->card && mmc_card_mmc(host->card) &&
+ ((mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1) ||
+ (mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1B)) &&
+ (host->areq->mrq->cmd->resp[0] & R1_EXCEPTION_EVENT))
+ mmc_start_bkops(host->card, true);
}
if (!err && areq)
@@ -398,7 +477,7 @@ EXPORT_SYMBOL(mmc_wait_for_req);
* @card: the MMC card associated with the HPI transfer
*
* Issued High Priority Interrupt, and check for card status
- * util out-of prg-state.
+ * until it is out of the programming state.
*/
int mmc_interrupt_hpi(struct mmc_card *card)
{
@@ -424,8 +503,9 @@ int mmc_interrupt_hpi(struct mmc_card *card)
case R1_STATE_IDLE:
case R1_STATE_READY:
case R1_STATE_STBY:
+ case R1_STATE_TRAN:
/*
- * In idle states, HPI is not needed and the caller
+ * In idle and transfer states, HPI is not needed and the caller
* can issue the next intended command immediately
*/
goto out;
@@ -489,6 +569,64 @@ int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries
EXPORT_SYMBOL(mmc_wait_for_cmd);
/**
+ * mmc_stop_bkops - stop ongoing BKOPS
+ * @card: MMC card to check BKOPS
+ *
+ * Send HPI command to stop ongoing background operations to
+ * allow rapid servicing of foreground operations, e.g. read/
+ * writes. Wait until the card comes out of the programming state
+ * to avoid errors in servicing read/write requests.
+ */
+int mmc_stop_bkops(struct mmc_card *card)
+{
+ int err = 0;
+
+ BUG_ON(!card);
+ err = mmc_interrupt_hpi(card);
+
+ /*
+ * If err is -EINVAL, the card does not support HPI; in that
+ * case it will complete the ongoing BKOPS on its own.
+ */
+ if (!err || (err == -EINVAL)) {
+ mmc_card_clr_doing_bkops(card);
+ err = 0;
+ }
+
+ return err;
+}
+EXPORT_SYMBOL(mmc_stop_bkops);
+
+int mmc_read_bkops_status(struct mmc_card *card)
+{
+ int err;
+ u8 *ext_csd;
+
+ /*
+ * In future work, we should consider storing the entire ext_csd.
+ */
+ ext_csd = kmalloc(512, GFP_KERNEL);
+ if (!ext_csd) {
+ pr_err("%s: could not allocate buffer to receive the ext_csd.\n",
+ mmc_hostname(card->host));
+ return -ENOMEM;
+ }
+
+ mmc_claim_host(card->host);
+ err = mmc_send_ext_csd(card, ext_csd);
+ mmc_release_host(card->host);
+ if (err)
+ goto out;
+
+ card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
+ card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
+out:
+ kfree(ext_csd);
+ return err;
+}
+EXPORT_SYMBOL(mmc_read_bkops_status);
+
+/**
* mmc_set_data_timeout - set the timeout for a data command
* @data: data phase for command
* @card: the MMC card associated with the data transfer
@@ -975,7 +1113,8 @@ int mmc_regulator_set_ocr(struct mmc_host *mmc,
int tmp;
int voltage;
- /* REVISIT mmc_vddrange_to_ocrmask() may have set some
+ /*
+ * REVISIT mmc_vddrange_to_ocrmask() may have set some
* bits this regulator doesn't quite support ... don't
* be too picky, most cards and regulators are OK with
* a 0.1V range goof (it's a small error percentage).
@@ -989,12 +1128,13 @@ int mmc_regulator_set_ocr(struct mmc_host *mmc,
max_uV = min_uV + 100 * 1000;
}
- /* avoid needless changes to this voltage; the regulator
- * might not allow this operation
+ /*
+ * If we're using a fixed/static regulator, don't call
+ * regulator_set_voltage; it would fail.
*/
voltage = regulator_get_voltage(supply);
- if (mmc->caps2 & MMC_CAP2_BROKEN_VOLTAGE)
+ if (regulator_count_voltages(supply) == 1)
min_uV = max_uV = voltage;
if (voltage < 0)
@@ -1133,48 +1273,6 @@ void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
mmc_host_clk_release(host);
}
-static void mmc_poweroff_notify(struct mmc_host *host)
-{
- struct mmc_card *card;
- unsigned int timeout;
- unsigned int notify_type = EXT_CSD_NO_POWER_NOTIFICATION;
- int err = 0;
-
- card = host->card;
- mmc_claim_host(host);
-
- /*
- * Send power notify command only if card
- * is mmc and notify state is powered ON
- */
- if (card && mmc_card_mmc(card) &&
- (card->poweroff_notify_state == MMC_POWERED_ON)) {
-
- if (host->power_notify_type == MMC_HOST_PW_NOTIFY_SHORT) {
- notify_type = EXT_CSD_POWER_OFF_SHORT;
- timeout = card->ext_csd.generic_cmd6_time;
- card->poweroff_notify_state = MMC_POWEROFF_SHORT;
- } else {
- notify_type = EXT_CSD_POWER_OFF_LONG;
- timeout = card->ext_csd.power_off_longtime;
- card->poweroff_notify_state = MMC_POWEROFF_LONG;
- }
-
- err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_POWER_OFF_NOTIFICATION,
- notify_type, timeout);
-
- if (err && err != -EBADMSG)
- pr_err("Device failed to respond within %d poweroff "
- "time. Forcefully powering down the device\n",
- timeout);
-
- /* Set the card state to no notification after the poweroff */
- card->poweroff_notify_state = MMC_NO_POWER_NOTIFICATION;
- }
- mmc_release_host(host);
-}
-
/*
* Apply power to the MMC stack. This is a two-stage process.
* First, we enable power to the card without the clock running.
@@ -1237,8 +1335,6 @@ static void mmc_power_up(struct mmc_host *host)
void mmc_power_off(struct mmc_host *host)
{
- int err = 0;
-
if (host->ios.power_mode == MMC_POWER_OFF)
return;
@@ -1247,22 +1343,6 @@ void mmc_power_off(struct mmc_host *host)
host->ios.clock = 0;
host->ios.vdd = 0;
- /*
- * For eMMC 4.5 device send AWAKE command before
- * POWER_OFF_NOTIFY command, because in sleep state
- * eMMC 4.5 devices respond to only RESET and AWAKE cmd
- */
- if (host->card && mmc_card_is_sleep(host->card) &&
- host->bus_ops->resume) {
- err = host->bus_ops->resume(host);
-
- if (!err)
- mmc_poweroff_notify(host);
- else
- pr_warning("%s: error %d during resume "
- "(continue with poweroff sequence)\n",
- mmc_hostname(host), err);
- }
/*
* Reset ocr mask to be the highest possible voltage supported for
@@ -2052,6 +2132,11 @@ void mmc_rescan(struct work_struct *work)
if (host->rescan_disable)
return;
+ /* If there is a non-removable card registered, only scan once */
+ if ((host->caps & MMC_CAP_NONREMOVABLE) && host->rescan_entered)
+ return;
+ host->rescan_entered = 1;
+
mmc_bus_get(host);
/*
@@ -2327,9 +2412,14 @@ int mmc_suspend_host(struct mmc_host *host)
mmc_bus_get(host);
if (host->bus_ops && !host->bus_dead) {
-
- if (host->bus_ops->suspend)
+ if (host->bus_ops->suspend) {
+ if (mmc_card_doing_bkops(host->card)) {
+ err = mmc_stop_bkops(host->card);
+ if (err)
+ goto out;
+ }
err = host->bus_ops->suspend(host);
+ }
if (err == -ENOSYS || !host->bus_ops->resume) {
/*
@@ -2411,15 +2501,24 @@ int mmc_pm_notify(struct notifier_block *notify_block,
struct mmc_host *host = container_of(
notify_block, struct mmc_host, pm_notify);
unsigned long flags;
-
+ int err = 0;
switch (mode) {
case PM_HIBERNATION_PREPARE:
case PM_SUSPEND_PREPARE:
+ if (host->card && mmc_card_mmc(host->card) &&
+ mmc_card_doing_bkops(host->card)) {
+ err = mmc_stop_bkops(host->card);
+ if (err) {
+ pr_err("%s: didn't stop bkops\n",
+ mmc_hostname(host));
+ return err;
+ }
+ mmc_card_clr_doing_bkops(host->card);
+ }
spin_lock_irqsave(&host->lock, flags);
host->rescan_disable = 1;
- host->power_notify_type = MMC_HOST_PW_NOTIFY_SHORT;
spin_unlock_irqrestore(&host->lock, flags);
cancel_delayed_work_sync(&host->detect);
@@ -2443,7 +2542,6 @@ int mmc_pm_notify(struct notifier_block *notify_block,
spin_lock_irqsave(&host->lock, flags);
host->rescan_disable = 0;
- host->power_notify_type = MMC_HOST_PW_NOTIFY_LONG;
spin_unlock_irqrestore(&host->lock, flags);
mmc_detect_change(host, 0);
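
Taken together, the BKOPS hooks added above give the core a simple lifecycle: start background operations when an R1/R1b response flags an exception, and stop them with an HPI before suspend. A condensed sketch of that flow, using only the helpers introduced in this patch; bkops_example() itself is illustrative glue, not a function this patch adds:

	#include <linux/mmc/card.h>
	#include <linux/mmc/core.h>
	#include <linux/mmc/mmc.h>

	static int bkops_example(struct mmc_card *card, u32 r1_status)
	{
		int err = 0;

		/* Exception raised by the card: let it do housekeeping.
		 * Level >= 2 runs synchronously with a long busy timeout,
		 * lower levels run in the background. */
		if (r1_status & R1_EXCEPTION_EVENT)
			mmc_start_bkops(card, true);

		/* Before suspending, interrupt any background run so
		 * later requests do not hit the programming state. */
		if (mmc_card_doing_bkops(card))
			err = mmc_stop_bkops(card);

		return err;
	}
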
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
index 9ab5b17..d96c643 100644
--- a/drivers/mmc/core/debugfs.c
+++ b/drivers/mmc/core/debugfs.c
@@ -281,7 +281,7 @@ static int mmc_ext_csd_open(struct inode *inode, struct file *filp)
if (err)
goto out_free;
- for (i = 511; i >= 0; i--)
+ for (i = 0; i < 512; i++)
n += sprintf(buf + n, "%02x", ext_csd[i]);
n += sprintf(buf + n, "\n");
BUG_ON(n != EXT_CSD_STR_LEN);
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 396b258..7cc4638 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -463,6 +463,17 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
}
if (card->ext_csd.rev >= 5) {
+ /* check whether the eMMC card supports BKOPS */
+ if (ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) {
+ card->ext_csd.bkops = 1;
+ card->ext_csd.bkops_en = ext_csd[EXT_CSD_BKOPS_EN];
+ card->ext_csd.raw_bkops_status =
+ ext_csd[EXT_CSD_BKOPS_STATUS];
+ if (!card->ext_csd.bkops_en)
+ pr_info("%s: BKOPS_EN bit is not set\n",
+ mmc_hostname(card->host));
+ }
+
/* check whether the eMMC card supports HPI */
if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x1) {
card->ext_csd.hpi = 1;
@@ -996,7 +1007,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
* so check for success and update the flag
*/
if (!err)
- card->poweroff_notify_state = MMC_POWERED_ON;
+ card->ext_csd.power_off_notification = EXT_CSD_POWER_ON;
}
/*
@@ -1262,6 +1273,35 @@ err:
return err;
}
+static int mmc_can_poweroff_notify(const struct mmc_card *card)
+{
+ return card &&
+ mmc_card_mmc(card) &&
+ (card->ext_csd.power_off_notification == EXT_CSD_POWER_ON);
+}
+
+static int mmc_poweroff_notify(struct mmc_card *card, unsigned int notify_type)
+{
+ unsigned int timeout = card->ext_csd.generic_cmd6_time;
+ int err;
+
+ /* Use EXT_CSD_POWER_OFF_SHORT as default notification type. */
+ if (notify_type == EXT_CSD_POWER_OFF_LONG)
+ timeout = card->ext_csd.power_off_longtime;
+
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_POWER_OFF_NOTIFICATION,
+ notify_type, timeout);
+ if (err)
+ pr_err("%s: Power Off Notification timed out, %u\n",
+ mmc_hostname(card->host), timeout);
+
+ /* Disable the power off notification after the switch operation. */
+ card->ext_csd.power_off_notification = EXT_CSD_NO_POWER_NOTIFICATION;
+
+ return err;
+}
+
/*
* Host is being removed. Free up the current card.
*/
@@ -1322,11 +1362,11 @@ static int mmc_suspend(struct mmc_host *host)
BUG_ON(!host->card);
mmc_claim_host(host);
- if (mmc_card_can_sleep(host)) {
+ if (mmc_can_poweroff_notify(host->card))
+ err = mmc_poweroff_notify(host->card, EXT_CSD_POWER_OFF_SHORT);
+ else if (mmc_card_can_sleep(host))
err = mmc_card_sleep(host);
- if (!err)
- mmc_card_set_sleep(host->card);
- } else if (!mmc_host_is_spi(host))
+ else if (!mmc_host_is_spi(host))
err = mmc_deselect_cards(host);
host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_200);
mmc_release_host(host);
@@ -1348,11 +1388,7 @@ static int mmc_resume(struct mmc_host *host)
BUG_ON(!host->card);
mmc_claim_host(host);
- if (mmc_card_is_sleep(host->card)) {
- err = mmc_card_awake(host);
- mmc_card_clr_sleep(host->card);
- } else
- err = mmc_init_card(host, host->ocr, host->card);
+ err = mmc_init_card(host, host->ocr, host->card);
mmc_release_host(host);
return err;
@@ -1363,7 +1399,6 @@ static int mmc_power_restore(struct mmc_host *host)
int ret;
host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_200);
- mmc_card_clr_sleep(host->card);
mmc_claim_host(host);
ret = mmc_init_card(host, host->ocr, host->card);
mmc_release_host(host);
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 0ed2cc5..a0e1720 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -230,6 +230,10 @@ mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
return 0;
}
+/*
+ * NOTE: the caller must pass @buf as either a DMA-capable buffer or
+ * an on-stack buffer (the latter is bounced through kmalloc in the
+ * callee, with some overhead).
+ */
static int
mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
u32 opcode, void *buf, unsigned len)
@@ -239,13 +243,19 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
struct mmc_data data = {0};
struct scatterlist sg;
void *data_buf;
+ int is_on_stack;
- /* dma onto stack is unsafe/nonportable, but callers to this
- * routine normally provide temporary on-stack buffers ...
- */
- data_buf = kmalloc(len, GFP_KERNEL);
- if (data_buf == NULL)
- return -ENOMEM;
+ is_on_stack = object_is_on_stack(buf);
+ if (is_on_stack) {
+ /*
+ * dma onto stack is unsafe/nonportable, but callers to this
+ * routine normally provide temporary on-stack buffers ...
+ */
+ data_buf = kmalloc(len, GFP_KERNEL);
+ if (!data_buf)
+ return -ENOMEM;
+ } else
+ data_buf = buf;
mrq.cmd = &cmd;
mrq.data = &data;
@@ -280,8 +290,10 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
mmc_wait_for_req(host, &mrq);
- memcpy(buf, data_buf, len);
- kfree(data_buf);
+ if (is_on_stack) {
+ memcpy(buf, data_buf, len);
+ kfree(data_buf);
+ }
if (cmd.error)
return cmd.error;
@@ -294,24 +306,32 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
int mmc_send_csd(struct mmc_card *card, u32 *csd)
{
int ret, i;
+ u32 *csd_tmp;
if (!mmc_host_is_spi(card->host))
return mmc_send_cxd_native(card->host, card->rca << 16,
csd, MMC_SEND_CSD);
- ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd, 16);
+ csd_tmp = kmalloc(16, GFP_KERNEL);
+ if (!csd_tmp)
+ return -ENOMEM;
+
+ ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd_tmp, 16);
if (ret)
- return ret;
+ goto err;
for (i = 0;i < 4;i++)
- csd[i] = be32_to_cpu(csd[i]);
+ csd[i] = be32_to_cpu(csd_tmp[i]);
- return 0;
+err:
+ kfree(csd_tmp);
+ return ret;
}
int mmc_send_cid(struct mmc_host *host, u32 *cid)
{
int ret, i;
+ u32 *cid_tmp;
if (!mmc_host_is_spi(host)) {
if (!host->card)
@@ -320,14 +340,20 @@ int mmc_send_cid(struct mmc_host *host, u32 *cid)
cid, MMC_SEND_CID);
}
- ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid, 16);
+ cid_tmp = kmalloc(16, GFP_KERNEL);
+ if (!cid_tmp)
+ return -ENOMEM;
+
+ ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid_tmp, 16);
if (ret)
- return ret;
+ goto err;
for (i = 0;i < 4;i++)
- cid[i] = be32_to_cpu(cid[i]);
+ cid[i] = be32_to_cpu(cid_tmp[i]);
- return 0;
+err:
+ kfree(cid_tmp);
+ return ret;
}
int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd)
@@ -367,18 +393,19 @@ int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
}
/**
- * mmc_switch - modify EXT_CSD register
+ * __mmc_switch - modify EXT_CSD register
* @card: the MMC card associated with the data transfer
* @set: cmd set values
* @index: EXT_CSD register index
* @value: value to program into EXT_CSD register
* @timeout_ms: timeout (ms) for operation performed by register write,
* timeout of zero implies maximum possible timeout
+ * @use_busy_signal: use the busy signal as response type
*
* Modifies the EXT_CSD register for selected card.
*/
-int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
- unsigned int timeout_ms)
+int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
+ unsigned int timeout_ms, bool use_busy_signal)
{
int err;
struct mmc_command cmd = {0};
@@ -392,13 +419,23 @@ int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
(index << 16) |
(value << 8) |
set;
- cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+ cmd.flags = MMC_CMD_AC;
+ if (use_busy_signal)
+ cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
+ else
+ cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
+
cmd.cmd_timeout_ms = timeout_ms;
err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
if (err)
return err;
+ /* No need to check card status in case of unblocking command */
+ if (!use_busy_signal)
+ return 0;
+
/* Must check status to be sure of no errors */
do {
err = mmc_send_status(card, &status);
@@ -423,6 +460,13 @@ int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
return 0;
}
+EXPORT_SYMBOL_GPL(__mmc_switch);
+
+int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
+ unsigned int timeout_ms)
+{
+ return __mmc_switch(card, set, index, value, timeout_ms, true);
+}
EXPORT_SYMBOL_GPL(mmc_switch);
int mmc_send_status(struct mmc_card *card, u32 *status)
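
__mmc_switch() differs from mmc_switch() only in whether the SWITCH command is sent as R1b (busy-signalled, with status polling afterwards) or plain R1 (fire and forget). A short sketch of when each form is appropriate, using the two entry points defined above; the timeout constant mirrors the one core.c defines for a synchronous BKOPS run:

	#include <linux/mmc/card.h>
	#include <linux/mmc/mmc.h>

	/* Same value MMC_BKOPS_MAX_TIMEOUT uses in core.c above. */
	#define BKOPS_TIMEOUT_MS	(4 * 60 * 1000)

	static int switch_example(struct mmc_card *card, bool urgent)
	{
		if (urgent)
			/* R1b plus status polling: returns only once the
			 * card has left the programming state (or the
			 * timeout expires). */
			return __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					    EXT_CSD_BKOPS_START, 1,
					    BKOPS_TIMEOUT_MS, true);

		/* Plain R1: start BKOPS and return immediately; the card
		 * keeps working in the background until stopped via HPI. */
		return __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				    EXT_CSD_BKOPS_START, 1, 0, false);
	}
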
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index 236842e..6bf6879 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -193,14 +193,7 @@ static int sdio_bus_remove(struct device *dev)
}
#ifdef CONFIG_PM
-
-static int pm_no_operation(struct device *dev)
-{
- return 0;
-}
-
static const struct dev_pm_ops sdio_bus_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(pm_no_operation, pm_no_operation)
SET_RUNTIME_PM_OPS(
pm_generic_runtime_suspend,
pm_generic_runtime_resume,
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
index 0582429..08c6b3d 100644
--- a/drivers/mmc/core/slot-gpio.c
+++ b/drivers/mmc/core/slot-gpio.c
@@ -100,7 +100,13 @@ int mmc_gpio_request_ro(struct mmc_host *host, unsigned int gpio)
ctx = host->slot.handler_priv;
- return gpio_request_one(gpio, GPIOF_DIR_IN, ctx->ro_label);
+ ret = gpio_request_one(gpio, GPIOF_DIR_IN, ctx->ro_label);
+ if (ret < 0)
+ return ret;
+
+ ctx->ro_gpio = gpio;
+
+ return 0;
}
EXPORT_SYMBOL(mmc_gpio_request_ro);
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index aa131b3..9bf10e7 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -540,6 +540,15 @@ config MMC_DW_PLTFM
If unsure, say Y.
+config MMC_DW_EXYNOS
+ tristate "Exynos specific extentions for Synopsys DW Memory Card Interface"
+ depends on MMC_DW
+ select MMC_DW_PLTFM
+ help
+ This selects support for Samsung Exynos SoC specific extensions to the
+ Synopsys DesignWare Memory Card Interface driver. Select this option
+ for platforms based on Exynos4 and Exynos5 SoCs.
+
config MMC_DW_PCI
tristate "Synopsys Designware MCI support on PCI bus"
depends on MMC_DW && PCI
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 8922b06..17ad0a7 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -39,6 +39,7 @@ obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o
obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o
obj-$(CONFIG_MMC_DW) += dw_mmc.o
obj-$(CONFIG_MMC_DW_PLTFM) += dw_mmc-pltfm.o
+obj-$(CONFIG_MMC_DW_EXYNOS) += dw_mmc-exynos.o
obj-$(CONFIG_MMC_DW_PCI) += dw_mmc-pci.o
obj-$(CONFIG_MMC_SH_MMCIF) += sh_mmcif.o
obj-$(CONFIG_MMC_JZ4740) += jz4740_mmc.o
diff --git a/drivers/mmc/host/atmel-mci-regs.h b/drivers/mmc/host/atmel-mci-regs.h
index ab56f7d..c97001e 100644
--- a/drivers/mmc/host/atmel-mci-regs.h
+++ b/drivers/mmc/host/atmel-mci-regs.h
@@ -140,6 +140,13 @@
#define atmci_writel(port,reg,value) \
__raw_writel((value), (port)->regs + reg)
+/* On AVR chips the Peripheral DMA Controller is not connected to MCI. */
+#ifdef CONFIG_AVR32
+# define ATMCI_PDC_CONNECTED 0
+#else
+# define ATMCI_PDC_CONNECTED 1
+#endif
+
/*
* Fix sconfig's burst size according to atmel MCI. We need to convert them as:
* 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 852d5fb..ddf096e 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -19,6 +19,9 @@
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
@@ -71,7 +74,7 @@ enum atmci_pdc_buf {
};
struct atmel_mci_caps {
- bool has_dma;
+ bool has_dma_conf_reg;
bool has_pdc;
bool has_cfg_reg;
bool has_cstor_reg;
@@ -418,7 +421,7 @@ static int atmci_regs_show(struct seq_file *s, void *v)
atmci_show_status_reg(s, "SR", buf[ATMCI_SR / 4]);
atmci_show_status_reg(s, "IMR", buf[ATMCI_IMR / 4]);
- if (host->caps.has_dma) {
+ if (host->caps.has_dma_conf_reg) {
u32 val;
val = buf[ATMCI_DMA / 4];
@@ -500,6 +503,70 @@ err:
dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
}
+#if defined(CONFIG_OF)
+static const struct of_device_id atmci_dt_ids[] = {
+ { .compatible = "atmel,hsmci" },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, atmci_dt_ids);
+
+static struct mci_platform_data __devinit*
+atmci_of_init(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device_node *cnp;
+ struct mci_platform_data *pdata;
+ u32 slot_id;
+
+ if (!np) {
+ dev_err(&pdev->dev, "device node not found\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ dev_err(&pdev->dev, "could not allocate memory for pdata\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ for_each_child_of_node(np, cnp) {
+ if (of_property_read_u32(cnp, "reg", &slot_id)) {
+ dev_warn(&pdev->dev, "reg property is missing for %s\n",
+ cnp->full_name);
+ continue;
+ }
+
+ if (slot_id >= ATMCI_MAX_NR_SLOTS) {
+ dev_warn(&pdev->dev, "can't have more than %d slots\n",
+ ATMCI_MAX_NR_SLOTS);
+ break;
+ }
+
+ if (of_property_read_u32(cnp, "bus-width",
+ &pdata->slot[slot_id].bus_width))
+ pdata->slot[slot_id].bus_width = 1;
+
+ pdata->slot[slot_id].detect_pin =
+ of_get_named_gpio(cnp, "cd-gpios", 0);
+
+ pdata->slot[slot_id].detect_is_active_high =
+ of_property_read_bool(cnp, "cd-inverted");
+
+ pdata->slot[slot_id].wp_pin =
+ of_get_named_gpio(cnp, "wp-gpios", 0);
+ }
+
+ return pdata;
+}
+#else /* CONFIG_OF */
+static inline struct mci_platform_data*
+atmci_of_init(struct platform_device *dev)
+{
+ return ERR_PTR(-EINVAL);
+}
+#endif
+
static inline unsigned int atmci_get_version(struct atmel_mci *host)
{
return atmci_readl(host, ATMCI_VERSION) & 0x00000fff;
@@ -774,7 +841,7 @@ static void atmci_dma_complete(void *arg)
dev_vdbg(&host->pdev->dev, "DMA complete\n");
- if (host->caps.has_dma)
+ if (host->caps.has_dma_conf_reg)
/* Disable DMA hardware handshaking on MCI */
atmci_writel(host, ATMCI_DMA, atmci_readl(host, ATMCI_DMA) & ~ATMCI_DMAEN);
@@ -961,7 +1028,9 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
maxburst = atmci_convert_chksize(host->dma_conf.dst_maxburst);
}
- atmci_writel(host, ATMCI_DMA, ATMCI_DMA_CHKSIZE(maxburst) | ATMCI_DMAEN);
+ if (host->caps.has_dma_conf_reg)
+ atmci_writel(host, ATMCI_DMA, ATMCI_DMA_CHKSIZE(maxburst) |
+ ATMCI_DMAEN);
sglen = dma_map_sg(chan->device->dev, data->sg,
data->sg_len, direction);
@@ -2046,6 +2115,13 @@ static int __init atmci_init_slot(struct atmel_mci *host,
slot->sdc_reg = sdc_reg;
slot->sdio_irq = sdio_irq;
+ dev_dbg(&mmc->class_dev,
+ "slot[%u]: bus_width=%u, detect_pin=%d, "
+ "detect_is_active_high=%s, wp_pin=%d\n",
+ id, slot_data->bus_width, slot_data->detect_pin,
+ slot_data->detect_is_active_high ? "true" : "false",
+ slot_data->wp_pin);
+
mmc->ops = &atmci_ops;
mmc->f_min = DIV_ROUND_UP(host->bus_hz, 512);
mmc->f_max = host->bus_hz / 2;
@@ -2169,7 +2245,10 @@ static bool atmci_configure_dma(struct atmel_mci *host)
pdata = host->pdev->dev.platform_data;
- if (pdata && find_slave_dev(pdata->dma_slave)) {
+ if (!pdata)
+ return false;
+
+ if (pdata->dma_slave && find_slave_dev(pdata->dma_slave)) {
dma_cap_mask_t mask;
/* Try to grab a DMA channel */
@@ -2210,8 +2289,8 @@ static void __init atmci_get_cap(struct atmel_mci *host)
dev_info(&host->pdev->dev,
"version: 0x%x\n", version);
- host->caps.has_dma = 0;
- host->caps.has_pdc = 1;
+ host->caps.has_dma_conf_reg = 0;
+ host->caps.has_pdc = ATMCI_PDC_CONNECTED;
host->caps.has_cfg_reg = 0;
host->caps.has_cstor_reg = 0;
host->caps.has_highspeed = 0;
@@ -2228,12 +2307,7 @@ static void __init atmci_get_cap(struct atmel_mci *host)
host->caps.has_odd_clk_div = 1;
case 0x400:
case 0x300:
-#ifdef CONFIG_AT_HDMAC
- host->caps.has_dma = 1;
-#else
- dev_info(&host->pdev->dev,
- "has dma capability but dma engine is not selected, then use pio\n");
-#endif
+ host->caps.has_dma_conf_reg = 1;
host->caps.has_pdc = 0;
host->caps.has_cfg_reg = 1;
host->caps.has_cstor_reg = 1;
@@ -2268,8 +2342,14 @@ static int __init atmci_probe(struct platform_device *pdev)
if (!regs)
return -ENXIO;
pdata = pdev->dev.platform_data;
- if (!pdata)
- return -ENXIO;
+ if (!pdata) {
+ pdata = atmci_of_init(pdev);
+ if (IS_ERR(pdata)) {
+ dev_err(&pdev->dev, "platform data not available\n");
+ return PTR_ERR(pdata);
+ }
+ }
+
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
@@ -2308,7 +2388,7 @@ static int __init atmci_probe(struct platform_device *pdev)
/* Get MCI capabilities and set operations according to it */
atmci_get_cap(host);
- if (host->caps.has_dma && atmci_configure_dma(host)) {
+ if (atmci_configure_dma(host)) {
host->prepare_data = &atmci_prepare_data_dma;
host->submit_data = &atmci_submit_data_dma;
host->stop_transfer = &atmci_stop_transfer_dma;
@@ -2487,6 +2567,7 @@ static struct platform_driver atmci_driver = {
.driver = {
.name = "atmel_mci",
.pm = ATMCI_PM_OPS,
+ .of_match_table = of_match_ptr(atmci_dt_ids),
},
};
diff --git a/drivers/mmc/host/bfin_sdh.c b/drivers/mmc/host/bfin_sdh.c
index a17dd73..b9b463e 100644
--- a/drivers/mmc/host/bfin_sdh.c
+++ b/drivers/mmc/host/bfin_sdh.c
@@ -24,9 +24,7 @@
#include <asm/portmux.h>
#include <asm/bfin_sdh.h>
-#if defined(CONFIG_BF51x)
-#define bfin_read_SDH_PWR_CTL bfin_read_RSI_PWR_CTL
-#define bfin_write_SDH_PWR_CTL bfin_write_RSI_PWR_CTL
+#if defined(CONFIG_BF51x) || defined(__ADSPBF60x__)
#define bfin_read_SDH_CLK_CTL bfin_read_RSI_CLK_CTL
#define bfin_write_SDH_CLK_CTL bfin_write_RSI_CLK_CTL
#define bfin_write_SDH_ARGUMENT bfin_write_RSI_ARGUMENT
@@ -45,8 +43,16 @@
#define bfin_write_SDH_E_STATUS bfin_write_RSI_E_STATUS
#define bfin_read_SDH_STATUS bfin_read_RSI_STATUS
#define bfin_write_SDH_MASK0 bfin_write_RSI_MASK0
+#define bfin_write_SDH_E_MASK bfin_write_RSI_E_MASK
#define bfin_read_SDH_CFG bfin_read_RSI_CFG
#define bfin_write_SDH_CFG bfin_write_RSI_CFG
+# if defined(__ADSPBF60x__)
+# define bfin_read_SDH_BLK_SIZE bfin_read_RSI_BLKSZ
+# define bfin_write_SDH_BLK_SIZE bfin_write_RSI_BLKSZ
+# else
+# define bfin_read_SDH_PWR_CTL bfin_read_RSI_PWR_CTL
+# define bfin_write_SDH_PWR_CTL bfin_write_RSI_PWR_CTL
+# endif
#endif
struct sdh_host {
@@ -62,6 +68,7 @@ struct sdh_host {
dma_addr_t sg_dma;
int dma_len;
+ unsigned long sclk;
unsigned int imask;
unsigned int power_mode;
unsigned int clk_div;
@@ -127,11 +134,15 @@ static int sdh_setup_data(struct sdh_host *host, struct mmc_data *data)
/* Only supports power-of-2 block size */
if (data->blksz & (data->blksz - 1))
return -EINVAL;
+#ifndef RSI_BLKSZ
data_ctl |= ((ffs(data->blksz) - 1) << 4);
+#else
+ bfin_write_SDH_BLK_SIZE(data->blksz);
+#endif
bfin_write_SDH_DATA_CTL(data_ctl);
/* the time of a host clock period in ns */
- cycle_ns = 1000000000 / (get_sclk() / (2 * (host->clk_div + 1)));
+ cycle_ns = 1000000000 / (host->sclk / (2 * (host->clk_div + 1)));
timeout = data->timeout_ns / cycle_ns;
timeout += data->timeout_clks;
bfin_write_SDH_DATA_TIMER(timeout);
@@ -145,8 +156,13 @@ static int sdh_setup_data(struct sdh_host *host, struct mmc_data *data)
sdh_enable_stat_irq(host, (DAT_CRC_FAIL | DAT_TIME_OUT | DAT_END));
host->dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, host->dma_dir);
-#if defined(CONFIG_BF54x)
- dma_cfg |= DMAFLOW_ARRAY | NDSIZE_5 | RESTART | WDSIZE_32 | DMAEN;
+#if defined(CONFIG_BF54x) || defined(CONFIG_BF60x)
+ dma_cfg |= DMAFLOW_ARRAY | RESTART | WDSIZE_32 | DMAEN;
+# ifdef RSI_BLKSZ
+ dma_cfg |= PSIZE_32 | NDSIZE_3;
+# else
+ dma_cfg |= NDSIZE_5;
+# endif
{
struct scatterlist *sg;
int i;
@@ -156,7 +172,7 @@ static int sdh_setup_data(struct sdh_host *host, struct mmc_data *data)
host->sg_cpu[i].x_count = sg_dma_len(sg) / 4;
host->sg_cpu[i].x_modify = 4;
dev_dbg(mmc_dev(host->mmc), "%d: start_addr:0x%lx, "
- "cfg:0x%x, x_count:0x%x, x_modify:0x%x\n",
+ "cfg:0x%lx, x_count:0x%lx, x_modify:0x%lx\n",
i, host->sg_cpu[i].start_addr,
host->sg_cpu[i].cfg, host->sg_cpu[i].x_count,
host->sg_cpu[i].x_modify);
@@ -172,6 +188,7 @@ static int sdh_setup_data(struct sdh_host *host, struct mmc_data *data)
set_dma_curr_desc_addr(host->dma_ch, (unsigned long *)host->sg_dma);
set_dma_x_count(host->dma_ch, 0);
set_dma_x_modify(host->dma_ch, 0);
+ SSYNC();
set_dma_config(host->dma_ch, dma_cfg);
#elif defined(CONFIG_BF51x)
/* RSI DMA doesn't work in array mode */
@@ -179,6 +196,7 @@ static int sdh_setup_data(struct sdh_host *host, struct mmc_data *data)
set_dma_start_addr(host->dma_ch, sg_dma_address(&data->sg[0]));
set_dma_x_count(host->dma_ch, length / 4);
set_dma_x_modify(host->dma_ch, 4);
+ SSYNC();
set_dma_config(host->dma_ch, dma_cfg);
#endif
bfin_write_SDH_DATA_CTL(bfin_read_SDH_DATA_CTL() | DTX_DMA_E | DTX_E);
@@ -296,7 +314,6 @@ static int sdh_data_done(struct sdh_host *host, unsigned int stat)
else
data->bytes_xfered = 0;
- sdh_disable_stat_irq(host, DAT_END | DAT_TIME_OUT | DAT_CRC_FAIL | RX_OVERRUN | TX_UNDERRUN);
bfin_write_SDH_STATUS_CLR(DAT_END_STAT | DAT_TIMEOUT_STAT | \
DAT_CRC_FAIL_STAT | DAT_BLK_END_STAT | RX_OVERRUN | TX_UNDERRUN);
bfin_write_SDH_DATA_CTL(0);
@@ -321,74 +338,115 @@ static void sdh_request(struct mmc_host *mmc, struct mmc_request *mrq)
dev_dbg(mmc_dev(host->mmc), "%s enter, mrp:%p, cmd:%p\n", __func__, mrq, mrq->cmd);
WARN_ON(host->mrq != NULL);
+ spin_lock(&host->lock);
host->mrq = mrq;
host->data = mrq->data;
if (mrq->data && mrq->data->flags & MMC_DATA_READ) {
ret = sdh_setup_data(host, mrq->data);
if (ret)
- return;
+ goto data_err;
}
sdh_start_cmd(host, mrq->cmd);
+data_err:
+ spin_unlock(&host->lock);
}
static void sdh_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct sdh_host *host;
- unsigned long flags;
u16 clk_ctl = 0;
+#ifndef RSI_BLKSZ
u16 pwr_ctl = 0;
+#endif
u16 cfg;
host = mmc_priv(mmc);
- spin_lock_irqsave(&host->lock, flags);
- if (ios->clock) {
- unsigned long sys_clk, ios_clk;
- unsigned char clk_div;
- ios_clk = 2 * ios->clock;
- sys_clk = get_sclk();
- clk_div = sys_clk / ios_clk;
- if (sys_clk % ios_clk == 0)
- clk_div -= 1;
- clk_div = min_t(unsigned char, clk_div, 0xFF);
- clk_ctl |= clk_div;
- clk_ctl |= CLK_E;
- host->clk_div = clk_div;
- } else
- sdh_stop_clock(host);
-
- if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
-#ifdef CONFIG_SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND
- pwr_ctl |= ROD_CTL;
-#else
- pwr_ctl |= SD_CMD_OD | ROD_CTL;
-#endif
+ spin_lock(&host->lock);
- if (ios->bus_width == MMC_BUS_WIDTH_4) {
- cfg = bfin_read_SDH_CFG();
+ cfg = bfin_read_SDH_CFG();
+ cfg |= MWE;
+ switch (ios->bus_width) {
+ case MMC_BUS_WIDTH_4:
+#ifndef RSI_BLKSZ
cfg &= ~PD_SDDAT3;
+#endif
cfg |= PUP_SDDAT3;
/* Enable 4 bit SDIO */
- cfg |= (SD4E | MWE);
- bfin_write_SDH_CFG(cfg);
- clk_ctl |= WIDE_BUS;
- } else {
- cfg = bfin_read_SDH_CFG();
- cfg |= MWE;
- bfin_write_SDH_CFG(cfg);
+ cfg |= SD4E;
+ clk_ctl |= WIDE_BUS_4;
+ break;
+ case MMC_BUS_WIDTH_8:
+#ifndef RSI_BLKSZ
+ cfg &= ~PD_SDDAT3;
+#endif
+ cfg |= PUP_SDDAT3;
+ /* Disable 4 bit SDIO */
+ cfg &= ~SD4E;
+ clk_ctl |= BYTE_BUS_8;
+ break;
+ default:
+ cfg &= ~PUP_SDDAT3;
+ /* Disable 4 bit SDIO */
+ cfg &= ~SD4E;
}
- bfin_write_SDH_CLK_CTL(clk_ctl);
-
host->power_mode = ios->power_mode;
- if (ios->power_mode == MMC_POWER_ON)
+#ifndef RSI_BLKSZ
+ if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
+ pwr_ctl |= ROD_CTL;
+# ifndef CONFIG_SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND
+ pwr_ctl |= SD_CMD_OD;
+# endif
+ }
+
+ if (ios->power_mode != MMC_POWER_OFF)
pwr_ctl |= PWR_ON;
+ else
+ pwr_ctl &= ~PWR_ON;
bfin_write_SDH_PWR_CTL(pwr_ctl);
+#else
+# ifndef CONFIG_SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND
+ if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
+ cfg |= SD_CMD_OD;
+ else
+ cfg &= ~SD_CMD_OD;
+# endif
+
+ if (ios->power_mode != MMC_POWER_OFF)
+ cfg |= PWR_ON;
+ else
+ cfg &= ~PWR_ON;
+
+ bfin_write_SDH_CFG(cfg);
+#endif
SSYNC();
- spin_unlock_irqrestore(&host->lock, flags);
+ if (ios->power_mode == MMC_POWER_ON && ios->clock) {
+ unsigned char clk_div;
+ clk_div = (get_sclk() / ios->clock - 1) / 2;
+ clk_div = min_t(unsigned char, clk_div, 0xFF);
+ clk_ctl |= clk_div;
+ clk_ctl |= CLK_E;
+ host->clk_div = clk_div;
+ bfin_write_SDH_CLK_CTL(clk_ctl);
+
+ } else
+ sdh_stop_clock(host);
+
+ /* set up sdh interrupt mask*/
+ if (ios->power_mode == MMC_POWER_ON)
+ bfin_write_SDH_MASK0(DAT_END | DAT_TIME_OUT | DAT_CRC_FAIL |
+ RX_OVERRUN | TX_UNDERRUN | CMD_SENT | CMD_RESP_END |
+ CMD_TIME_OUT | CMD_CRC_FAIL);
+ else
+ bfin_write_SDH_MASK0(0);
+ SSYNC();
+
+ spin_unlock(&host->lock);
dev_dbg(mmc_dev(host->mmc), "SDH: clk_div = 0x%x actual clock:%ld expected clock:%d\n",
host->clk_div,
@@ -405,7 +463,7 @@ static irqreturn_t sdh_dma_irq(int irq, void *devid)
{
struct sdh_host *host = devid;
- dev_dbg(mmc_dev(host->mmc), "%s enter, irq_stat: 0x%04x\n", __func__,
+ dev_dbg(mmc_dev(host->mmc), "%s enter, irq_stat: 0x%04lx\n", __func__,
get_dma_curr_irqstat(host->dma_ch));
clear_dma_irqstat(host->dma_ch);
SSYNC();
@@ -420,6 +478,9 @@ static irqreturn_t sdh_stat_irq(int irq, void *devid)
int handled = 0;
dev_dbg(mmc_dev(host->mmc), "%s enter\n", __func__);
+
+ spin_lock(&host->lock);
+
status = bfin_read_SDH_E_STATUS();
if (status & SD_CARD_DET) {
mmc_detect_change(host->mmc, 0);
@@ -437,11 +498,30 @@ static irqreturn_t sdh_stat_irq(int irq, void *devid)
if (status & (DAT_END | DAT_TIME_OUT | DAT_CRC_FAIL | RX_OVERRUN | TX_UNDERRUN))
handled |= sdh_data_done(host, status);
+ spin_unlock(&host->lock);
+
dev_dbg(mmc_dev(host->mmc), "%s exit\n\n", __func__);
return IRQ_RETVAL(handled);
}
+static void sdh_reset(void)
+{
+#if defined(CONFIG_BF54x)
+ /* Secure Digital Host shares DMA with Nand controller */
+ bfin_write_DMAC1_PERIMUX(bfin_read_DMAC1_PERIMUX() | 0x1);
+#endif
+
+ bfin_write_SDH_CFG(bfin_read_SDH_CFG() | CLKS_EN);
+ SSYNC();
+
+ /* Disable the card-insert detection pin; MMC_CAP_NEEDS_POLL is
+ * set, so the mmc stack will do the detection by polling.
+ */
+ bfin_write_SDH_CFG((bfin_read_SDH_CFG() & 0x1F) | (PUP_SDDAT | PUP_SDDAT3));
+ SSYNC();
+}
+
static int __devinit sdh_probe(struct platform_device *pdev)
{
struct mmc_host *mmc;
@@ -462,8 +542,16 @@ static int __devinit sdh_probe(struct platform_device *pdev)
}
mmc->ops = &sdh_ops;
- mmc->max_segs = 32;
+#if defined(CONFIG_BF51x)
+ mmc->max_segs = 1;
+#else
+ mmc->max_segs = PAGE_SIZE / sizeof(struct dma_desc_array);
+#endif
+#ifdef RSI_BLKSZ
+ mmc->max_seg_size = -1;
+#else
mmc->max_seg_size = 1 << 16;
+#endif
mmc->max_blk_size = 1 << 11;
mmc->max_blk_count = 1 << 11;
mmc->max_req_size = PAGE_SIZE;
@@ -473,6 +561,7 @@ static int __devinit sdh_probe(struct platform_device *pdev)
mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_NEEDS_POLL;
host = mmc_priv(mmc);
host->mmc = mmc;
+ host->sclk = get_sclk();
spin_lock_init(&host->lock);
host->irq = drv_data->irq_int0;
@@ -497,7 +586,6 @@ static int __devinit sdh_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, mmc);
- mmc_add_host(mmc);
ret = request_irq(host->irq, sdh_stat_irq, 0, "SDH Status IRQ", host);
if (ret) {
@@ -510,20 +598,10 @@ static int __devinit sdh_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "unable to request peripheral pins\n");
goto out4;
}
-#if defined(CONFIG_BF54x)
- /* Secure Digital Host shares DMA with Nand controller */
- bfin_write_DMAC1_PERIMUX(bfin_read_DMAC1_PERIMUX() | 0x1);
-#endif
-
- bfin_write_SDH_CFG(bfin_read_SDH_CFG() | CLKS_EN);
- SSYNC();
- /* Disable card inserting detection pin. set MMC_CAP_NEES_POLL, and
- * mmc stack will do the detection.
- */
- bfin_write_SDH_CFG((bfin_read_SDH_CFG() & 0x1F) | (PUP_SDDAT | PUP_SDDAT3));
- SSYNC();
+ sdh_reset();
+ mmc_add_host(mmc);
return 0;
out4:
@@ -571,7 +649,6 @@ static int sdh_suspend(struct platform_device *dev, pm_message_t state)
if (mmc)
ret = mmc_suspend_host(mmc);
- bfin_write_SDH_PWR_CTL(bfin_read_SDH_PWR_CTL() & ~PWR_ON);
peripheral_free_list(drv_data->pin_req);
return ret;
@@ -589,16 +666,7 @@ static int sdh_resume(struct platform_device *dev)
return ret;
}
- bfin_write_SDH_PWR_CTL(bfin_read_SDH_PWR_CTL() | PWR_ON);
-#if defined(CONFIG_BF54x)
- /* Secure Digital Host shares DMA with Nand controller */
- bfin_write_DMAC1_PERIMUX(bfin_read_DMAC1_PERIMUX() | 0x1);
-#endif
- bfin_write_SDH_CFG(bfin_read_SDH_CFG() | CLKS_EN);
- SSYNC();
-
- bfin_write_SDH_CFG((bfin_read_SDH_CFG() & 0x1F) | (PUP_SDDAT | PUP_SDDAT3));
- SSYNC();
+ sdh_reset();
if (mmc)
ret = mmc_resume_host(mmc);
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index 3dfd347..2063677 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -30,11 +30,12 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/delay.h>
+#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
+#include <linux/edma.h>
#include <linux/mmc/mmc.h>
#include <linux/platform_data/mmc-davinci.h>
-#include <mach/edma.h>
/*
* Register Definitions
@@ -200,21 +201,13 @@ struct mmc_davinci_host {
u32 bytes_left;
u32 rxdma, txdma;
+ struct dma_chan *dma_tx;
+ struct dma_chan *dma_rx;
bool use_dma;
bool do_dma;
bool sdio_int;
bool active_request;
- /* Scatterlist DMA uses one or more parameter RAM entries:
- * the main one (associated with rxdma or txdma) plus zero or
- * more links. The entries for a given transfer differ only
- * by memory buffer (address, length) and link field.
- */
- struct edmacc_param tx_template;
- struct edmacc_param rx_template;
- unsigned n_link;
- u32 links[MAX_NR_SG - 1];
-
/* For PIO we walk scatterlists one segment at a time. */
unsigned int sg_len;
struct scatterlist *sg;
@@ -410,153 +403,74 @@ static void mmc_davinci_start_command(struct mmc_davinci_host *host,
static void davinci_abort_dma(struct mmc_davinci_host *host)
{
- int sync_dev;
+ struct dma_chan *sync_dev;
if (host->data_dir == DAVINCI_MMC_DATADIR_READ)
- sync_dev = host->rxdma;
+ sync_dev = host->dma_rx;
else
- sync_dev = host->txdma;
-
- edma_stop(sync_dev);
- edma_clean_channel(sync_dev);
-}
-
-static void
-mmc_davinci_xfer_done(struct mmc_davinci_host *host, struct mmc_data *data);
-
-static void mmc_davinci_dma_cb(unsigned channel, u16 ch_status, void *data)
-{
- if (DMA_COMPLETE != ch_status) {
- struct mmc_davinci_host *host = data;
-
- /* Currently means: DMA Event Missed, or "null" transfer
- * request was seen. In the future, TC errors (like bad
- * addresses) might be presented too.
- */
- dev_warn(mmc_dev(host->mmc), "DMA %s error\n",
- (host->data->flags & MMC_DATA_WRITE)
- ? "write" : "read");
- host->data->error = -EIO;
- mmc_davinci_xfer_done(host, host->data);
- }
-}
-
-/* Set up tx or rx template, to be modified and updated later */
-static void __init mmc_davinci_dma_setup(struct mmc_davinci_host *host,
- bool tx, struct edmacc_param *template)
-{
- unsigned sync_dev;
- const u16 acnt = 4;
- const u16 bcnt = rw_threshold >> 2;
- const u16 ccnt = 0;
- u32 src_port = 0;
- u32 dst_port = 0;
- s16 src_bidx, dst_bidx;
- s16 src_cidx, dst_cidx;
-
- /*
- * A-B Sync transfer: each DMA request is for one "frame" of
- * rw_threshold bytes, broken into "acnt"-size chunks repeated
- * "bcnt" times. Each segment needs "ccnt" such frames; since
- * we tell the block layer our mmc->max_seg_size limit, we can
- * trust (later) that it's within bounds.
- *
- * The FIFOs are read/written in 4-byte chunks (acnt == 4) and
- * EDMA will optimize memory operations to use larger bursts.
- */
- if (tx) {
- sync_dev = host->txdma;
-
- /* src_prt, ccnt, and link to be set up later */
- src_bidx = acnt;
- src_cidx = acnt * bcnt;
-
- dst_port = host->mem_res->start + DAVINCI_MMCDXR;
- dst_bidx = 0;
- dst_cidx = 0;
- } else {
- sync_dev = host->rxdma;
-
- src_port = host->mem_res->start + DAVINCI_MMCDRR;
- src_bidx = 0;
- src_cidx = 0;
-
- /* dst_prt, ccnt, and link to be set up later */
- dst_bidx = acnt;
- dst_cidx = acnt * bcnt;
- }
-
- /*
- * We can't use FIFO mode for the FIFOs because MMC FIFO addresses
- * are not 256-bit (32-byte) aligned. So we use INCR, and the W8BIT
- * parameter is ignored.
- */
- edma_set_src(sync_dev, src_port, INCR, W8BIT);
- edma_set_dest(sync_dev, dst_port, INCR, W8BIT);
+ sync_dev = host->dma_tx;
- edma_set_src_index(sync_dev, src_bidx, src_cidx);
- edma_set_dest_index(sync_dev, dst_bidx, dst_cidx);
-
- edma_set_transfer_params(sync_dev, acnt, bcnt, ccnt, 8, ABSYNC);
-
- edma_read_slot(sync_dev, template);
-
- /* don't bother with irqs or chaining */
- template->opt |= EDMA_CHAN_SLOT(sync_dev) << 12;
+ dmaengine_terminate_all(sync_dev);
}
-static void mmc_davinci_send_dma_request(struct mmc_davinci_host *host,
+static int mmc_davinci_send_dma_request(struct mmc_davinci_host *host,
struct mmc_data *data)
{
- struct edmacc_param *template;
- int channel, slot;
- unsigned link;
- struct scatterlist *sg;
- unsigned sg_len;
- unsigned bytes_left = host->bytes_left;
- const unsigned shift = ffs(rw_threshold) - 1;
+ struct dma_chan *chan;
+ struct dma_async_tx_descriptor *desc;
+ int ret = 0;
if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) {
- template = &host->tx_template;
- channel = host->txdma;
+ struct dma_slave_config dma_tx_conf = {
+ .direction = DMA_MEM_TO_DEV,
+ .dst_addr = host->mem_res->start + DAVINCI_MMCDXR,
+ .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ .dst_maxburst =
+ rw_threshold / DMA_SLAVE_BUSWIDTH_4_BYTES,
+ };
+ chan = host->dma_tx;
+ dmaengine_slave_config(host->dma_tx, &dma_tx_conf);
+
+ desc = dmaengine_prep_slave_sg(host->dma_tx,
+ data->sg,
+ host->sg_len,
+ DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+		if (!desc) {
+			dev_dbg(mmc_dev(host->mmc),
+				"failed to allocate DMA TX descriptor\n");
+			ret = -ENOMEM;
+			goto out;
+		}
} else {
- template = &host->rx_template;
- channel = host->rxdma;
- }
-
- /* We know sg_len and ccnt will never be out of range because
- * we told the mmc layer which in turn tells the block layer
- * to ensure that it only hands us one scatterlist segment
- * per EDMA PARAM entry. Update the PARAM
- * entries needed for each segment of this scatterlist.
- */
- for (slot = channel, link = 0, sg = data->sg, sg_len = host->sg_len;
- sg_len-- != 0 && bytes_left;
- sg = sg_next(sg), slot = host->links[link++]) {
- u32 buf = sg_dma_address(sg);
- unsigned count = sg_dma_len(sg);
-
- template->link_bcntrld = sg_len
- ? (EDMA_CHAN_SLOT(host->links[link]) << 5)
- : 0xffff;
-
- if (count > bytes_left)
- count = bytes_left;
- bytes_left -= count;
-
- if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE)
- template->src = buf;
- else
- template->dst = buf;
- template->ccnt = count >> shift;
-
- edma_write_slot(slot, template);
+ struct dma_slave_config dma_rx_conf = {
+ .direction = DMA_DEV_TO_MEM,
+ .src_addr = host->mem_res->start + DAVINCI_MMCDRR,
+ .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ .src_maxburst =
+ rw_threshold / DMA_SLAVE_BUSWIDTH_4_BYTES,
+ };
+ chan = host->dma_rx;
+ dmaengine_slave_config(host->dma_rx, &dma_rx_conf);
+
+ desc = dmaengine_prep_slave_sg(host->dma_rx,
+ data->sg,
+ host->sg_len,
+ DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+		if (!desc) {
+			dev_dbg(mmc_dev(host->mmc),
+				"failed to allocate DMA RX descriptor\n");
+			ret = -ENOMEM;
+			goto out;
+		}
}
- if (host->version == MMC_CTLR_VERSION_2)
- edma_clear_event(channel);
+ dmaengine_submit(desc);
+ dma_async_issue_pending(chan);
- edma_start(channel);
+out:
+ return ret;
}
static int mmc_davinci_start_dma_transfer(struct mmc_davinci_host *host,
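For readers new to the dmaengine slave API this conversion targets, here is a minimal sketch of the canonical sequence the new code follows: configure, prepare, submit, issue. The names (example_issue_rx, the fifo argument) are hypothetical and error handling is trimmed; this is an illustration, not part of the patch.

#include <linux/dmaengine.h>

/* Sketch only: issue a device-to-memory slave transfer on @chan */
static int example_issue_rx(struct dma_chan *chan, struct scatterlist *sg,
			    unsigned int sg_len, dma_addr_t fifo)
{
	struct dma_slave_config conf = {
		.direction	= DMA_DEV_TO_MEM,
		.src_addr	= fifo,		/* device FIFO address */
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst	= 8,
	};
	struct dma_async_tx_descriptor *desc;

	dmaengine_slave_config(chan, &conf);

	desc = dmaengine_prep_slave_sg(chan, sg, sg_len, DMA_DEV_TO_MEM,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -ENOMEM;			/* no descriptor available */

	dmaengine_submit(desc);			/* queue on the channel */
	dma_async_issue_pending(chan);		/* kick the engine */
	return 0;
}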
@@ -564,6 +478,7 @@ static int mmc_davinci_start_dma_transfer(struct mmc_davinci_host *host,
{
int i;
int mask = rw_threshold - 1;
+ int ret = 0;
host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
((data->flags & MMC_DATA_WRITE)
@@ -583,70 +498,48 @@ static int mmc_davinci_start_dma_transfer(struct mmc_davinci_host *host,
}
host->do_dma = 1;
- mmc_davinci_send_dma_request(host, data);
+ ret = mmc_davinci_send_dma_request(host, data);
- return 0;
+ return ret;
}
static void __init_or_module
davinci_release_dma_channels(struct mmc_davinci_host *host)
{
- unsigned i;
-
if (!host->use_dma)
return;
- for (i = 0; i < host->n_link; i++)
- edma_free_slot(host->links[i]);
-
- edma_free_channel(host->txdma);
- edma_free_channel(host->rxdma);
+ dma_release_channel(host->dma_tx);
+ dma_release_channel(host->dma_rx);
}
static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host)
{
- u32 link_size;
- int r, i;
-
- /* Acquire master DMA write channel */
- r = edma_alloc_channel(host->txdma, mmc_davinci_dma_cb, host,
- EVENTQ_DEFAULT);
- if (r < 0) {
- dev_warn(mmc_dev(host->mmc), "alloc %s channel err %d\n",
- "tx", r);
- return r;
- }
- mmc_davinci_dma_setup(host, true, &host->tx_template);
-
- /* Acquire master DMA read channel */
- r = edma_alloc_channel(host->rxdma, mmc_davinci_dma_cb, host,
- EVENTQ_DEFAULT);
- if (r < 0) {
- dev_warn(mmc_dev(host->mmc), "alloc %s channel err %d\n",
- "rx", r);
- goto free_master_write;
+ int r;
+ dma_cap_mask_t mask;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ host->dma_tx =
+ dma_request_channel(mask, edma_filter_fn, &host->txdma);
+ if (!host->dma_tx) {
+ dev_err(mmc_dev(host->mmc), "Can't get dma_tx channel\n");
+ return -ENODEV;
}
- mmc_davinci_dma_setup(host, false, &host->rx_template);
- /* Allocate parameter RAM slots, which will later be bound to a
- * channel as needed to handle a scatterlist.
- */
- link_size = min_t(unsigned, host->nr_sg, ARRAY_SIZE(host->links));
- for (i = 0; i < link_size; i++) {
- r = edma_alloc_slot(EDMA_CTLR(host->txdma), EDMA_SLOT_ANY);
- if (r < 0) {
- dev_dbg(mmc_dev(host->mmc), "dma PaRAM alloc --> %d\n",
- r);
- break;
- }
- host->links[i] = r;
+ host->dma_rx =
+ dma_request_channel(mask, edma_filter_fn, &host->rxdma);
+ if (!host->dma_rx) {
+ dev_err(mmc_dev(host->mmc), "Can't get dma_rx channel\n");
+ r = -ENODEV;
+ goto free_master_write;
}
- host->n_link = i;
return 0;
free_master_write:
- edma_free_channel(host->txdma);
+ dma_release_channel(host->dma_tx);
return r;
}
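The channel request above relies on edma_filter_fn from the EDMA dmaengine back-end. As a rough sketch under assumed names, a filter callback simply inspects every free channel the core offers and returns true on a match; the filter runs synchronously inside dma_request_channel(), so passing a stack variable as the parameter is safe.

#include <linux/dmaengine.h>

/* Hypothetical filter: match a channel by its id (sketch only) */
static bool example_filter(struct dma_chan *chan, void *param)
{
	int wanted = *(int *)param;

	return chan->chan_id == wanted;
}

static struct dma_chan *example_get_channel(int id)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* the core walks free channels and calls the filter on each */
	return dma_request_channel(mask, example_filter, &id);
}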
@@ -1359,7 +1252,7 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
* Each hw_seg uses one EDMA parameter RAM slot, always one
* channel and then usually some linked slots.
*/
- mmc->max_segs = 1 + host->n_link;
+ mmc->max_segs = MAX_NR_SG;
/* EDMA limit per hw segment (one or two MBytes) */
mmc->max_seg_size = MAX_CCNT * rw_threshold;
diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c
new file mode 100644
index 0000000..660bbc5
--- /dev/null
+++ b/drivers/mmc/host/dw_mmc-exynos.c
@@ -0,0 +1,253 @@
+/*
+ * Exynos Specific Extensions for Synopsys DW Multimedia Card Interface driver
+ *
+ * Copyright (C) 2012, Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/dw_mmc.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+
+#include "dw_mmc.h"
+#include "dw_mmc-pltfm.h"
+
+#define NUM_PINS(x)			((x) + 2)
+
+#define SDMMC_CLKSEL 0x09C
+#define SDMMC_CLKSEL_CCLK_SAMPLE(x) (((x) & 7) << 0)
+#define SDMMC_CLKSEL_CCLK_DRIVE(x) (((x) & 7) << 16)
+#define SDMMC_CLKSEL_CCLK_DIVIDER(x) (((x) & 7) << 24)
+#define SDMMC_CLKSEL_GET_DRV_WD3(x) (((x) >> 16) & 0x7)
+#define SDMMC_CLKSEL_TIMING(x, y, z) (SDMMC_CLKSEL_CCLK_SAMPLE(x) | \
+ SDMMC_CLKSEL_CCLK_DRIVE(y) | \
+ SDMMC_CLKSEL_CCLK_DIVIDER(z))
+
+#define SDMMC_CMD_USE_HOLD_REG BIT(29)
+
+#define EXYNOS4210_FIXED_CIU_CLK_DIV 2
+#define EXYNOS4412_FIXED_CIU_CLK_DIV 4
+
+/* Variations in Exynos specific dw-mshc controller */
+enum dw_mci_exynos_type {
+ DW_MCI_TYPE_EXYNOS4210,
+ DW_MCI_TYPE_EXYNOS4412,
+ DW_MCI_TYPE_EXYNOS5250,
+};
+
+/* Exynos implementation specific driver private data */
+struct dw_mci_exynos_priv_data {
+ enum dw_mci_exynos_type ctrl_type;
+ u8 ciu_div;
+ u32 sdr_timing;
+ u32 ddr_timing;
+};
+
+static struct dw_mci_exynos_compatible {
+ char *compatible;
+ enum dw_mci_exynos_type ctrl_type;
+} exynos_compat[] = {
+ {
+ .compatible = "samsung,exynos4210-dw-mshc",
+ .ctrl_type = DW_MCI_TYPE_EXYNOS4210,
+ }, {
+ .compatible = "samsung,exynos4412-dw-mshc",
+ .ctrl_type = DW_MCI_TYPE_EXYNOS4412,
+ }, {
+ .compatible = "samsung,exynos5250-dw-mshc",
+ .ctrl_type = DW_MCI_TYPE_EXYNOS5250,
+ },
+};
+
+static int dw_mci_exynos_priv_init(struct dw_mci *host)
+{
+ struct dw_mci_exynos_priv_data *priv;
+ int idx;
+
+ priv = devm_kzalloc(host->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ dev_err(host->dev, "mem alloc failed for private data\n");
+ return -ENOMEM;
+ }
+
+ for (idx = 0; idx < ARRAY_SIZE(exynos_compat); idx++) {
+ if (of_device_is_compatible(host->dev->of_node,
+ exynos_compat[idx].compatible))
+ priv->ctrl_type = exynos_compat[idx].ctrl_type;
+ }
+
+ host->priv = priv;
+ return 0;
+}
+
+static int dw_mci_exynos_setup_clock(struct dw_mci *host)
+{
+ struct dw_mci_exynos_priv_data *priv = host->priv;
+
+ if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS5250)
+ host->bus_hz /= (priv->ciu_div + 1);
+ else if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS4412)
+ host->bus_hz /= EXYNOS4412_FIXED_CIU_CLK_DIV;
+ else if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS4210)
+ host->bus_hz /= EXYNOS4210_FIXED_CIU_CLK_DIV;
+
+ return 0;
+}
+
+static void dw_mci_exynos_prepare_command(struct dw_mci *host, u32 *cmdr)
+{
+ /*
+ * Exynos4412 and Exynos5250 extends the use of CMD register with the
+ * use of bit 29 (which is reserved on standard MSHC controllers) for
+ * optionally bypassing the HOLD register for command and data. The
+ * HOLD register should be bypassed in case there is no phase shift
+ * applied on CMD/DATA that is sent to the card.
+ */
+ if (SDMMC_CLKSEL_GET_DRV_WD3(mci_readl(host, CLKSEL)))
+ *cmdr |= SDMMC_CMD_USE_HOLD_REG;
+}
+
+static void dw_mci_exynos_set_ios(struct dw_mci *host, struct mmc_ios *ios)
+{
+ struct dw_mci_exynos_priv_data *priv = host->priv;
+
+ if (ios->timing == MMC_TIMING_UHS_DDR50)
+ mci_writel(host, CLKSEL, priv->ddr_timing);
+ else
+ mci_writel(host, CLKSEL, priv->sdr_timing);
+}
+
+static int dw_mci_exynos_parse_dt(struct dw_mci *host)
+{
+ struct dw_mci_exynos_priv_data *priv = host->priv;
+ struct device_node *np = host->dev->of_node;
+ u32 timing[2];
+ u32 div = 0;
+ int ret;
+
+ of_property_read_u32(np, "samsung,dw-mshc-ciu-div", &div);
+ priv->ciu_div = div;
+
+ ret = of_property_read_u32_array(np,
+ "samsung,dw-mshc-sdr-timing", timing, 2);
+ if (ret)
+ return ret;
+
+ priv->sdr_timing = SDMMC_CLKSEL_TIMING(timing[0], timing[1], div);
+
+ ret = of_property_read_u32_array(np,
+ "samsung,dw-mshc-ddr-timing", timing, 2);
+ if (ret)
+ return ret;
+
+ priv->ddr_timing = SDMMC_CLKSEL_TIMING(timing[0], timing[1], div);
+ return 0;
+}
+
+static int dw_mci_exynos_setup_bus(struct dw_mci *host,
+ struct device_node *slot_np, u8 bus_width)
+{
+ int idx, gpio, ret;
+
+ if (!slot_np)
+ return -EINVAL;
+
+ /* cmd + clock + bus-width pins */
+ for (idx = 0; idx < NUM_PINS(bus_width); idx++) {
+ gpio = of_get_gpio(slot_np, idx);
+ if (!gpio_is_valid(gpio)) {
+ dev_err(host->dev, "invalid gpio: %d\n", gpio);
+ return -EINVAL;
+ }
+
+ ret = devm_gpio_request(host->dev, gpio, "dw-mci-bus");
+ if (ret) {
+ dev_err(host->dev, "gpio [%d] request failed\n", gpio);
+ return -EBUSY;
+ }
+ }
+
+ gpio = of_get_named_gpio(slot_np, "wp-gpios", 0);
+ if (gpio_is_valid(gpio)) {
+ if (devm_gpio_request(host->dev, gpio, "dw-mci-wp"))
+ dev_info(host->dev, "gpio [%d] request failed\n",
+ gpio);
+ } else {
+ dev_info(host->dev, "wp gpio not available");
+ host->pdata->quirks |= DW_MCI_QUIRK_NO_WRITE_PROTECT;
+ }
+
+ if (host->pdata->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
+ return 0;
+
+ gpio = of_get_named_gpio(slot_np, "samsung,cd-pinmux-gpio", 0);
+ if (gpio_is_valid(gpio)) {
+ if (devm_gpio_request(host->dev, gpio, "dw-mci-cd"))
+ dev_err(host->dev, "gpio [%d] request failed\n", gpio);
+ } else {
+ dev_info(host->dev, "cd gpio not available");
+ }
+
+ return 0;
+}
+
+/* Exynos5250 controller specific capabilities */
+static unsigned long exynos5250_dwmmc_caps[4] = {
+ MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR |
+ MMC_CAP_8_BIT_DATA | MMC_CAP_CMD23,
+ MMC_CAP_CMD23,
+ MMC_CAP_CMD23,
+ MMC_CAP_CMD23,
+};
+
+static struct dw_mci_drv_data exynos5250_drv_data = {
+ .caps = exynos5250_dwmmc_caps,
+ .init = dw_mci_exynos_priv_init,
+ .setup_clock = dw_mci_exynos_setup_clock,
+ .prepare_command = dw_mci_exynos_prepare_command,
+ .set_ios = dw_mci_exynos_set_ios,
+ .parse_dt = dw_mci_exynos_parse_dt,
+ .setup_bus = dw_mci_exynos_setup_bus,
+};
+
+static const struct of_device_id dw_mci_exynos_match[] = {
+ { .compatible = "samsung,exynos5250-dw-mshc",
+ .data = (void *)&exynos5250_drv_data, },
+ {},
+};
+MODULE_DEVICE_TABLE(of, dw_mci_exynos_match);
+
+static int dw_mci_exynos_probe(struct platform_device *pdev)
+{
+ struct dw_mci_drv_data *drv_data;
+ const struct of_device_id *match;
+
+ match = of_match_node(dw_mci_exynos_match, pdev->dev.of_node);
+ drv_data = match->data;
+ return dw_mci_pltfm_register(pdev, drv_data);
+}
+
+static struct platform_driver dw_mci_exynos_pltfm_driver = {
+ .probe = dw_mci_exynos_probe,
+	.remove		= __devexit_p(dw_mci_pltfm_remove),
+ .driver = {
+ .name = "dwmmc_exynos",
+ .of_match_table = of_match_ptr(dw_mci_exynos_match),
+ .pm = &dw_mci_pltfm_pmops,
+ },
+};
+
+module_platform_driver(dw_mci_exynos_pltfm_driver);
+
+MODULE_DESCRIPTION("Samsung Specific DW-MSHC Driver Extension");
+MODULE_AUTHOR("Thomas Abraham <thomas.ab@samsung.com");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:dwmmc-exynos");
diff --git a/drivers/mmc/host/dw_mmc-pci.c b/drivers/mmc/host/dw_mmc-pci.c
index dc0d25a..edb37e9 100644
--- a/drivers/mmc/host/dw_mmc-pci.c
+++ b/drivers/mmc/host/dw_mmc-pci.c
@@ -59,7 +59,7 @@ static int __devinit dw_mci_pci_probe(struct pci_dev *pdev,
host->irq = pdev->irq;
host->irq_flags = IRQF_SHARED;
- host->dev = pdev->dev;
+ host->dev = &pdev->dev;
host->pdata = &pci_board_data;
host->regs = pci_iomap(pdev, PCI_BAR_NO, COMPLETE_BAR);
@@ -140,18 +140,7 @@ static struct pci_driver dw_mci_pci_driver = {
},
};
-static int __init dw_mci_init(void)
-{
- return pci_register_driver(&dw_mci_pci_driver);
-}
-
-static void __exit dw_mci_exit(void)
-{
- pci_unregister_driver(&dw_mci_pci_driver);
-}
-
-module_init(dw_mci_init);
-module_exit(dw_mci_exit);
+module_pci_driver(dw_mci_pci_driver);
MODULE_DESCRIPTION("DW Multimedia Card PCI Interface driver");
MODULE_AUTHOR("Shashidhar Hiremath <shashidharh@vayavyalabs.com>");
diff --git a/drivers/mmc/host/dw_mmc-pltfm.c b/drivers/mmc/host/dw_mmc-pltfm.c
index 92ec3eb..c960ca7 100644
--- a/drivers/mmc/host/dw_mmc-pltfm.c
+++ b/drivers/mmc/host/dw_mmc-pltfm.c
@@ -19,59 +19,63 @@
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/dw_mmc.h>
+#include <linux/of.h>
+
#include "dw_mmc.h"
-static int dw_mci_pltfm_probe(struct platform_device *pdev)
+int dw_mci_pltfm_register(struct platform_device *pdev,
+ struct dw_mci_drv_data *drv_data)
{
struct dw_mci *host;
struct resource *regs;
int ret;
- host = kzalloc(sizeof(struct dw_mci), GFP_KERNEL);
+ host = devm_kzalloc(&pdev->dev, sizeof(struct dw_mci), GFP_KERNEL);
if (!host)
return -ENOMEM;
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!regs) {
- ret = -ENXIO;
- goto err_free;
- }
+ if (!regs)
+ return -ENXIO;
host->irq = platform_get_irq(pdev, 0);
- if (host->irq < 0) {
- ret = host->irq;
- goto err_free;
- }
+ if (host->irq < 0)
+ return host->irq;
- host->dev = pdev->dev;
+ host->drv_data = drv_data;
+ host->dev = &pdev->dev;
host->irq_flags = 0;
host->pdata = pdev->dev.platform_data;
- ret = -ENOMEM;
- host->regs = ioremap(regs->start, resource_size(regs));
+ host->regs = devm_request_and_ioremap(&pdev->dev, regs);
if (!host->regs)
- goto err_free;
+ return -ENOMEM;
+
+	if (drv_data && drv_data->init) {
+ ret = host->drv_data->init(host);
+ if (ret)
+ return ret;
+ }
+
platform_set_drvdata(pdev, host);
ret = dw_mci_probe(host);
- if (ret)
- goto err_out;
- return ret;
-err_out:
- iounmap(host->regs);
-err_free:
- kfree(host);
return ret;
}
+EXPORT_SYMBOL_GPL(dw_mci_pltfm_register);
-static int __exit dw_mci_pltfm_remove(struct platform_device *pdev)
+static int __devinit dw_mci_pltfm_probe(struct platform_device *pdev)
+{
+ return dw_mci_pltfm_register(pdev, NULL);
+}
+
+static int __devexit dw_mci_pltfm_remove(struct platform_device *pdev)
{
struct dw_mci *host = platform_get_drvdata(pdev);
platform_set_drvdata(pdev, NULL);
dw_mci_remove(host);
- iounmap(host->regs);
- kfree(host);
return 0;
}
+EXPORT_SYMBOL_GPL(dw_mci_pltfm_remove);
#ifdef CONFIG_PM_SLEEP
/*
@@ -105,12 +109,20 @@ static int dw_mci_pltfm_resume(struct device *dev)
#define dw_mci_pltfm_resume NULL
#endif /* CONFIG_PM_SLEEP */
-static SIMPLE_DEV_PM_OPS(dw_mci_pltfm_pmops, dw_mci_pltfm_suspend, dw_mci_pltfm_resume);
+SIMPLE_DEV_PM_OPS(dw_mci_pltfm_pmops, dw_mci_pltfm_suspend, dw_mci_pltfm_resume);
+EXPORT_SYMBOL_GPL(dw_mci_pltfm_pmops);
+
+static const struct of_device_id dw_mci_pltfm_match[] = {
+ { .compatible = "snps,dw-mshc", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, dw_mci_pltfm_match);
static struct platform_driver dw_mci_pltfm_driver = {
.remove = __exit_p(dw_mci_pltfm_remove),
.driver = {
.name = "dw_mmc",
+ .of_match_table = of_match_ptr(dw_mci_pltfm_match),
.pm = &dw_mci_pltfm_pmops,
},
};
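The devm_* conversion above is what lets the error-unwind labels disappear: resources acquired through the device are released automatically on probe failure or unbind. A simplified sketch of what devm_request_and_ioremap() does for the caller (the real helper also logs a diagnostic on failure):

static void __iomem *example_map(struct device *dev, struct resource *res)
{
	if (!devm_request_mem_region(dev, res->start, resource_size(res),
				     dev_name(dev)))
		return NULL;

	/* unmapped automatically when the device is unbound */
	return devm_ioremap(dev, res->start, resource_size(res));
}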
diff --git a/drivers/mmc/host/dw_mmc-pltfm.h b/drivers/mmc/host/dw_mmc-pltfm.h
new file mode 100644
index 0000000..301f245
--- /dev/null
+++ b/drivers/mmc/host/dw_mmc-pltfm.h
@@ -0,0 +1,20 @@
+/*
+ * Synopsys DesignWare Multimedia Card Interface Platform driver
+ *
+ * Copyright (C) 2012, Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _DW_MMC_PLTFM_H_
+#define _DW_MMC_PLTFM_H_
+
+extern int dw_mci_pltfm_register(struct platform_device *pdev,
+ struct dw_mci_drv_data *drv_data);
+extern int __devexit dw_mci_pltfm_remove(struct platform_device *pdev);
+extern const struct dev_pm_ops dw_mci_pltfm_pmops;
+
+#endif /* _DW_MMC_PLTFM_H_ */
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index af40d22..c2828f3 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -33,6 +33,7 @@
#include <linux/bitops.h>
#include <linux/regulator/consumer.h>
#include <linux/workqueue.h>
+#include <linux/of.h>
#include "dw_mmc.h"
@@ -230,6 +231,7 @@ static void dw_mci_set_timeout(struct dw_mci *host)
static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
{
struct mmc_data *data;
+ struct dw_mci_slot *slot = mmc_priv(mmc);
u32 cmdr;
cmd->error = -EINPROGRESS;
@@ -259,6 +261,9 @@ static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
cmdr |= SDMMC_CMD_DAT_WR;
}
+	if (slot->host->drv_data && slot->host->drv_data->prepare_command)
+ slot->host->drv_data->prepare_command(slot->host, &cmdr);
+
return cmdr;
}
@@ -266,7 +271,7 @@ static void dw_mci_start_command(struct dw_mci *host,
struct mmc_command *cmd, u32 cmd_flags)
{
host->cmd = cmd;
- dev_vdbg(&host->dev,
+ dev_vdbg(host->dev,
"start command: ARGR=0x%08x CMDR=0x%08x\n",
cmd->arg, cmd_flags);
@@ -308,7 +313,7 @@ static void dw_mci_dma_cleanup(struct dw_mci *host)
if (data)
if (!data->host_cookie)
- dma_unmap_sg(&host->dev,
+ dma_unmap_sg(host->dev,
data->sg,
data->sg_len,
dw_mci_get_dma_dir(data));
@@ -334,7 +339,7 @@ static void dw_mci_idmac_complete_dma(struct dw_mci *host)
{
struct mmc_data *data = host->data;
- dev_vdbg(&host->dev, "DMA complete\n");
+ dev_vdbg(host->dev, "DMA complete\n");
host->dma_ops->cleanup(host);
@@ -405,23 +410,11 @@ static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
static int dw_mci_idmac_init(struct dw_mci *host)
{
struct idmac_desc *p;
- int i, dma_support;
+ int i;
/* Number of descriptors in the ring buffer */
host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
- /* Check if Hardware Configuration Register has support for DMA */
- dma_support = (mci_readl(host, HCON) >> 16) & 0x3;
-
- if (!dma_support || dma_support > 2) {
- dev_err(&host->dev,
- "Host Controller does not support IDMA Tx.\n");
- host->dma_ops = NULL;
- return -ENODEV;
- }
-
- dev_info(&host->dev, "Using internal DMA controller.\n");
-
/* Forward link the descriptor list */
for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));
@@ -476,7 +469,7 @@ static int dw_mci_pre_dma_transfer(struct dw_mci *host,
return -EINVAL;
}
- sg_len = dma_map_sg(&host->dev,
+ sg_len = dma_map_sg(host->dev,
data->sg,
data->sg_len,
dw_mci_get_dma_dir(data));
@@ -519,7 +512,7 @@ static void dw_mci_post_req(struct mmc_host *mmc,
return;
if (data->host_cookie)
- dma_unmap_sg(&slot->host->dev,
+ dma_unmap_sg(slot->host->dev,
data->sg,
data->sg_len,
dw_mci_get_dma_dir(data));
@@ -545,7 +538,7 @@ static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
host->using_dma = 1;
- dev_vdbg(&host->dev,
+ dev_vdbg(host->dev,
"sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
(unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
sg_len);
@@ -814,6 +807,9 @@ static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
slot->clock = ios->clock;
}
+	if (slot->host->drv_data && slot->host->drv_data->set_ios)
+ slot->host->drv_data->set_ios(slot->host, ios);
+
switch (ios->power_mode) {
case MMC_POWER_UP:
set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
@@ -830,7 +826,9 @@ static int dw_mci_get_ro(struct mmc_host *mmc)
struct dw_mci_board *brd = slot->host->pdata;
/* Use platform get_ro function, else try on board write protect */
- if (brd->get_ro)
+ if (brd->quirks & DW_MCI_QUIRK_NO_WRITE_PROTECT)
+ read_only = 0;
+ else if (brd->get_ro)
read_only = brd->get_ro(slot->id);
else
read_only =
@@ -939,12 +937,12 @@ static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
slot = list_entry(host->queue.next,
struct dw_mci_slot, queue_node);
list_del(&slot->queue_node);
- dev_vdbg(&host->dev, "list not empty: %s is next\n",
+ dev_vdbg(host->dev, "list not empty: %s is next\n",
mmc_hostname(slot->mmc));
host->state = STATE_SENDING_CMD;
dw_mci_start_request(host, slot);
} else {
- dev_vdbg(&host->dev, "list empty\n");
+ dev_vdbg(host->dev, "list empty\n");
host->state = STATE_IDLE;
}
@@ -1083,7 +1081,7 @@ static void dw_mci_tasklet_func(unsigned long priv)
data->bytes_xfered = 0;
data->error = -ETIMEDOUT;
} else {
- dev_err(&host->dev,
+ dev_err(host->dev,
"data FIFO error "
"(status=%08x)\n",
status);
@@ -1767,12 +1765,60 @@ static void dw_mci_work_routine_card(struct work_struct *work)
}
}
-static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id)
+#ifdef CONFIG_OF
+/* given a slot id, find out the device node representing that slot */
+static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
+{
+ struct device_node *np;
+ const __be32 *addr;
+ int len;
+
+ if (!dev || !dev->of_node)
+ return NULL;
+
+ for_each_child_of_node(dev->of_node, np) {
+ addr = of_get_property(np, "reg", &len);
+ if (!addr || (len < sizeof(int)))
+ continue;
+ if (be32_to_cpup(addr) == slot)
+ return np;
+ }
+ return NULL;
+}
+
+/* find out bus-width for a given slot */
+static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
+{
+ struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
+ u32 bus_wd = 1;
+
+ if (!np)
+ return 1;
+
+ if (of_property_read_u32(np, "bus-width", &bus_wd))
+ dev_err(dev, "bus-width property not found, assuming width"
+ " as 1\n");
+ return bus_wd;
+}
+#else /* CONFIG_OF */
+static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
+{
+ return 1;
+}
+static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
+{
+ return NULL;
+}
+#endif /* CONFIG_OF */
+
+static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
{
struct mmc_host *mmc;
struct dw_mci_slot *slot;
+ int ctrl_id, ret;
+ u8 bus_width;
- mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), &host->dev);
+ mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
if (!mmc)
return -ENOMEM;
@@ -1780,6 +1826,7 @@ static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id)
slot->id = id;
slot->mmc = mmc;
slot->host = host;
+ host->slot[id] = slot;
mmc->ops = &dw_mci_ops;
mmc->f_min = DIV_ROUND_UP(host->bus_hz, 510);
@@ -1800,21 +1847,44 @@ static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id)
if (host->pdata->caps)
mmc->caps = host->pdata->caps;
+ if (host->dev->of_node) {
+ ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
+ if (ctrl_id < 0)
+ ctrl_id = 0;
+ } else {
+ ctrl_id = to_platform_device(host->dev)->id;
+ }
+ if (host->drv_data && host->drv_data->caps)
+ mmc->caps |= host->drv_data->caps[ctrl_id];
+
if (host->pdata->caps2)
mmc->caps2 = host->pdata->caps2;
if (host->pdata->get_bus_wd)
- if (host->pdata->get_bus_wd(slot->id) >= 4)
- mmc->caps |= MMC_CAP_4_BIT_DATA;
+ bus_width = host->pdata->get_bus_wd(slot->id);
+ else if (host->dev->of_node)
+ bus_width = dw_mci_of_get_bus_wd(host->dev, slot->id);
+ else
+ bus_width = 1;
+
+	if (host->drv_data && host->drv_data->setup_bus) {
+ struct device_node *slot_np;
+ slot_np = dw_mci_of_find_slot_node(host->dev, slot->id);
+ ret = host->drv_data->setup_bus(host, slot_np, bus_width);
+ if (ret)
+ goto err_setup_bus;
+ }
+
+ switch (bus_width) {
+ case 8:
+ mmc->caps |= MMC_CAP_8_BIT_DATA;
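+		/* fall through: 8-bit hosts also support 4-bit transfers */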
+ case 4:
+ mmc->caps |= MMC_CAP_4_BIT_DATA;
+ }
if (host->pdata->quirks & DW_MCI_QUIRK_HIGHSPEED)
mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
- if (mmc->caps2 & MMC_CAP2_POWEROFF_NOTIFY)
- mmc->power_notify_type = MMC_HOST_PW_NOTIFY_SHORT;
- else
- mmc->power_notify_type = MMC_HOST_PW_NOTIFY_NONE;
-
if (host->pdata->blk_settings) {
mmc->max_segs = host->pdata->blk_settings->max_segs;
mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
@@ -1850,7 +1920,6 @@ static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id)
else
clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
- host->slot[id] = slot;
mmc_add_host(mmc);
#if defined(CONFIG_DEBUG_FS)
@@ -1867,6 +1936,10 @@ static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id)
queue_work(host->card_workqueue, &host->card_work);
return 0;
+
+err_setup_bus:
+ mmc_free_host(mmc);
+ return -EINVAL;
}
static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
@@ -1884,10 +1957,10 @@ static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
static void dw_mci_init_dma(struct dw_mci *host)
{
/* Alloc memory for sg translation */
- host->sg_cpu = dma_alloc_coherent(&host->dev, PAGE_SIZE,
+ host->sg_cpu = dma_alloc_coherent(host->dev, PAGE_SIZE,
&host->sg_dma, GFP_KERNEL);
if (!host->sg_cpu) {
- dev_err(&host->dev, "%s: could not alloc DMA memory\n",
+ dev_err(host->dev, "%s: could not alloc DMA memory\n",
__func__);
goto no_dma;
}
@@ -1895,6 +1968,7 @@ static void dw_mci_init_dma(struct dw_mci *host)
/* Determine which DMA interface to use */
#ifdef CONFIG_MMC_DW_IDMAC
host->dma_ops = &dw_mci_idmac_ops;
+ dev_info(&host->dev, "Using internal DMA controller.\n");
#endif
if (!host->dma_ops)
@@ -1903,12 +1977,12 @@ static void dw_mci_init_dma(struct dw_mci *host)
if (host->dma_ops->init && host->dma_ops->start &&
host->dma_ops->stop && host->dma_ops->cleanup) {
if (host->dma_ops->init(host)) {
- dev_err(&host->dev, "%s: Unable to initialize "
+ dev_err(host->dev, "%s: Unable to initialize "
"DMA Controller.\n", __func__);
goto no_dma;
}
} else {
- dev_err(&host->dev, "DMA initialization not found.\n");
+ dev_err(host->dev, "DMA initialization not found.\n");
goto no_dma;
}
@@ -1916,7 +1990,7 @@ static void dw_mci_init_dma(struct dw_mci *host)
return;
no_dma:
- dev_info(&host->dev, "Using PIO mode.\n");
+ dev_info(host->dev, "Using PIO mode.\n");
host->use_dma = 0;
return;
}
@@ -1942,30 +2016,133 @@ static bool mci_wait_reset(struct device *dev, struct dw_mci *host)
return false;
}
+#ifdef CONFIG_OF
+static struct dw_mci_of_quirks {
+ char *quirk;
+ int id;
+} of_quirks[] = {
+ {
+ .quirk = "supports-highspeed",
+ .id = DW_MCI_QUIRK_HIGHSPEED,
+ }, {
+ .quirk = "broken-cd",
+ .id = DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
+ },
+};
+
+static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
+{
+ struct dw_mci_board *pdata;
+ struct device *dev = host->dev;
+ struct device_node *np = dev->of_node;
+ int idx, ret;
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ dev_err(dev, "could not allocate memory for pdata\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* find out number of slots supported */
+ if (of_property_read_u32(dev->of_node, "num-slots",
+ &pdata->num_slots)) {
+ dev_info(dev, "num-slots property not found, "
+ "assuming 1 slot is available\n");
+ pdata->num_slots = 1;
+ }
+
+ /* get quirks */
+ for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
+ if (of_get_property(np, of_quirks[idx].quirk, NULL))
+ pdata->quirks |= of_quirks[idx].id;
+
+ if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
+ dev_info(dev, "fifo-depth property not found, using "
+ "value of FIFOTH register as default\n");
+
+ of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
+
+	if (host->drv_data && host->drv_data->parse_dt) {
+ ret = host->drv_data->parse_dt(host);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ return pdata;
+}
+
+#else /* CONFIG_OF */
+static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
+{
+ return ERR_PTR(-EINVAL);
+}
+#endif /* CONFIG_OF */
+
int dw_mci_probe(struct dw_mci *host)
{
int width, i, ret = 0;
u32 fifo_size;
+ int init_slots = 0;
- if (!host->pdata || !host->pdata->init) {
- dev_err(&host->dev,
- "Platform data must supply init function\n");
- return -ENODEV;
+ if (!host->pdata) {
+ host->pdata = dw_mci_parse_dt(host);
+ if (IS_ERR(host->pdata)) {
+ dev_err(host->dev, "platform data not available\n");
+ return -EINVAL;
+ }
}
if (!host->pdata->select_slot && host->pdata->num_slots > 1) {
- dev_err(&host->dev,
+ dev_err(host->dev,
"Platform data must supply select_slot function\n");
return -ENODEV;
}
- if (!host->pdata->bus_hz) {
- dev_err(&host->dev,
+ host->biu_clk = clk_get(host->dev, "biu");
+ if (IS_ERR(host->biu_clk)) {
+ dev_dbg(host->dev, "biu clock not available\n");
+ } else {
+ ret = clk_prepare_enable(host->biu_clk);
+ if (ret) {
+ dev_err(host->dev, "failed to enable biu clock\n");
+ clk_put(host->biu_clk);
+ return ret;
+ }
+ }
+
+ host->ciu_clk = clk_get(host->dev, "ciu");
+ if (IS_ERR(host->ciu_clk)) {
+ dev_dbg(host->dev, "ciu clock not available\n");
+ } else {
+ ret = clk_prepare_enable(host->ciu_clk);
+ if (ret) {
+ dev_err(host->dev, "failed to enable ciu clock\n");
+ clk_put(host->ciu_clk);
+ goto err_clk_biu;
+ }
+ }
+
+ if (IS_ERR(host->ciu_clk))
+ host->bus_hz = host->pdata->bus_hz;
+ else
+ host->bus_hz = clk_get_rate(host->ciu_clk);
+
+	if (host->drv_data && host->drv_data->setup_clock) {
+ ret = host->drv_data->setup_clock(host);
+ if (ret) {
+ dev_err(host->dev,
+ "implementation specific clock setup failed\n");
+ goto err_clk_ciu;
+ }
+ }
+
+ if (!host->bus_hz) {
+ dev_err(host->dev,
"Platform data must supply bus speed\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto err_clk_ciu;
}
- host->bus_hz = host->pdata->bus_hz;
host->quirks = host->pdata->quirks;
spin_lock_init(&host->lock);
@@ -1998,7 +2175,7 @@ int dw_mci_probe(struct dw_mci *host)
}
/* Reset all blocks */
- if (!mci_wait_reset(&host->dev, host))
+ if (!mci_wait_reset(host->dev, host))
return -ENODEV;
host->dma_ops = host->pdata->dma_ops;
@@ -2054,10 +2231,18 @@ int dw_mci_probe(struct dw_mci *host)
/* We need at least one slot to succeed */
for (i = 0; i < host->num_slots; i++) {
ret = dw_mci_init_slot(host, i);
- if (ret) {
- ret = -ENODEV;
- goto err_init_slot;
- }
+ if (ret)
+ dev_dbg(host->dev, "slot %d init failed\n", i);
+ else
+ init_slots++;
+ }
+
+ if (init_slots) {
+ dev_info(host->dev, "%d slots initialized\n", init_slots);
+ } else {
+ dev_dbg(host->dev, "attempted to initialize %d slots, "
+ "but failed on all\n", host->num_slots);
+ goto err_init_slot;
}
/*
@@ -2065,7 +2250,7 @@ int dw_mci_probe(struct dw_mci *host)
* Need to check the version-id and set data-offset for DATA register.
*/
host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
- dev_info(&host->dev, "Version ID is %04x\n", host->verid);
+ dev_info(host->dev, "Version ID is %04x\n", host->verid);
if (host->verid < DW_MMC_240A)
host->data_offset = DATA_OFFSET;
@@ -2082,22 +2267,16 @@ int dw_mci_probe(struct dw_mci *host)
DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
- dev_info(&host->dev, "DW MMC controller at irq %d, "
+ dev_info(host->dev, "DW MMC controller at irq %d, "
"%d bit host data width, "
"%u deep fifo\n",
host->irq, width, fifo_size);
if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
- dev_info(&host->dev, "Internal DMAC interrupt fix enabled.\n");
+ dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
return 0;
err_init_slot:
- /* De-init any initialized slots */
- while (i > 0) {
- if (host->slot[i])
- dw_mci_cleanup_slot(host->slot[i], i);
- i--;
- }
free_irq(host->irq, host);
err_workqueue:
@@ -2106,13 +2285,24 @@ err_workqueue:
err_dmaunmap:
if (host->use_dma && host->dma_ops->exit)
host->dma_ops->exit(host);
- dma_free_coherent(&host->dev, PAGE_SIZE,
+ dma_free_coherent(host->dev, PAGE_SIZE,
host->sg_cpu, host->sg_dma);
if (host->vmmc) {
regulator_disable(host->vmmc);
regulator_put(host->vmmc);
}
+
+err_clk_ciu:
+ if (!IS_ERR(host->ciu_clk)) {
+ clk_disable_unprepare(host->ciu_clk);
+ clk_put(host->ciu_clk);
+ }
+err_clk_biu:
+ if (!IS_ERR(host->biu_clk)) {
+ clk_disable_unprepare(host->biu_clk);
+ clk_put(host->biu_clk);
+ }
return ret;
}
EXPORT_SYMBOL(dw_mci_probe);
@@ -2125,7 +2315,7 @@ void dw_mci_remove(struct dw_mci *host)
mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
for (i = 0; i < host->num_slots; i++) {
- dev_dbg(&host->dev, "remove slot %d\n", i);
+ dev_dbg(host->dev, "remove slot %d\n", i);
if (host->slot[i])
dw_mci_cleanup_slot(host->slot[i], i);
}
@@ -2136,7 +2326,7 @@ void dw_mci_remove(struct dw_mci *host)
free_irq(host->irq, host);
destroy_workqueue(host->card_workqueue);
- dma_free_coherent(&host->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
+ dma_free_coherent(host->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
if (host->use_dma && host->dma_ops->exit)
host->dma_ops->exit(host);
@@ -2146,6 +2336,12 @@ void dw_mci_remove(struct dw_mci *host)
regulator_put(host->vmmc);
}
+ if (!IS_ERR(host->ciu_clk))
+ clk_disable_unprepare(host->ciu_clk);
+ if (!IS_ERR(host->biu_clk))
+ clk_disable_unprepare(host->biu_clk);
+ clk_put(host->ciu_clk);
+ clk_put(host->biu_clk);
}
EXPORT_SYMBOL(dw_mci_remove);
@@ -2188,7 +2384,7 @@ int dw_mci_resume(struct dw_mci *host)
if (host->vmmc)
regulator_enable(host->vmmc);
- if (!mci_wait_reset(&host->dev, host)) {
+ if (!mci_wait_reset(host->dev, host)) {
ret = -ENODEV;
return ret;
}
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
index 15c27e1..53b8fd9 100644
--- a/drivers/mmc/host/dw_mmc.h
+++ b/drivers/mmc/host/dw_mmc.h
@@ -182,4 +182,28 @@ extern int dw_mci_suspend(struct dw_mci *host);
extern int dw_mci_resume(struct dw_mci *host);
#endif
+/**
+ * dw_mci driver data - dw-mshc implementation specific driver data.
+ * @caps: mmc subsystem specified capabilities of the controller(s).
+ * @init: early implementation specific initialization.
+ * @setup_clock: implementation specific clock configuration.
+ * @prepare_command: handle CMD register extensions.
+ * @set_ios: handle bus specific extensions.
+ * @parse_dt: parse implementation specific device tree properties.
+ * @setup_bus: initialize io-interface
+ *
+ * Provide controller implementation specific extensions. The usage of this
+ * data structure is fully optional and usage of each member in this structure
+ * is optional as well.
+ */
+struct dw_mci_drv_data {
+ unsigned long *caps;
+ int (*init)(struct dw_mci *host);
+ int (*setup_clock)(struct dw_mci *host);
+ void (*prepare_command)(struct dw_mci *host, u32 *cmdr);
+ void (*set_ios)(struct dw_mci *host, struct mmc_ios *ios);
+ int (*parse_dt)(struct dw_mci *host);
+ int (*setup_bus)(struct dw_mci *host,
+ struct device_node *slot_np, u8 bus_width);
+};
#endif /* _DW_MMC_H_ */
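As the kernel-doc above says, every hook in dw_mci_drv_data is optional. A hypothetical minimal back-end (sketch only; all names are illustrative) would fill in just what it needs and hand the structure to dw_mci_pltfm_register():

static int example_init(struct dw_mci *host)
{
	/* early, implementation specific setup would go here */
	return 0;
}

static struct dw_mci_drv_data example_drv_data = {
	.init	= example_init,		/* all other hooks left NULL */
};

static int example_probe(struct platform_device *pdev)
{
	return dw_mci_pltfm_register(pdev, &example_drv_data);
}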
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 273306c..a600eab 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -1532,20 +1532,7 @@ static struct spi_driver mmc_spi_driver = {
.remove = __devexit_p(mmc_spi_remove),
};
-
-static int __init mmc_spi_init(void)
-{
- return spi_register_driver(&mmc_spi_driver);
-}
-module_init(mmc_spi_init);
-
-
-static void __exit mmc_spi_exit(void)
-{
- spi_unregister_driver(&mmc_spi_driver);
-}
-module_exit(mmc_spi_exit);
-
+module_spi_driver(mmc_spi_driver);
MODULE_AUTHOR("Mike Lavender, David Brownell, "
"Hans-Peter Nilsson, Jan Nikitenko");
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 50ff19a..edc3e9b 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -1309,14 +1309,10 @@ static int __devinit mmci_probe(struct amba_device *dev,
goto host_free;
}
- ret = clk_prepare(host->clk);
+ ret = clk_prepare_enable(host->clk);
if (ret)
goto clk_free;
- ret = clk_enable(host->clk);
- if (ret)
- goto clk_unprep;
-
host->plat = plat;
host->variant = variant;
host->mclk = clk_get_rate(host->clk);
@@ -1515,9 +1511,7 @@ static int __devinit mmci_probe(struct amba_device *dev,
err_gpio_cd:
iounmap(host->base);
clk_disable:
- clk_disable(host->clk);
- clk_unprep:
- clk_unprepare(host->clk);
+ clk_disable_unprepare(host->clk);
clk_free:
clk_put(host->clk);
host_free:
@@ -1564,8 +1558,7 @@ static int __devexit mmci_remove(struct amba_device *dev)
gpio_free(host->gpio_cd);
iounmap(host->base);
- clk_disable(host->clk);
- clk_unprepare(host->clk);
+ clk_disable_unprepare(host->clk);
clk_put(host->clk);
if (host->vcc)
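clk_prepare_enable() and clk_disable_unprepare(), used in the hunks above, are small inline helpers from <linux/clk.h> that pair the two stages of the common clock API; roughly:

static inline int clk_prepare_enable(struct clk *clk)
{
	int ret;

	ret = clk_prepare(clk);		/* may sleep */
	if (ret)
		return ret;

	ret = clk_enable(clk);		/* safe in atomic context */
	if (ret)
		clk_unprepare(clk);

	return ret;
}

clk_disable_unprepare() is the mirror image: clk_disable() followed by clk_unprepare().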
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index 7b1161d..565c2e4 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -44,6 +44,7 @@
#include <mach/hardware.h>
#define DRIVER_NAME "mxc-mmc"
+#define MXCMCI_TIMEOUT_MS 10000
#define MMC_REG_STR_STP_CLK 0x00
#define MMC_REG_STATUS 0x04
@@ -150,6 +151,8 @@ struct mxcmci_host {
int dmareq;
struct dma_slave_config dma_slave_config;
struct imx_dma_data dma_data;
+
+ struct timer_list watchdog;
};
static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios);
@@ -271,9 +274,32 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
dmaengine_submit(host->desc);
dma_async_issue_pending(host->dma);
+ mod_timer(&host->watchdog, jiffies + msecs_to_jiffies(MXCMCI_TIMEOUT_MS));
+
return 0;
}
+static void mxcmci_cmd_done(struct mxcmci_host *host, unsigned int stat);
+static void mxcmci_data_done(struct mxcmci_host *host, unsigned int stat);
+
+static void mxcmci_dma_callback(void *data)
+{
+ struct mxcmci_host *host = data;
+ u32 stat;
+
+ del_timer(&host->watchdog);
+
+ stat = readl(host->base + MMC_REG_STATUS);
+ writel(stat & ~STATUS_DATA_TRANS_DONE, host->base + MMC_REG_STATUS);
+
+ dev_dbg(mmc_dev(host->mmc), "%s: 0x%08x\n", __func__, stat);
+
+ if (stat & STATUS_READ_OP_DONE)
+ writel(STATUS_READ_OP_DONE, host->base + MMC_REG_STATUS);
+
+ mxcmci_data_done(host, stat);
+}
+
static int mxcmci_start_cmd(struct mxcmci_host *host, struct mmc_command *cmd,
unsigned int cmdat)
{
@@ -305,8 +331,14 @@ static int mxcmci_start_cmd(struct mxcmci_host *host, struct mmc_command *cmd,
int_cntr = INT_END_CMD_RES_EN;
- if (mxcmci_use_dma(host))
- int_cntr |= INT_READ_OP_EN | INT_WRITE_OP_DONE_EN;
+ if (mxcmci_use_dma(host)) {
+ if (host->dma_dir == DMA_FROM_DEVICE) {
+ host->desc->callback = mxcmci_dma_callback;
+ host->desc->callback_param = host;
+ } else {
+ int_cntr |= INT_WRITE_OP_DONE_EN;
+ }
+ }
spin_lock_irqsave(&host->lock, flags);
if (host->use_sdio)
@@ -345,11 +377,9 @@ static int mxcmci_finish_data(struct mxcmci_host *host, unsigned int stat)
struct mmc_data *data = host->data;
int data_error;
- if (mxcmci_use_dma(host)) {
- dmaengine_terminate_all(host->dma);
+ if (mxcmci_use_dma(host))
dma_unmap_sg(host->dma->device->dev, data->sg, data->sg_len,
host->dma_dir);
- }
if (stat & STATUS_ERR_MASK) {
dev_dbg(mmc_dev(host->mmc), "request failed. status: 0x%08x\n",
@@ -624,8 +654,10 @@ static irqreturn_t mxcmci_irq(int irq, void *devid)
mxcmci_cmd_done(host, stat);
if (mxcmci_use_dma(host) &&
- (stat & (STATUS_DATA_TRANS_DONE | STATUS_WRITE_OP_DONE)))
+ (stat & (STATUS_DATA_TRANS_DONE | STATUS_WRITE_OP_DONE))) {
+ del_timer(&host->watchdog);
mxcmci_data_done(host, stat);
+ }
if (host->default_irq_mask &&
(stat & (STATUS_CARD_INSERTION | STATUS_CARD_REMOVAL)))
@@ -836,6 +868,34 @@ static bool filter(struct dma_chan *chan, void *param)
return true;
}
+static void mxcmci_watchdog(unsigned long data)
+{
+ struct mmc_host *mmc = (struct mmc_host *)data;
+ struct mxcmci_host *host = mmc_priv(mmc);
+ struct mmc_request *req = host->req;
+ unsigned int stat = readl(host->base + MMC_REG_STATUS);
+
+ if (host->dma_dir == DMA_FROM_DEVICE) {
+ dmaengine_terminate_all(host->dma);
+ dev_err(mmc_dev(host->mmc),
+ "%s: read time out (status = 0x%08x)\n",
+ __func__, stat);
+ } else {
+ dev_err(mmc_dev(host->mmc),
+ "%s: write time out (status = 0x%08x)\n",
+ __func__, stat);
+ mxcmci_softreset(host);
+ }
+
+	/* Mark the transfer as erroneous and inform the upper layers */
+
+ host->data->error = -ETIMEDOUT;
+ host->req = NULL;
+ host->cmd = NULL;
+ host->data = NULL;
+ mmc_request_done(host->mmc, req);
+}
+
static const struct mmc_host_ops mxcmci_ops = {
.request = mxcmci_request,
.set_ios = mxcmci_set_ios,
@@ -968,6 +1028,10 @@ static int mxcmci_probe(struct platform_device *pdev)
mmc_add_host(mmc);
+ init_timer(&host->watchdog);
+ host->watchdog.function = &mxcmci_watchdog;
+ host->watchdog.data = (unsigned long)mmc;
+
return 0;
out_free_irq:
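The watchdog added to mxcmmc above follows the classic (pre-timer_setup()) kernel timer pattern: initialize once, arm with mod_timer() when a transfer starts, delete on completion, and recover in the callback on expiry. A self-contained sketch with hypothetical names:

#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

struct example_host {
	struct timer_list watchdog;
};

static void example_timeout(unsigned long data)
{
	struct example_host *host = (struct example_host *)data;

	/* a real driver would terminate DMA and fail the request here */
	pr_warn("example: transfer timed out, host=%p\n", host);
}

static void example_arm_watchdog(struct example_host *host)
{
	init_timer(&host->watchdog);
	host->watchdog.function = example_timeout;
	host->watchdog.data = (unsigned long)host;

	/* arms the timer (or pushes it back) to fire 10 s from now */
	mod_timer(&host->watchdog, jiffies + msecs_to_jiffies(10000));
}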
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index c6259a8..48ad361 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -27,16 +27,10 @@
#include <linux/mmc/card.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
-#include <linux/i2c/tps65010.h>
#include <linux/slab.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-
#include <plat/mmc.h>
-#include <asm/gpio.h>
#include <plat/dma.h>
-#include <plat/fpga.h>
#define OMAP_MMC_REG_CMD 0x00
#define OMAP_MMC_REG_ARGL 0x01
@@ -105,7 +99,6 @@ struct mmc_omap_slot {
u16 saved_con;
u16 bus_mode;
unsigned int fclk_freq;
- unsigned powered:1;
struct tasklet_struct cover_tasklet;
struct timer_list cover_timer;
@@ -137,7 +130,6 @@ struct mmc_omap_host {
unsigned int phys_base;
int irq;
unsigned char bus_mode;
- unsigned char hw_bus_mode;
unsigned int reg_shift;
struct work_struct cmd_abort_work;
@@ -695,22 +687,29 @@ mmc_omap_xfer_data(struct mmc_omap_host *host, int write)
host->buffer += nwords;
}
-static inline void mmc_omap_report_irq(u16 status)
+#ifdef CONFIG_MMC_DEBUG
+static void mmc_omap_report_irq(struct mmc_omap_host *host, u16 status)
{
static const char *mmc_omap_status_bits[] = {
"EOC", "CD", "CB", "BRS", "EOFB", "DTO", "DCRC", "CTO",
"CCRC", "CRW", "AF", "AE", "OCRB", "CIRQ", "CERR"
};
- int i, c = 0;
+ int i;
+	char res[128], *buf = res;	/* all 15 bit names can exceed 64 bytes */
+
+ buf += sprintf(buf, "MMC IRQ 0x%x:", status);
for (i = 0; i < ARRAY_SIZE(mmc_omap_status_bits); i++)
- if (status & (1 << i)) {
- if (c)
- printk(" ");
- printk("%s", mmc_omap_status_bits[i]);
- c++;
- }
+ if (status & (1 << i))
+ buf += sprintf(buf, " %s", mmc_omap_status_bits[i]);
+ dev_vdbg(mmc_dev(host->mmc), "%s\n", res);
}
+#else
+static void mmc_omap_report_irq(struct mmc_omap_host *host, u16 status)
+{
+}
+#endif
+
static irqreturn_t mmc_omap_irq(int irq, void *dev_id)
{
@@ -744,12 +743,10 @@ static irqreturn_t mmc_omap_irq(int irq, void *dev_id)
cmd = host->cmd->opcode;
else
cmd = -1;
-#ifdef CONFIG_MMC_DEBUG
dev_dbg(mmc_dev(host->mmc), "MMC IRQ %04x (CMD %d): ",
status, cmd);
- mmc_omap_report_irq(status);
- printk("\n");
-#endif
+ mmc_omap_report_irq(host, status);
+
if (host->total_bytes_left) {
if ((status & OMAP_MMC_STAT_A_FULL) ||
(status & OMAP_MMC_STAT_END_OF_DATA))
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 38adc33..54bfd0c 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -35,7 +35,6 @@
#include <linux/mmc/core.h>
#include <linux/mmc/mmc.h>
#include <linux/io.h>
-#include <linux/semaphore.h>
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
@@ -44,7 +43,6 @@
#include <plat/cpu.h>
/* OMAP HSMMC Host Controller Registers */
-#define OMAP_HSMMC_SYSCONFIG 0x0010
#define OMAP_HSMMC_SYSSTATUS 0x0014
#define OMAP_HSMMC_CON 0x002C
#define OMAP_HSMMC_BLK 0x0104
@@ -161,8 +159,6 @@ struct omap_hsmmc_host {
unsigned int dma_sg_idx;
unsigned char bus_mode;
unsigned char power_mode;
- u32 *buffer;
- u32 bytesleft;
int suspended;
int irq;
int use_dma, dma_ch;
@@ -171,7 +167,6 @@ struct omap_hsmmc_host {
int slot_id;
int response_busy;
int context_loss;
- int vdd;
int protect_card;
int reqs_blocked;
int use_reg;
@@ -300,12 +295,12 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
struct regulator *reg;
int ocr_value = 0;
- mmc_slot(host).set_power = omap_hsmmc_set_power;
-
reg = regulator_get(host->dev, "vmmc");
if (IS_ERR(reg)) {
dev_dbg(host->dev, "vmmc regulator missing\n");
+ return PTR_ERR(reg);
} else {
+ mmc_slot(host).set_power = omap_hsmmc_set_power;
host->vcc = reg;
ocr_value = mmc_regulator_get_ocrmask(reg);
if (!mmc_slot(host).ocr_mask) {
@@ -495,7 +490,7 @@ static void omap_hsmmc_set_clock(struct omap_hsmmc_host *host)
unsigned long regval;
unsigned long timeout;
- dev_dbg(mmc_dev(host->mmc), "Set clock to %uHz\n", ios->clock);
+ dev_vdbg(mmc_dev(host->mmc), "Set clock to %uHz\n", ios->clock);
omap_hsmmc_stop_clock(host);
@@ -579,21 +574,8 @@ static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host)
if (host->context_loss == context_loss)
return 1;
- /* Wait for hardware reset */
- timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS);
- while ((OMAP_HSMMC_READ(host->base, SYSSTATUS) & RESETDONE) != RESETDONE
- && time_before(jiffies, timeout))
- ;
-
- /* Do software reset */
- OMAP_HSMMC_WRITE(host->base, SYSCONFIG, SOFTRESET);
- timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS);
- while ((OMAP_HSMMC_READ(host->base, SYSSTATUS) & RESETDONE) != RESETDONE
- && time_before(jiffies, timeout))
- ;
-
- OMAP_HSMMC_WRITE(host->base, SYSCONFIG,
- OMAP_HSMMC_READ(host->base, SYSCONFIG) | AUTOIDLE);
+	if (!(OMAP_HSMMC_READ(host->base, SYSSTATUS) & RESETDONE))
+ return 1;
if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) {
if (host->power_mode != MMC_POWER_OFF &&
@@ -745,7 +727,7 @@ omap_hsmmc_start_command(struct omap_hsmmc_host *host, struct mmc_command *cmd,
{
int cmdreg = 0, resptype = 0, cmdtype = 0;
- dev_dbg(mmc_dev(host->mmc), "%s: CMD%d, argument 0x%08x\n",
+ dev_vdbg(mmc_dev(host->mmc), "%s: CMD%d, argument 0x%08x\n",
mmc_hostname(host->mmc), cmd->opcode, cmd->arg);
host->cmd = cmd;
@@ -934,7 +916,7 @@ static void omap_hsmmc_dbg_report_irq(struct omap_hsmmc_host *host, u32 status)
buf += len;
}
- dev_dbg(mmc_dev(host->mmc), "%s\n", res);
+ dev_vdbg(mmc_dev(host->mmc), "%s\n", res);
}
#else
static inline void omap_hsmmc_dbg_report_irq(struct omap_hsmmc_host *host,
@@ -981,72 +963,40 @@ static inline void omap_hsmmc_reset_controller_fsm(struct omap_hsmmc_host *host,
__func__);
}
+static void hsmmc_command_incomplete(struct omap_hsmmc_host *host, int err)
+{
+ omap_hsmmc_reset_controller_fsm(host, SRC);
+ host->cmd->error = err;
+
+ if (host->data) {
+ omap_hsmmc_reset_controller_fsm(host, SRD);
+ omap_hsmmc_dma_cleanup(host, err);
+ }
+
+}
+
static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status)
{
struct mmc_data *data;
int end_cmd = 0, end_trans = 0;
- if (!host->req_in_progress) {
- do {
- OMAP_HSMMC_WRITE(host->base, STAT, status);
- /* Flush posted write */
- status = OMAP_HSMMC_READ(host->base, STAT);
- } while (status & INT_EN_MASK);
- return;
- }
-
data = host->data;
- dev_dbg(mmc_dev(host->mmc), "IRQ Status is %x\n", status);
+ dev_vdbg(mmc_dev(host->mmc), "IRQ Status is %x\n", status);
if (status & ERR) {
omap_hsmmc_dbg_report_irq(host, status);
- if ((status & CMD_TIMEOUT) ||
- (status & CMD_CRC)) {
- if (host->cmd) {
- if (status & CMD_TIMEOUT) {
- omap_hsmmc_reset_controller_fsm(host,
- SRC);
- host->cmd->error = -ETIMEDOUT;
- } else {
- host->cmd->error = -EILSEQ;
- }
- end_cmd = 1;
- }
- if (host->data || host->response_busy) {
- if (host->data)
- omap_hsmmc_dma_cleanup(host,
- -ETIMEDOUT);
- host->response_busy = 0;
- omap_hsmmc_reset_controller_fsm(host, SRD);
- }
- }
- if ((status & DATA_TIMEOUT) ||
- (status & DATA_CRC)) {
- if (host->data || host->response_busy) {
- int err = (status & DATA_TIMEOUT) ?
- -ETIMEDOUT : -EILSEQ;
-
- if (host->data)
- omap_hsmmc_dma_cleanup(host, err);
- else
- host->mrq->cmd->error = err;
- host->response_busy = 0;
- omap_hsmmc_reset_controller_fsm(host, SRD);
- end_trans = 1;
- }
- }
- if (status & CARD_ERR) {
- dev_dbg(mmc_dev(host->mmc),
- "Ignoring card err CMD%d\n", host->cmd->opcode);
- if (host->cmd)
- end_cmd = 1;
- if (host->data)
- end_trans = 1;
+ if (status & (CMD_TIMEOUT | DATA_TIMEOUT))
+ hsmmc_command_incomplete(host, -ETIMEDOUT);
+ else if (status & (CMD_CRC | DATA_CRC))
+ hsmmc_command_incomplete(host, -EILSEQ);
+
+ end_cmd = 1;
+ if (host->data || host->response_busy) {
+ end_trans = 1;
+ host->response_busy = 0;
}
}
- OMAP_HSMMC_WRITE(host->base, STAT, status);
-
if (end_cmd || ((status & CC) && host->cmd))
omap_hsmmc_cmd_done(host, host->cmd);
if ((end_trans || (status & TC)) && host->mrq)
@@ -1062,11 +1012,13 @@ static irqreturn_t omap_hsmmc_irq(int irq, void *dev_id)
int status;
status = OMAP_HSMMC_READ(host->base, STAT);
- do {
+ while (status & INT_EN_MASK && host->req_in_progress) {
omap_hsmmc_do_irq(host, status);
+
/* Flush posted write */
+ OMAP_HSMMC_WRITE(host->base, STAT, status);
status = OMAP_HSMMC_READ(host->base, STAT);
- } while (status & INT_EN_MASK);
+ }
return IRQ_HANDLED;
}
@@ -1501,12 +1453,10 @@ static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
case MMC_POWER_OFF:
mmc_slot(host).set_power(host->dev, host->slot_id,
0, 0);
- host->vdd = 0;
break;
case MMC_POWER_UP:
mmc_slot(host).set_power(host->dev, host->slot_id,
1, ios->vdd);
- host->vdd = ios->vdd;
break;
case MMC_POWER_ON:
do_send_init_stream = 1;
@@ -1598,10 +1548,6 @@ static void omap_hsmmc_conf_bus_power(struct omap_hsmmc_host *host)
value = OMAP_HSMMC_READ(host->base, CAPA);
OMAP_HSMMC_WRITE(host->base, CAPA, value | capa);
- /* Set the controller to AUTO IDLE mode */
- value = OMAP_HSMMC_READ(host->base, SYSCONFIG);
- OMAP_HSMMC_WRITE(host->base, SYSCONFIG, value | AUTOIDLE);
-
/* Set SD bus power bit */
set_sd_bus_power(host);
}
@@ -1659,8 +1605,6 @@ static int omap_hsmmc_regs_show(struct seq_file *s, void *data)
pm_runtime_get_sync(host->dev);
- seq_printf(s, "SYSCONFIG:\t0x%08x\n",
- OMAP_HSMMC_READ(host->base, SYSCONFIG));
seq_printf(s, "CON:\t\t0x%08x\n",
OMAP_HSMMC_READ(host->base, CON));
seq_printf(s, "HCTL:\t\t0x%08x\n",
@@ -2105,8 +2049,7 @@ static int omap_hsmmc_suspend(struct device *dev)
if (ret) {
host->suspended = 0;
if (host->pdata->resume) {
- ret = host->pdata->resume(dev, host->slot_id);
- if (ret)
+ if (host->pdata->resume(dev, host->slot_id))
dev_dbg(dev, "Unmask interrupt failed\n");
}
goto err;
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index ca3915d..3f9d6d5 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -30,6 +30,9 @@
#include <linux/regulator/consumer.h>
#include <linux/gpio.h>
#include <linux/gfp.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/of_device.h>
#include <asm/sizes.h>
@@ -573,6 +576,50 @@ static irqreturn_t pxamci_detect_irq(int irq, void *devid)
return IRQ_HANDLED;
}
+#ifdef CONFIG_OF
+static const struct of_device_id pxa_mmc_dt_ids[] = {
+ { .compatible = "marvell,pxa-mmc" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, pxa_mmc_dt_ids);
+
+static int __devinit pxamci_of_init(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct pxamci_platform_data *pdata;
+ u32 tmp;
+
+ if (!np)
+ return 0;
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ pdata->gpio_card_detect =
+ of_get_named_gpio(np, "cd-gpios", 0);
+ pdata->gpio_card_ro =
+ of_get_named_gpio(np, "wp-gpios", 0);
+
+ /* pxa-mmc specific */
+ pdata->gpio_power =
+ of_get_named_gpio(np, "pxa-mmc,gpio-power", 0);
+
+ if (of_property_read_u32(np, "pxa-mmc,detect-delay-ms", &tmp) == 0)
+ pdata->detect_delay_ms = tmp;
+
+ pdev->dev.platform_data = pdata;
+
+ return 0;
+}
+#else
+static int __devinit pxamci_of_init(struct platform_device *pdev)
+{
+ return 0;
+}
+#endif
+
static int pxamci_probe(struct platform_device *pdev)
{
struct mmc_host *mmc;
@@ -580,6 +627,10 @@ static int pxamci_probe(struct platform_device *pdev)
struct resource *r, *dmarx, *dmatx;
int ret, irq, gpio_cd = -1, gpio_ro = -1, gpio_power = -1;
+ ret = pxamci_of_init(pdev);
+ if (ret)
+ return ret;
+
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
if (!r || irq < 0)
@@ -866,6 +917,7 @@ static struct platform_driver pxamci_driver = {
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(pxa_mmc_dt_ids),
#ifdef CONFIG_PM
.pm = &pxamci_pm_ops,
#endif
diff --git a/drivers/mmc/host/sdhci-dove.c b/drivers/mmc/host/sdhci-dove.c
index a6e53a1..90140eb 100644
--- a/drivers/mmc/host/sdhci-dove.c
+++ b/drivers/mmc/host/sdhci-dove.c
@@ -24,6 +24,7 @@
#include <linux/err.h>
#include <linux/module.h>
#include <linux/mmc/host.h>
+#include <linux/of.h>
#include "sdhci-pltfm.h"
@@ -126,11 +127,18 @@ static int __devexit sdhci_dove_remove(struct platform_device *pdev)
return sdhci_pltfm_unregister(pdev);
}
+static const struct of_device_id sdhci_dove_of_match_table[] = {
+ { .compatible = "marvell,dove-sdhci", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sdhci_dove_of_match_table);
+
static struct platform_driver sdhci_dove_driver = {
.driver = {
.name = "sdhci-dove",
.owner = THIS_MODULE,
.pm = SDHCI_PLTFM_PMOPS,
+ .of_match_table = of_match_ptr(sdhci_dove_of_match_table),
},
.probe = sdhci_dove_probe,
.remove = __devexit_p(sdhci_dove_remove),
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index f8eb1fb..ae5fcbf 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -21,6 +21,32 @@
#include "sdhci-pltfm.h"
#include "sdhci-esdhc.h"
+#define VENDOR_V_22 0x12
+static u32 esdhc_readl(struct sdhci_host *host, int reg)
+{
+ u32 ret;
+
+ ret = in_be32(host->ioaddr + reg);
+ /*
+ * The bit of ADMA flag in eSDHC is not compatible with standard
+ * SDHC register, so set fake flag SDHCI_CAN_DO_ADMA2 when ADMA is
+ * supported by eSDHC.
+ * And for many FSL eSDHC controller, the reset value of field
+ * SDHCI_CAN_DO_ADMA1 is one, but some of them can't support ADMA,
+ * only these vendor version is greater than 2.2/0x12 support ADMA.
+ * For FSL eSDHC, must aligned 4-byte, so use 0xFC to read the
+ * the verdor version number, oxFE is SDHCI_HOST_VERSION.
+ */
+ if ((reg == SDHCI_CAPABILITIES) && (ret & SDHCI_CAN_DO_ADMA1)) {
+ u32 tmp = in_be32(host->ioaddr + SDHCI_SLOT_INT_STATUS);
+ tmp = (tmp & SDHCI_VENDOR_VER_MASK) >> SDHCI_VENDOR_VER_SHIFT;
+ if (tmp > VENDOR_V_22)
+ ret |= SDHCI_CAN_DO_ADMA2;
+ }
+
+ return ret;
+}
+
static u16 esdhc_readw(struct sdhci_host *host, int reg)
{
u16 ret;
@@ -144,7 +170,7 @@ static void esdhc_of_resume(struct sdhci_host *host)
#endif
static struct sdhci_ops sdhci_esdhc_ops = {
- .read_l = sdhci_be32bs_readl,
+ .read_l = esdhc_readl,
.read_w = esdhc_readw,
.read_b = esdhc_readb,
.write_l = sdhci_be32bs_writel,
@@ -161,9 +187,13 @@ static struct sdhci_ops sdhci_esdhc_ops = {
};
static struct sdhci_pltfm_data sdhci_esdhc_pdata = {
- /* card detection could be handled via GPIO */
+ /*
+ * card detection could be handled via GPIO
+ * eSDHC cannot support End Attribute in NOP ADMA descriptor
+ */
.quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_CARD_DETECTION
- | SDHCI_QUIRK_NO_CARD_NO_RESET,
+ | SDHCI_QUIRK_NO_CARD_NO_RESET
+ | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
.ops = &sdhci_esdhc_ops,
};
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index 9722d43..4bb74b0 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -1476,24 +1476,7 @@ static struct pci_driver sdhci_driver = {
},
};
-/*****************************************************************************\
- * *
- * Driver init/exit *
- * *
-\*****************************************************************************/
-
-static int __init sdhci_drv_init(void)
-{
- return pci_register_driver(&sdhci_driver);
-}
-
-static void __exit sdhci_drv_exit(void)
-{
- pci_unregister_driver(&sdhci_driver);
-}
-
-module_init(sdhci_drv_init);
-module_exit(sdhci_drv_exit);
+module_pci_driver(sdhci_driver);
MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
MODULE_DESCRIPTION("Secure Digital Host Controller Interface PCI driver");
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index d9a4ef4..65551a9 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -75,6 +75,9 @@ void sdhci_get_of_property(struct platform_device *pdev)
if (sdhci_of_wp_inverted(np))
host->quirks |= SDHCI_QUIRK_INVERTED_WRITE_PROTECT;
+ if (of_get_property(np, "broken-cd", NULL))
+ host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
+
if (of_device_is_compatible(np, "fsl,p2020-rev1-esdhc"))
host->quirks |= SDHCI_QUIRK_BROKEN_DMA;
diff --git a/drivers/mmc/host/sdhci-pxav2.c b/drivers/mmc/host/sdhci-pxav2.c
index b6ee885..8e63a9c 100644
--- a/drivers/mmc/host/sdhci-pxav2.c
+++ b/drivers/mmc/host/sdhci-pxav2.c
@@ -197,7 +197,7 @@ static int __devinit sdhci_pxav2_probe(struct platform_device *pdev)
goto err_clk_get;
}
pltfm_host->clk = clk;
- clk_enable(clk);
+ clk_prepare_enable(clk);
host->quirks = SDHCI_QUIRK_BROKEN_ADMA
| SDHCI_QUIRK_BROKEN_TIMEOUT_VAL
@@ -239,7 +239,7 @@ static int __devinit sdhci_pxav2_probe(struct platform_device *pdev)
return 0;
err_add_host:
- clk_disable(clk);
+ clk_disable_unprepare(clk);
clk_put(clk);
err_clk_get:
sdhci_pltfm_free(pdev);
@@ -255,7 +255,7 @@ static int __devexit sdhci_pxav2_remove(struct platform_device *pdev)
sdhci_remove_host(host, 1);
- clk_disable(pltfm_host->clk);
+ clk_disable_unprepare(pltfm_host->clk);
clk_put(pltfm_host->clk);
sdhci_pltfm_free(pdev);
kfree(pxa);
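
The clk_enable() to clk_prepare_enable() conversions in this series ready the drivers for the common clock framework, where a clock must first be prepared (a step that may sleep) and only then enabled (an atomic step). The combined helper in <linux/clk.h> is essentially:

        int clk_prepare_enable(struct clk *clk)
        {
                int ret;

                ret = clk_prepare(clk);         /* may sleep */
                if (ret)
                        return ret;

                ret = clk_enable(clk);          /* atomic, IRQ-safe */
                if (ret)
                        clk_unprepare(clk);     /* undo on failure */

                return ret;
        }

clk_disable_unprepare() is the mirror image, calling clk_disable() then clk_unprepare().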
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index 07fe383..e918a2b 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -24,12 +24,14 @@
#include <linux/gpio.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
+#include <linux/mmc/slot-gpio.h>
#include <linux/platform_data/pxa_sdhci.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/of_gpio.h>
#include "sdhci.h"
#include "sdhci-pltfm.h"
@@ -182,6 +184,7 @@ static struct sdhci_pxa_platdata *pxav3_get_mmc_pdata(struct device *dev)
struct device_node *np = dev->of_node;
u32 bus_width;
u32 clk_delay_cycles;
+ enum of_gpio_flags gpio_flags;
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
@@ -198,6 +201,10 @@ static struct sdhci_pxa_platdata *pxav3_get_mmc_pdata(struct device *dev)
if (clk_delay_cycles > 0)
pdata->clk_delay_cycles = clk_delay_cycles;
+ pdata->ext_cd_gpio = of_get_named_gpio_flags(np, "cd-gpios", 0, &gpio_flags);
+ if (gpio_is_valid(pdata->ext_cd_gpio) && gpio_flags != OF_GPIO_ACTIVE_LOW)
+ pdata->host_caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
+
return pdata;
}
#else
@@ -231,14 +238,14 @@ static int __devinit sdhci_pxav3_probe(struct platform_device *pdev)
pltfm_host = sdhci_priv(host);
pltfm_host->priv = pxa;
- clk = clk_get(dev, "PXA-SDHCLK");
+ clk = clk_get(dev, NULL);
if (IS_ERR(clk)) {
dev_err(dev, "failed to get io clock\n");
ret = PTR_ERR(clk);
goto err_clk_get;
}
pltfm_host->clk = clk;
- clk_enable(clk);
+ clk_prepare_enable(clk);
host->quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL
| SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC
@@ -266,12 +273,25 @@ static int __devinit sdhci_pxav3_probe(struct platform_device *pdev)
host->quirks |= pdata->quirks;
if (pdata->host_caps)
host->mmc->caps |= pdata->host_caps;
+ if (pdata->host_caps2)
+ host->mmc->caps2 |= pdata->host_caps2;
if (pdata->pm_caps)
host->mmc->pm_caps |= pdata->pm_caps;
+
+ if (gpio_is_valid(pdata->ext_cd_gpio)) {
+ ret = mmc_gpio_request_cd(host->mmc, pdata->ext_cd_gpio);
+ if (ret) {
+ dev_err(mmc_dev(host->mmc),
+ "failed to allocate card detect gpio\n");
+ goto err_cd_req;
+ }
+ }
}
host->ops = &pxav3_sdhci_ops;
+ sdhci_get_of_property(pdev);
+
ret = sdhci_add_host(host);
if (ret) {
dev_err(&pdev->dev, "failed to add host\n");
@@ -283,8 +303,10 @@ static int __devinit sdhci_pxav3_probe(struct platform_device *pdev)
return 0;
err_add_host:
- clk_disable(clk);
+ clk_disable_unprepare(clk);
clk_put(clk);
+ mmc_gpio_free_cd(host->mmc);
+err_cd_req:
err_clk_get:
sdhci_pltfm_free(pdev);
kfree(pxa);
@@ -296,11 +318,16 @@ static int __devexit sdhci_pxav3_remove(struct platform_device *pdev)
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_pxa *pxa = pltfm_host->priv;
+ struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data;
sdhci_remove_host(host, 1);
- clk_disable(pltfm_host->clk);
+ clk_disable_unprepare(pltfm_host->clk);
clk_put(pltfm_host->clk);
+
+ if (pdata && gpio_is_valid(pdata->ext_cd_gpio))
+ mmc_gpio_free_cd(host->mmc);
+
sdhci_pltfm_free(pdev);
kfree(pxa);
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index a50c205..2903949 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -34,6 +34,9 @@
#define MAX_BUS_CLK (4)
+/* Number of gpios used is max data bus width + command and clock lines */
+#define NUM_GPIOS(x) ((x) + 2)
+
/**
* struct sdhci_s3c - S3C SDHCI instance
* @host: The SDHCI host created
@@ -41,6 +44,7 @@
* @ioarea: The resource created when we claimed the IO area.
* @pdata: The platform data for this controller.
* @cur_clk: The index of the current bus clock.
+ * @gpios: List of gpio numbers parsed from device tree.
* @clk_io: The clock for the internal bus interface.
* @clk_bus: The clocks that are available for the SD/MMC bus clock.
*/
@@ -52,6 +56,7 @@ struct sdhci_s3c {
unsigned int cur_clk;
int ext_cd_irq;
int ext_cd_gpio;
+ int *gpios;
struct clk *clk_io;
struct clk *clk_bus[MAX_BUS_CLK];
@@ -166,7 +171,7 @@ static unsigned int sdhci_s3c_consider_clock(struct sdhci_s3c *ourhost,
dev_dbg(&ourhost->pdev->dev, "clk %d: rate %ld, want %d, got %ld\n",
src, rate, wanted, rate / div);
- return (wanted - (rate / div));
+ return wanted - (rate / div);
}
/**
@@ -203,10 +208,12 @@ static void sdhci_s3c_set_clock(struct sdhci_host *host, unsigned int clock)
best_src, clock, best);
/* select the new clock source */
-
if (ourhost->cur_clk != best_src) {
struct clk *clk = ourhost->clk_bus[best_src];
+ clk_enable(clk);
+ clk_disable(ourhost->clk_bus[ourhost->cur_clk]);
+
/* turn clock off to card before changing clock source */
writew(0, host->ioaddr + SDHCI_CLOCK_CONTROL);
@@ -288,6 +295,7 @@ static unsigned int sdhci_cmu_get_min_clock(struct sdhci_host *host)
static void sdhci_cmu_set_clock(struct sdhci_host *host, unsigned int clock)
{
struct sdhci_s3c *ourhost = to_s3c(host);
+ struct device *dev = &ourhost->pdev->dev;
unsigned long timeout;
u16 clk = 0;
@@ -309,8 +317,8 @@ static void sdhci_cmu_set_clock(struct sdhci_host *host, unsigned int clock)
while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
& SDHCI_CLOCK_INT_STABLE)) {
if (timeout == 0) {
- printk(KERN_ERR "%s: Internal clock never "
- "stabilised.\n", mmc_hostname(host->mmc));
+ dev_err(dev, "%s: Internal clock never stabilised.\n",
+ mmc_hostname(host->mmc));
return;
}
timeout--;
@@ -404,7 +412,9 @@ static void sdhci_s3c_setup_card_detect_gpio(struct sdhci_s3c *sc)
if (sc->ext_cd_irq &&
request_threaded_irq(sc->ext_cd_irq, NULL,
sdhci_s3c_gpio_card_detect_thread,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT,
dev_name(dev), sc) == 0) {
int status = gpio_get_value(sc->ext_cd_gpio);
if (pdata->ext_cd_gpio_invert)
@@ -419,9 +429,121 @@ static void sdhci_s3c_setup_card_detect_gpio(struct sdhci_s3c *sc)
}
}
+#ifdef CONFIG_OF
+static int __devinit sdhci_s3c_parse_dt(struct device *dev,
+ struct sdhci_host *host, struct s3c_sdhci_platdata *pdata)
+{
+ struct device_node *node = dev->of_node;
+ struct sdhci_s3c *ourhost = to_s3c(host);
+ u32 max_width;
+ int gpio, cnt, ret;
+
+ /* if the bus-width property is not specified, assume a width of 1 */
+ if (of_property_read_u32(node, "bus-width", &max_width))
+ max_width = 1;
+ pdata->max_width = max_width;
+
+ ourhost->gpios = devm_kzalloc(dev, NUM_GPIOS(pdata->max_width) *
+ sizeof(int), GFP_KERNEL);
+ if (!ourhost->gpios)
+ return -ENOMEM;
+
+ /* get the card detection method */
+ if (of_get_property(node, "broken-cd", 0)) {
+ pdata->cd_type = S3C_SDHCI_CD_NONE;
+ goto setup_bus;
+ }
+
+ if (of_get_property(node, "non-removable", 0)) {
+ pdata->cd_type = S3C_SDHCI_CD_PERMANENT;
+ goto setup_bus;
+ }
+
+ gpio = of_get_named_gpio(node, "cd-gpios", 0);
+ if (gpio_is_valid(gpio)) {
+ pdata->cd_type = S3C_SDHCI_CD_GPIO;
+ goto found_cd;
+ } else if (gpio != -ENOENT) {
+ dev_err(dev, "invalid card detect gpio specified\n");
+ return -EINVAL;
+ }
+
+ gpio = of_get_named_gpio(node, "samsung,cd-pinmux-gpio", 0);
+ if (gpio_is_valid(gpio)) {
+ pdata->cd_type = S3C_SDHCI_CD_INTERNAL;
+ goto found_cd;
+ } else if (gpio != -ENOENT) {
+ dev_err(dev, "invalid card detect gpio specified\n");
+ return -EINVAL;
+ }
+
+ dev_info(dev, "assuming no card detect line available\n");
+ pdata->cd_type = S3C_SDHCI_CD_NONE;
+
+ found_cd:
+ if (pdata->cd_type == S3C_SDHCI_CD_GPIO) {
+ pdata->ext_cd_gpio = gpio;
+ ourhost->ext_cd_gpio = -1;
+ if (of_get_property(node, "cd-inverted", NULL))
+ pdata->ext_cd_gpio_invert = 1;
+ } else if (pdata->cd_type == S3C_SDHCI_CD_INTERNAL) {
+ ret = gpio_request(gpio, "sdhci-cd");
+ if (ret) {
+ dev_err(dev, "card detect gpio request failed\n");
+ return -EINVAL;
+ }
+ ourhost->ext_cd_gpio = gpio;
+ }
+
+ setup_bus:
+ /* get the gpios for command, clock and data lines */
+ for (cnt = 0; cnt < NUM_GPIOS(pdata->max_width); cnt++) {
+ gpio = of_get_gpio(node, cnt);
+ if (!gpio_is_valid(gpio)) {
+ dev_err(dev, "invalid gpio[%d]\n", cnt);
+ goto err_free_dt_cd_gpio;
+ }
+ ourhost->gpios[cnt] = gpio;
+ }
+
+ for (cnt = 0; cnt < NUM_GPIOS(pdata->max_width); cnt++) {
+ ret = gpio_request(ourhost->gpios[cnt], "sdhci-gpio");
+ if (ret) {
+ dev_err(dev, "gpio[%d] request failed\n", cnt);
+ goto err_free_dt_gpios;
+ }
+ }
+
+ return 0;
+
+ err_free_dt_gpios:
+ while (--cnt >= 0)
+ gpio_free(ourhost->gpios[cnt]);
+ err_free_dt_cd_gpio:
+ if (pdata->cd_type == S3C_SDHCI_CD_INTERNAL)
+ gpio_free(ourhost->ext_cd_gpio);
+ return -EINVAL;
+}
+#else
+static int __devinit sdhci_s3c_parse_dt(struct device *dev,
+ struct sdhci_host *host, struct s3c_sdhci_platdata *pdata)
+{
+ return -EINVAL;
+}
+#endif
+
+static const struct of_device_id sdhci_s3c_dt_match[];
+
static inline struct sdhci_s3c_drv_data *sdhci_s3c_get_driver_data(
struct platform_device *pdev)
{
+#ifdef CONFIG_OF
+ if (pdev->dev.of_node) {
+ const struct of_device_id *match;
+ match = of_match_node(sdhci_s3c_dt_match, pdev->dev.of_node);
+ return (struct sdhci_s3c_drv_data *)match->data;
+ }
+#endif
return (struct sdhci_s3c_drv_data *)
platform_get_device_id(pdev)->driver_data;
}
@@ -436,7 +558,7 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
struct resource *res;
int ret, irq, ptr, clks;
- if (!pdev->dev.platform_data) {
+ if (!pdev->dev.platform_data && !pdev->dev.of_node) {
dev_err(dev, "no device data specified\n");
return -ENOENT;
}
@@ -452,21 +574,28 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
dev_err(dev, "sdhci_alloc_host() failed\n");
return PTR_ERR(host);
}
+ sc = sdhci_priv(host);
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata) {
ret = -ENOMEM;
- goto err_io_clk;
+ goto err_pdata;
+ }
+
+ if (pdev->dev.of_node) {
+ ret = sdhci_s3c_parse_dt(&pdev->dev, host, pdata);
+ if (ret)
+ goto err_pdata;
+ } else {
+ memcpy(pdata, pdev->dev.platform_data, sizeof(*pdata));
+ sc->ext_cd_gpio = -1; /* invalid gpio number */
}
- memcpy(pdata, pdev->dev.platform_data, sizeof(*pdata));
drv_data = sdhci_s3c_get_driver_data(pdev);
- sc = sdhci_priv(host);
sc->host = host;
sc->pdev = pdev;
sc->pdata = pdata;
- sc->ext_cd_gpio = -1; /* invalid gpio number */
platform_set_drvdata(pdev, host);
@@ -486,9 +615,8 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
snprintf(name, 14, "mmc_busclk.%d", ptr);
clk = clk_get(dev, name);
- if (IS_ERR(clk)) {
+ if (IS_ERR(clk))
continue;
- }
clks++;
sc->clk_bus[ptr] = clk;
@@ -499,8 +627,6 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
*/
sc->cur_clk = ptr;
- clk_enable(clk);
-
dev_info(dev, "clock source %d: %s (%ld Hz)\n",
ptr, name, clk_get_rate(clk));
}
@@ -511,6 +637,10 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
goto err_no_busclks;
}
+#ifndef CONFIG_PM_RUNTIME
+ clk_enable(sc->clk_bus[sc->cur_clk]);
+#endif
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
host->ioaddr = devm_request_and_ioremap(&pdev->dev, res);
if (!host->ioaddr) {
@@ -616,12 +746,17 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
gpio_is_valid(pdata->ext_cd_gpio))
sdhci_s3c_setup_card_detect_gpio(sc);
+#ifdef CONFIG_PM_RUNTIME
+ clk_disable(sc->clk_io);
+#endif
return 0;
err_req_regs:
+#ifndef CONFIG_PM_RUNTIME
+ clk_disable(sc->clk_bus[sc->cur_clk]);
+#endif
for (ptr = 0; ptr < MAX_BUS_CLK; ptr++) {
if (sc->clk_bus[ptr]) {
- clk_disable(sc->clk_bus[ptr]);
clk_put(sc->clk_bus[ptr]);
}
}
@@ -631,6 +766,12 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
clk_put(sc->clk_io);
err_io_clk:
+ for (ptr = 0; ptr < NUM_GPIOS(sc->pdata->max_width); ptr++)
+ gpio_free(sc->gpios[ptr]);
+ if (pdata->cd_type == S3C_SDHCI_CD_INTERNAL)
+ gpio_free(sc->ext_cd_gpio);
+
+ err_pdata:
sdhci_free_host(host);
return ret;
@@ -638,9 +779,9 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
static int __devexit sdhci_s3c_remove(struct platform_device *pdev)
{
- struct s3c_sdhci_platdata *pdata = pdev->dev.platform_data;
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_s3c *sc = sdhci_priv(host);
+ struct s3c_sdhci_platdata *pdata = sc->pdata;
int ptr;
if (pdata->cd_type == S3C_SDHCI_CD_EXTERNAL && pdata->ext_cd_cleanup)
@@ -652,19 +793,30 @@ static int __devexit sdhci_s3c_remove(struct platform_device *pdev)
if (gpio_is_valid(sc->ext_cd_gpio))
gpio_free(sc->ext_cd_gpio);
+#ifdef CONFIG_PM_RUNTIME
+ clk_enable(sc->clk_io);
+#endif
sdhci_remove_host(host, 1);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- for (ptr = 0; ptr < 3; ptr++) {
+#ifndef CONFIG_PM_RUNTIME
+ clk_disable(sc->clk_bus[sc->cur_clk]);
+#endif
+ for (ptr = 0; ptr < MAX_BUS_CLK; ptr++) {
if (sc->clk_bus[ptr]) {
- clk_disable(sc->clk_bus[ptr]);
clk_put(sc->clk_bus[ptr]);
}
}
clk_disable(sc->clk_io);
clk_put(sc->clk_io);
+ if (pdev->dev.of_node) {
+ for (ptr = 0; ptr < NUM_GPIOS(sc->pdata->max_width); ptr++)
+ gpio_free(sc->gpios[ptr]);
+ }
+
sdhci_free_host(host);
platform_set_drvdata(pdev, NULL);
@@ -691,15 +843,28 @@ static int sdhci_s3c_resume(struct device *dev)
static int sdhci_s3c_runtime_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_s3c *ourhost = to_s3c(host);
+ struct clk *busclk = ourhost->clk_io;
+ int ret;
+
+ ret = sdhci_runtime_suspend_host(host);
- return sdhci_runtime_suspend_host(host);
+ clk_disable(ourhost->clk_bus[ourhost->cur_clk]);
+ clk_disable(busclk);
+ return ret;
}
static int sdhci_s3c_runtime_resume(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_s3c *ourhost = to_s3c(host);
+ struct clk *busclk = ourhost->clk_io;
+ int ret;
- return sdhci_runtime_resume_host(host);
+ clk_enable(busclk);
+ clk_enable(ourhost->clk_bus[ourhost->cur_clk]);
+ ret = sdhci_runtime_resume_host(host);
+ return ret;
}
#endif
@@ -737,6 +902,16 @@ static struct platform_device_id sdhci_s3c_driver_ids[] = {
};
MODULE_DEVICE_TABLE(platform, sdhci_s3c_driver_ids);
+#ifdef CONFIG_OF
+static const struct of_device_id sdhci_s3c_dt_match[] = {
+ { .compatible = "samsung,s3c6410-sdhci", },
+ { .compatible = "samsung,exynos4210-sdhci",
+ .data = (void *)EXYNOS4_SDHCI_DRV_DATA },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sdhci_s3c_dt_match);
+#endif
+
static struct platform_driver sdhci_s3c_driver = {
.probe = sdhci_s3c_probe,
.remove = __devexit_p(sdhci_s3c_remove),
@@ -744,6 +919,7 @@ static struct platform_driver sdhci_s3c_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "s3c-sdhci",
+ .of_match_table = of_match_ptr(sdhci_s3c_dt_match),
.pm = SDHCI_S3C_PMOPS,
},
};
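
With both a platform_device_id table and an of_device_id table, the driver now binds either by legacy platform name or by DT compatible. of_match_ptr() is what keeps this buildable without CONFIG_OF: the reference to the table compiles away, which is why the table definition above can stay inside #ifdef CONFIG_OF. Its definition in <linux/of.h> is simply:

        #ifdef CONFIG_OF
        #define of_match_ptr(_ptr)      (_ptr)
        #else
        #define of_match_ptr(_ptr)      NULL
        #endif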
diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c
index 423da81..6be89c0 100644
--- a/drivers/mmc/host/sdhci-spear.c
+++ b/drivers/mmc/host/sdhci-spear.c
@@ -20,6 +20,8 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/slab.h>
@@ -68,8 +70,42 @@ static irqreturn_t sdhci_gpio_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
+#ifdef CONFIG_OF
+static struct sdhci_plat_data * __devinit
+sdhci_probe_config_dt(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct sdhci_plat_data *pdata = NULL;
+ int cd_gpio;
+
+ cd_gpio = of_get_named_gpio(np, "cd-gpios", 0);
+ if (!gpio_is_valid(cd_gpio))
+ cd_gpio = -1;
+
+ /* pdata is always needed, since card_int_gpio is stored in it */
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ dev_err(&pdev->dev, "DT: kzalloc failed\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ pdata->card_int_gpio = cd_gpio;
+
+ return pdata;
+}
+#else
+static struct sdhci_plat_data * __devinit
+sdhci_probe_config_dt(struct platform_device *pdev)
+{
+ return ERR_PTR(-ENOSYS);
+}
+#endif
+
static int __devinit sdhci_probe(struct platform_device *pdev)
{
+ struct device_node *np = pdev->dev.of_node;
struct sdhci_host *host;
struct resource *iomem;
struct spear_sdhci *sdhci;
@@ -104,14 +140,22 @@ static int __devinit sdhci_probe(struct platform_device *pdev)
goto err;
}
- ret = clk_enable(sdhci->clk);
+ ret = clk_prepare_enable(sdhci->clk);
if (ret) {
dev_dbg(&pdev->dev, "Error enabling clock\n");
goto put_clk;
}
- /* overwrite platform_data */
- sdhci->data = dev_get_platdata(&pdev->dev);
+ if (np) {
+ sdhci->data = sdhci_probe_config_dt(pdev);
+ if (IS_ERR(sdhci->data)) {
+ dev_err(&pdev->dev, "DT: Failed to get pdata\n");
+ ret = -ENODEV;
+ goto disable_clk;
+ }
+ } else {
+ sdhci->data = dev_get_platdata(&pdev->dev);
+ }
+
pdev->dev.platform_data = sdhci;
if (pdev->dev.parent)
@@ -216,7 +260,7 @@ set_drvdata:
free_host:
sdhci_free_host(host);
disable_clk:
- clk_disable(sdhci->clk);
+ clk_disable_unprepare(sdhci->clk);
put_clk:
clk_put(sdhci->clk);
err:
@@ -238,7 +282,7 @@ static int __devexit sdhci_remove(struct platform_device *pdev)
sdhci_remove_host(host, dead);
sdhci_free_host(host);
- clk_disable(sdhci->clk);
+ clk_disable_unprepare(sdhci->clk);
clk_put(sdhci->clk);
return 0;
@@ -253,7 +297,7 @@ static int sdhci_suspend(struct device *dev)
ret = sdhci_suspend_host(host);
if (!ret)
- clk_disable(sdhci->clk);
+ clk_disable_unprepare(sdhci->clk);
return ret;
}
@@ -264,7 +308,7 @@ static int sdhci_resume(struct device *dev)
struct spear_sdhci *sdhci = dev_get_platdata(dev);
int ret;
- ret = clk_enable(sdhci->clk);
+ ret = clk_prepare_enable(sdhci->clk);
if (ret) {
dev_dbg(dev, "Resume: Error enabling clock\n");
return ret;
@@ -276,11 +320,20 @@ static int sdhci_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(sdhci_pm_ops, sdhci_suspend, sdhci_resume);
+#ifdef CONFIG_OF
+static const struct of_device_id sdhci_spear_id_table[] = {
+ { .compatible = "st,spear300-sdhci" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sdhci_spear_id_table);
+#endif
+
static struct platform_driver sdhci_driver = {
.driver = {
.name = "sdhci",
.owner = THIS_MODULE,
.pm = &sdhci_pm_ops,
+ .of_match_table = of_match_ptr(sdhci_spear_id_table),
},
.probe = sdhci_probe,
.remove = __devexit_p(sdhci_remove),
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index d43e746..84e8d0c 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -257,10 +257,9 @@ static int __devinit sdhci_tegra_probe(struct platform_device *pdev)
int rc;
match = of_match_device(sdhci_tegra_dt_match, &pdev->dev);
- if (match)
- soc_data = match->data;
- else
- soc_data = &soc_data_tegra20;
+ if (!match)
+ return -EINVAL;
+ soc_data = match->data;
host = sdhci_pltfm_init(pdev, soc_data->pdata);
if (IS_ERR(host))
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 9a11dc3..7922adb 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -28,6 +28,7 @@
#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
+#include <linux/mmc/slot-gpio.h>
#include "sdhci.h"
@@ -1293,6 +1294,13 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
SDHCI_CARD_PRESENT;
+ /* If we're using a cd-gpio, testing the presence bit might fail. */
+ if (!present) {
+ int ret = mmc_gpio_get_cd(host->mmc);
+ if (ret > 0)
+ present = true;
+ }
+
if (!present || host->flags & SDHCI_DEVICE_DEAD) {
host->mrq->cmd->error = -ENOMEDIUM;
tasklet_schedule(&host->finish_tasklet);
@@ -1597,57 +1605,65 @@ static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
spin_unlock_irqrestore(&host->lock, flags);
}
-static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host,
- struct mmc_ios *ios)
+static int sdhci_do_3_3v_signal_voltage_switch(struct sdhci_host *host,
+ u16 ctrl)
{
- u8 pwr;
- u16 clk, ctrl;
- u32 present_state;
+ int ret;
- /*
- * Signal Voltage Switching is only applicable for Host Controllers
- * v3.00 and above.
- */
- if (host->version < SDHCI_SPEC_300)
- return 0;
+ /* Set 1.8V Signal Enable in the Host Control2 register to 0 */
+ ctrl &= ~SDHCI_CTRL_VDD_180;
+ sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
- /*
- * We first check whether the request is to set signalling voltage
- * to 3.3V. If so, we change the voltage to 3.3V and return quickly.
- */
+ if (host->vqmmc) {
+ ret = regulator_set_voltage(host->vqmmc, 3300000, 3300000);
+ if (ret) {
+ pr_warning("%s: Switching to 3.3V signalling voltage "
+ " failed\n", mmc_hostname(host->mmc));
+ return -EIO;
+ }
+ }
+ /* Wait for 5ms */
+ usleep_range(5000, 5500);
+
+ /* 3.3V regulator output should be stable within 5 ms */
ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
- if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
- /* Set 1.8V Signal Enable in the Host Control2 register to 0 */
- ctrl &= ~SDHCI_CTRL_VDD_180;
- sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
+ if (!(ctrl & SDHCI_CTRL_VDD_180))
+ return 0;
- /* Wait for 5ms */
- usleep_range(5000, 5500);
+ pr_warning("%s: 3.3V regulator output did not became stable\n",
+ mmc_hostname(host->mmc));
- /* 3.3V regulator output should be stable within 5 ms */
- ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
- if (!(ctrl & SDHCI_CTRL_VDD_180))
- return 0;
- else {
- pr_info(DRIVER_NAME ": Switching to 3.3V "
- "signalling voltage failed\n");
- return -EIO;
- }
- } else if (!(ctrl & SDHCI_CTRL_VDD_180) &&
- (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180)) {
- /* Stop SDCLK */
- clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
- clk &= ~SDHCI_CLOCK_CARD_EN;
- sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+ return -EIO;
+}
- /* Check whether DAT[3:0] is 0000 */
- present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
- if (!((present_state & SDHCI_DATA_LVL_MASK) >>
- SDHCI_DATA_LVL_SHIFT)) {
- /*
- * Enable 1.8V Signal Enable in the Host Control2
- * register
- */
+static int sdhci_do_1_8v_signal_voltage_switch(struct sdhci_host *host,
+ u16 ctrl)
+{
+ u8 pwr;
+ u16 clk;
+ u32 present_state;
+ int ret;
+
+ /* Stop SDCLK */
+ clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ clk &= ~SDHCI_CLOCK_CARD_EN;
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+
+ /* Check whether DAT[3:0] is 0000 */
+ present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
+ if (!((present_state & SDHCI_DATA_LVL_MASK) >>
+ SDHCI_DATA_LVL_SHIFT)) {
+ /*
+ * Enable 1.8V Signal Enable in the Host Control2
+ * register
+ */
+ if (host->vqmmc)
+ ret = regulator_set_voltage(host->vqmmc,
+ 1800000, 1800000);
+ else
+ ret = 0;
+
+ if (!ret) {
ctrl |= SDHCI_CTRL_VDD_180;
sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
@@ -1656,7 +1672,7 @@ static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host,
ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
if (ctrl & SDHCI_CTRL_VDD_180) {
- /* Provide SDCLK again and wait for 1ms*/
+ /* Provide SDCLK again and wait for 1ms */
clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
clk |= SDHCI_CLOCK_CARD_EN;
sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
@@ -1673,29 +1689,55 @@ static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host,
return 0;
}
}
+ }
- /*
- * If we are here, that means the switch to 1.8V signaling
- * failed. We power cycle the card, and retry initialization
- * sequence by setting S18R to 0.
- */
- pwr = sdhci_readb(host, SDHCI_POWER_CONTROL);
- pwr &= ~SDHCI_POWER_ON;
- sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
- if (host->vmmc)
- regulator_disable(host->vmmc);
+ /*
+ * If we are here, that means the switch to 1.8V signaling
+ * failed. We power cycle the card, and retry initialization
+ * sequence by setting S18R to 0.
+ */
+ pwr = sdhci_readb(host, SDHCI_POWER_CONTROL);
+ pwr &= ~SDHCI_POWER_ON;
+ sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
+ if (host->vmmc)
+ regulator_disable(host->vmmc);
- /* Wait for 1ms as per the spec */
- usleep_range(1000, 1500);
- pwr |= SDHCI_POWER_ON;
- sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
- if (host->vmmc)
- regulator_enable(host->vmmc);
+ /* Wait for 1ms as per the spec */
+ usleep_range(1000, 1500);
+ pwr |= SDHCI_POWER_ON;
+ sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
+ if (host->vmmc)
+ regulator_enable(host->vmmc);
- pr_info(DRIVER_NAME ": Switching to 1.8V signalling "
- "voltage failed, retrying with S18R set to 0\n");
- return -EAGAIN;
- } else
+ pr_warning("%s: Switching to 1.8V signalling voltage failed, "
+ "retrying with S18R set to 0\n", mmc_hostname(host->mmc));
+
+ return -EAGAIN;
+}
+
+static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host,
+ struct mmc_ios *ios)
+{
+ u16 ctrl;
+
+ /*
+ * Signal Voltage Switching is only applicable for Host Controllers
+ * v3.00 and above.
+ */
+ if (host->version < SDHCI_SPEC_300)
+ return 0;
+
+ /*
+ * We first check whether the request is to set signalling voltage
+ * to 3.3V. If so, we change the voltage to 3.3V and return quickly.
+ */
+ ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+ if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330)
+ return sdhci_do_3_3v_signal_voltage_switch(host, ctrl);
+ else if (!(ctrl & SDHCI_CTRL_VDD_180) &&
+ (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180))
+ return sdhci_do_1_8v_signal_voltage_switch(host, ctrl);
+ else
/* No signal voltage switch required */
return 0;
}
@@ -2802,6 +2844,18 @@ int sdhci_add_host(struct sdhci_host *host)
!(host->mmc->caps & MMC_CAP_NONREMOVABLE))
mmc->caps |= MMC_CAP_NEEDS_POLL;
+ /* If the vqmmc regulator doesn't support 1.8V signalling, there is no UHS */
+ host->vqmmc = regulator_get(mmc_dev(mmc), "vqmmc");
+ if (IS_ERR(host->vqmmc)) {
+ pr_info("%s: no vqmmc regulator found\n", mmc_hostname(mmc));
+ host->vqmmc = NULL;
+ } else if (regulator_is_supported_voltage(host->vqmmc, 1800000, 1800000))
+ regulator_enable(host->vqmmc);
+ else
+ caps[1] &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
+ SDHCI_SUPPORT_DDR50);
+
/* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
if (caps[1] & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
SDHCI_SUPPORT_DDR50))
@@ -2832,15 +2886,6 @@ int sdhci_add_host(struct sdhci_host *host)
if (caps[1] & SDHCI_DRIVER_TYPE_D)
mmc->caps |= MMC_CAP_DRIVER_TYPE_D;
- /*
- * If Power Off Notify capability is enabled by the host,
- * set notify to short power off notify timeout value.
- */
- if (mmc->caps2 & MMC_CAP2_POWEROFF_NOTIFY)
- mmc->power_notify_type = MMC_HOST_PW_NOTIFY_SHORT;
- else
- mmc->power_notify_type = MMC_HOST_PW_NOTIFY_NONE;
-
/* Initial value for re-tuning timer count */
host->tuning_count = (caps[1] & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
SDHCI_RETUNING_TIMER_COUNT_SHIFT;
@@ -2862,7 +2907,8 @@ int sdhci_add_host(struct sdhci_host *host)
if (IS_ERR(host->vmmc)) {
pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc));
host->vmmc = NULL;
- }
+ } else
+ regulator_enable(host->vmmc);
#ifdef CONFIG_REGULATOR
if (host->vmmc) {
@@ -3119,8 +3165,15 @@ void sdhci_remove_host(struct sdhci_host *host, int dead)
tasklet_kill(&host->card_tasklet);
tasklet_kill(&host->finish_tasklet);
- if (host->vmmc)
+ if (host->vmmc) {
+ regulator_disable(host->vmmc);
regulator_put(host->vmmc);
+ }
+
+ if (host->vqmmc) {
+ regulator_disable(host->vqmmc);
+ regulator_put(host->vqmmc);
+ }
kfree(host->adma_desc);
kfree(host->align_buffer);
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 5d81427..11d2bc3 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -1213,7 +1213,9 @@ static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_BUFRE);
sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
} else if (state & INT_DTRANE) {
- sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_DTRANE);
+ sh_mmcif_writel(host->addr, MMCIF_CE_INT,
+ ~(INT_CMD12DRE | INT_CMD12RBE |
+ INT_CMD12CRE | INT_DTRANE));
sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
} else if (state & INT_CMD12RBE) {
sh_mmcif_writel(host->addr, MMCIF_CE_INT,
@@ -1229,6 +1231,10 @@ static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
host->sd_error = true;
dev_dbg(&host->pd->dev, "int err state = %08x\n", state);
}
+ if (host->state == STATE_IDLE) {
+ dev_info(&host->pd->dev, "Spurious IRQ status 0x%x", state);
+ return IRQ_HANDLED;
+ }
if (state & ~(INT_CMD12RBE | INT_CMD12CRE)) {
if (!host->dma_active)
return IRQ_WAKE_THREAD;
diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c
index 4b83c43..f18bece 100644
--- a/drivers/mmc/host/via-sdmmc.c
+++ b/drivers/mmc/host/via-sdmmc.c
@@ -1337,21 +1337,7 @@ static struct pci_driver via_sd_driver = {
.resume = via_sd_resume,
};
-static int __init via_sd_drv_init(void)
-{
- pr_info(DRV_NAME ": VIA SD/MMC Card Reader driver "
- "(C) 2008 VIA Technologies, Inc.\n");
-
- return pci_register_driver(&via_sd_driver);
-}
-
-static void __exit via_sd_drv_exit(void)
-{
- pci_unregister_driver(&via_sd_driver);
-}
-
-module_init(via_sd_drv_init);
-module_exit(via_sd_drv_exit);
+module_pci_driver(via_sd_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("VIA Technologies Inc.");
diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c
index 58eab9a..d5655a6 100644
--- a/drivers/mmc/host/vub300.c
+++ b/drivers/mmc/host/vub300.c
@@ -2358,9 +2358,9 @@ error5:
* which is contained at the end of struct mmc
*/
error4:
- usb_free_urb(command_out_urb);
-error1:
usb_free_urb(command_res_urb);
+error1:
+ usb_free_urb(command_out_urb);
error0:
return retval;
}
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 27143e0..73fcbbe 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -148,6 +148,13 @@ config MTD_BCM63XX_PARTS
 This provides partition parsing for BCM63xx devices with CFE
bootloaders.
+config MTD_BCM47XX_PARTS
+ tristate "BCM47XX partitioning support"
+ depends on BCM47XX
+ help
+ This provides a partition parser for devices based on BCM47xx
+ boards.
+
comment "User Modules And Translation Layers"
config MTD_CHAR
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
index f901354..18a38e5 100644
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o
obj-$(CONFIG_MTD_AFS_PARTS) += afs.o
obj-$(CONFIG_MTD_AR7_PARTS) += ar7part.o
obj-$(CONFIG_MTD_BCM63XX_PARTS) += bcm63xxpart.o
+obj-$(CONFIG_MTD_BCM47XX_PARTS) += bcm47xxpart.o
# 'Users' - code which presents functionality to userspace.
obj-$(CONFIG_MTD_CHAR) += mtdchar.o
diff --git a/drivers/mtd/bcm47xxpart.c b/drivers/mtd/bcm47xxpart.c
new file mode 100644
index 0000000..e06d782
--- /dev/null
+++ b/drivers/mtd/bcm47xxpart.c
@@ -0,0 +1,202 @@
+/*
+ * BCM47XX MTD partitioning
+ *
+ * Copyright © 2012 Rafał Miłecki <zajec5@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <asm/mach-bcm47xx/nvram.h>
+
+/* 10 parts were found on sflash on Netgear WNDR4500 */
+#define BCM47XXPART_MAX_PARTS 12
+
+/*
+ * Number of bytes we read when analyzing each block of flash memory.
+ * Set it big enough to detect a partition and read its important data.
+ */
+#define BCM47XXPART_BYTES_TO_READ 0x404
+
+/* Magics */
+#define BOARD_DATA_MAGIC 0x5246504D /* MPFR */
+#define POT_MAGIC1 0x54544f50 /* POTT */
+#define POT_MAGIC2 0x504f /* OP */
+#define ML_MAGIC1 0x39685a42
+#define ML_MAGIC2 0x26594131
+#define TRX_MAGIC 0x30524448
+
+struct trx_header {
+ uint32_t magic;
+ uint32_t length;
+ uint32_t crc32;
+ uint16_t flags;
+ uint16_t version;
+ uint32_t offset[3];
+} __packed;
+
+static void bcm47xxpart_add_part(struct mtd_partition *part, char *name,
+ u64 offset, uint32_t mask_flags)
+{
+ part->name = name;
+ part->offset = offset;
+ part->mask_flags = mask_flags;
+}
+
+static int bcm47xxpart_parse(struct mtd_info *master,
+ struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
+{
+ struct mtd_partition *parts;
+ uint8_t i, curr_part = 0;
+ uint32_t *buf;
+ size_t bytes_read;
+ uint32_t offset;
+ uint32_t blocksize = 0x10000;
+ struct trx_header *trx;
+
+ /* Alloc */
+ parts = kzalloc(sizeof(struct mtd_partition) * BCM47XXPART_MAX_PARTS,
+ GFP_KERNEL);
+ buf = kzalloc(BCM47XXPART_BYTES_TO_READ, GFP_KERNEL);
+ if (!parts || !buf) {
+ kfree(parts);
+ kfree(buf);
+ return -ENOMEM;
+ }
+
+ /* Parse block by block looking for magics */
+ for (offset = 0; offset <= master->size - blocksize;
+ offset += blocksize) {
+ /* Nothing more in higher memory */
+ if (offset >= 0x2000000)
+ break;
+
+ if (curr_part > BCM47XXPART_MAX_PARTS) {
+ pr_warn("Reached maximum number of partitions, scanning stopped!\n");
+ break;
+ }
+
+ /* Read beginning of the block */
+ if (mtd_read(master, offset, BCM47XXPART_BYTES_TO_READ,
+ &bytes_read, (uint8_t *)buf) < 0) {
+ pr_err("mtd_read error while parsing (offset: 0x%X)!\n",
+ offset);
+ continue;
+ }
+
+ /* CFE has small NVRAM at 0x400 */
+ if (buf[0x400 / 4] == NVRAM_HEADER) {
+ bcm47xxpart_add_part(&parts[curr_part++], "boot",
+ offset, MTD_WRITEABLE);
+ continue;
+ }
+
+ /* Standard NVRAM */
+ if (buf[0x000 / 4] == NVRAM_HEADER) {
+ bcm47xxpart_add_part(&parts[curr_part++], "nvram",
+ offset, 0);
+ continue;
+ }
+
+ /*
+ * board_data starts with a board_id that differs across boards,
+ * but we can (hopefully) rely on the 'MPFR' magic at 0x100
+ */
+ if (buf[0x100 / 4] == BOARD_DATA_MAGIC) {
+ bcm47xxpart_add_part(&parts[curr_part++], "board_data",
+ offset, MTD_WRITEABLE);
+ continue;
+ }
+
+ /* POT(TOP) */
+ if (buf[0x000 / 4] == POT_MAGIC1 &&
+ (buf[0x004 / 4] & 0xFFFF) == POT_MAGIC2) {
+ bcm47xxpart_add_part(&parts[curr_part++], "POT", offset,
+ MTD_WRITEABLE);
+ continue;
+ }
+
+ /* ML */
+ if (buf[0x010 / 4] == ML_MAGIC1 &&
+ buf[0x014 / 4] == ML_MAGIC2) {
+ bcm47xxpart_add_part(&parts[curr_part++], "ML", offset,
+ MTD_WRITEABLE);
+ continue;
+ }
+
+ /* TRX */
+ if (buf[0x000 / 4] == TRX_MAGIC) {
+ trx = (struct trx_header *)buf;
+
+ i = 0;
+ /* We have an LZMA loader if offset[2] points to something */
+ if (trx->offset[2]) {
+ bcm47xxpart_add_part(&parts[curr_part++],
+ "loader",
+ offset + trx->offset[i],
+ 0);
+ i++;
+ }
+
+ bcm47xxpart_add_part(&parts[curr_part++], "linux",
+ offset + trx->offset[i], 0);
+ i++;
+
+ /*
+ * Pure rootfs size is known and can be calculated as
+ * trx->length - trx->offset[i]. We don't fill it in, as
+ * we want the jffs2 (overlay) to live in the same mtd.
+ */
+ bcm47xxpart_add_part(&parts[curr_part++], "rootfs",
+ offset + trx->offset[i], 0);
+ i++;
+
+ /*
+ * The whole TRX has been scanned; skip to the next part. Use
+ * rounddown (not roundup), as the loop will increase the
+ * offset in the next step.
+ */
+ offset = rounddown(offset + trx->length, blocksize);
+ continue;
+ }
+ }
+ kfree(buf);
+
+ /*
+ * Assume that each partition ends where the following one
+ * begins.
+ */
+ for (i = 0; i < curr_part - 1; i++)
+ parts[i].size = parts[i + 1].offset - parts[i].offset;
+ if (curr_part > 0)
+ parts[curr_part - 1].size =
+ master->size - parts[curr_part - 1].offset;
+
+ *pparts = parts;
+ return curr_part;
+}
+
+static struct mtd_part_parser bcm47xxpart_mtd_parser = {
+ .owner = THIS_MODULE,
+ .parse_fn = bcm47xxpart_parse,
+ .name = "bcm47xxpart",
+};
+
+static int __init bcm47xxpart_init(void)
+{
+ return register_mtd_parser(&bcm47xxpart_mtd_parser);
+}
+
+static void __exit bcm47xxpart_exit(void)
+{
+ deregister_mtd_parser(&bcm47xxpart_mtd_parser);
+}
+
+module_init(bcm47xxpart_init);
+module_exit(bcm47xxpart_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MTD partitioning for BCM47XX flash memories");
diff --git a/drivers/mtd/chips/Kconfig b/drivers/mtd/chips/Kconfig
index b1e3c26..e469b01 100644
--- a/drivers/mtd/chips/Kconfig
+++ b/drivers/mtd/chips/Kconfig
@@ -43,9 +43,6 @@ choice
prompt "Flash cmd/query data swapping"
depends on MTD_CFI_ADV_OPTIONS
default MTD_CFI_NOSWAP
-
-config MTD_CFI_NOSWAP
- bool "NO"
---help---
This option defines the way in which the CPU attempts to arrange
data bits when writing the 'magic' commands to the chips. Saying
@@ -55,12 +52,8 @@ config MTD_CFI_NOSWAP
Specific arrangements are possible with the BIG_ENDIAN_BYTE and
LITTLE_ENDIAN_BYTE, if the bytes are reversed.
- If you have a LART, on which the data (and address) lines were
- connected in a fashion which ensured that the nets were as short
- as possible, resulting in a bit-shuffling which seems utterly
- random to the untrained eye, you need the LART_ENDIAN_BYTE option.
-
- Yes, there really exists something sicker than PDP-endian :)
+config MTD_CFI_NOSWAP
+ bool "NO"
config MTD_CFI_BE_BYTE_SWAP
bool "BIG_ENDIAN_BYTE"
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index dbbd2ed..7751443 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -2043,7 +2043,7 @@ static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip
{
struct cfi_private *cfi = map->fldrv_priv;
struct cfi_pri_intelext *extp = cfi->cmdset_priv;
- int udelay;
+ int mdelay;
int ret;
adr += chip->start;
@@ -2072,9 +2072,17 @@ static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip
* If Instant Individual Block Locking supported then no need
* to delay.
*/
- udelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1000000/HZ : 0;
+ /*
+ * Unlocking may take up to 1.4 seconds on some Intel flashes, so
+ * let's use a maximum of 1.5 seconds (1500 ms) as the timeout.
+ *
+ * See "Clear Block Lock-Bits Time" on page 40 in the
+ * "3 Volt Intel StrataFlash Memory" 28F128J3,28F640J3,28F320J3 manual
+ * from February 2003.
+ */
+ mdelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1500 : 0;
- ret = WAIT_TIMEOUT(map, chip, adr, udelay, udelay * 100);
+ ret = WAIT_TIMEOUT(map, chip, adr, mdelay, mdelay * 1000);
if (ret) {
map_write(map, CMD(0x70), adr);
chip->state = FL_STATUS;
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 22d0493..5ff5c4a 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -431,6 +431,68 @@ static void cfi_fixup_major_minor(struct cfi_private *cfi,
}
}
+static int is_m29ew(struct cfi_private *cfi)
+{
+ if (cfi->mfr == CFI_MFR_INTEL &&
+ ((cfi->device_type == CFI_DEVICETYPE_X8 && (cfi->id & 0xff) == 0x7e) ||
+ (cfi->device_type == CFI_DEVICETYPE_X16 && cfi->id == 0x227e)))
+ return 1;
+ return 0;
+}
+
+/*
+ * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 20:
+ * Some revisions of the M29EW suffer from erase suspend hang ups. In
+ * particular, it can occur when the sequence
+ * Erase Confirm -> Suspend -> Program -> Resume
+ * causes a lockup due to internal timing issues. The consequence is that the
+ * erase cannot be resumed without inserting a dummy command after programming
+ * and prior to resuming. [...] The work-around is to issue a dummy write cycle
+ * that writes an F0 command code before the RESUME command.
+ */
+static void cfi_fixup_m29ew_erase_suspend(struct map_info *map,
+ unsigned long adr)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ /* before resume, insert a dummy 0xF0 cycle for Micron M29EW devices */
+ if (is_m29ew(cfi))
+ map_write(map, CMD(0xF0), adr);
+}
+
+/*
+ * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 22:
+ *
+ * Some revisions of the M29EW (for example, A1 and A2 step revisions)
+ * are affected by a problem that could cause a hang up when an ERASE SUSPEND
+ * command is issued after an ERASE RESUME operation without waiting for a
+ * minimum delay. The result is that once the ERASE seems to be completed
+ * (no bits are toggling), the contents of the Flash memory block on which
+ * the erase was ongoing could be inconsistent with the expected values
+ * (typically, the array value is stuck to the 0xC0, 0xC4, 0x80, or 0x84
+ * values), causing a consequent failure of the ERASE operation.
+ * The occurrence of this issue could be high, especially when file system
+ * operations on the Flash are intensive. As a result, it is recommended
+ * that a patch be applied. Intensive file system operations can cause many
+ * calls to the garbage routine to free Flash space (also by erasing physical
+ * Flash blocks) and as a result, many consecutive SUSPEND and RESUME
+ * commands can occur. The problem disappears when a delay is inserted after
+ * the RESUME command by using the udelay() function available in Linux.
+ * The DELAY value must be tuned based on the customer's platform.
+ * The maximum value that fixes the problem in all cases is 500 µs.
+ * But, in our experience, a delay of 30 µs to 50 µs is sufficient
+ * in most cases.
+ * We have chosen 500 µs because this latency is acceptable.
+ */
+static void cfi_fixup_m29ew_delay_after_resume(struct cfi_private *cfi)
+{
+ /*
+ * Resolving the delay-after-resume issue; see Micron TN-13-07.
+ * Worst-case delay must be 500 µs, but 30-50 µs should be OK as well.
+ */
+ if (is_m29ew(cfi))
+ cfi_udelay(500);
+}
+
struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
{
struct cfi_private *cfi = map->fldrv_priv;
@@ -776,7 +838,10 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
switch(chip->oldstate) {
case FL_ERASING:
+ cfi_fixup_m29ew_erase_suspend(map,
+ chip->in_progress_block_addr);
map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr);
+ cfi_fixup_m29ew_delay_after_resume(cfi);
chip->oldstate = FL_READY;
chip->state = FL_ERASING;
break;
@@ -916,6 +981,8 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
/* Disallow XIP again */
local_irq_disable();
+ /* Correct Erase Suspend Hangups for M29EW */
+ cfi_fixup_m29ew_erase_suspend(map, adr);
/* Resume the write or erase operation */
map_write(map, cfi->sector_erase_cmd, adr);
chip->state = oldstate;
diff --git a/drivers/mtd/cmdlinepart.c b/drivers/mtd/cmdlinepart.c
index 4558e0f..aed1b8a 100644
--- a/drivers/mtd/cmdlinepart.c
+++ b/drivers/mtd/cmdlinepart.c
@@ -39,11 +39,10 @@
#include <linux/kernel.h>
#include <linux/slab.h>
-
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
-#include <linux/bootmem.h>
#include <linux/module.h>
+#include <linux/err.h>
/* error message prefix */
#define ERRP "mtd: "
@@ -72,7 +71,7 @@ static struct cmdline_mtd_partition *partitions;
/* the command line passed to mtdpart_setup() */
static char *cmdline;
-static int cmdline_parsed = 0;
+static int cmdline_parsed;
/*
* Parse one partition definition for an MTD. Since there can be many
@@ -83,15 +82,14 @@ static int cmdline_parsed = 0;
* syntax has been verified ok.
*/
static struct mtd_partition * newpart(char *s,
- char **retptr,
- int *num_parts,
- int this_part,
- unsigned char **extra_mem_ptr,
- int extra_mem_size)
+ char **retptr,
+ int *num_parts,
+ int this_part,
+ unsigned char **extra_mem_ptr,
+ int extra_mem_size)
{
struct mtd_partition *parts;
- unsigned long size;
- unsigned long offset = OFFSET_CONTINUOUS;
+ unsigned long size, offset = OFFSET_CONTINUOUS;
char *name;
int name_len;
unsigned char *extra_mem;
@@ -99,124 +97,106 @@ static struct mtd_partition * newpart(char *s,
unsigned int mask_flags;
/* fetch the partition size */
- if (*s == '-')
- { /* assign all remaining space to this partition */
+ if (*s == '-') {
+ /* assign all remaining space to this partition */
size = SIZE_REMAINING;
s++;
- }
- else
- {
+ } else {
size = memparse(s, &s);
- if (size < PAGE_SIZE)
- {
+ if (size < PAGE_SIZE) {
printk(KERN_ERR ERRP "partition size too small (%lx)\n", size);
- return NULL;
+ return ERR_PTR(-EINVAL);
}
}
/* fetch partition name and flags */
mask_flags = 0; /* this is going to be a regular partition */
delim = 0;
- /* check for offset */
- if (*s == '@')
- {
- s++;
- offset = memparse(s, &s);
- }
- /* now look for name */
+
+ /* check for offset */
+ if (*s == '@') {
+ s++;
+ offset = memparse(s, &s);
+ }
+
+ /* now look for name */
if (*s == '(')
- {
delim = ')';
- }
- if (delim)
- {
+ if (delim) {
char *p;
- name = ++s;
+ name = ++s;
p = strchr(name, delim);
- if (!p)
- {
+ if (!p) {
printk(KERN_ERR ERRP "no closing %c found in partition name\n", delim);
- return NULL;
+ return ERR_PTR(-EINVAL);
}
name_len = p - name;
s = p + 1;
- }
- else
- {
- name = NULL;
+ } else {
+ name = NULL;
name_len = 13; /* Partition_000 */
}
/* record name length for memory allocation later */
extra_mem_size += name_len + 1;
- /* test for options */
- if (strncmp(s, "ro", 2) == 0)
- {
+ /* test for options */
+ if (strncmp(s, "ro", 2) == 0) {
mask_flags |= MTD_WRITEABLE;
s += 2;
- }
+ }
- /* if lk is found do NOT unlock the MTD partition*/
- if (strncmp(s, "lk", 2) == 0)
- {
+ /* if lk is found do NOT unlock the MTD partition*/
+ if (strncmp(s, "lk", 2) == 0) {
mask_flags |= MTD_POWERUP_LOCK;
s += 2;
- }
+ }
/* test if more partitions are following */
- if (*s == ',')
- {
- if (size == SIZE_REMAINING)
- {
+ if (*s == ',') {
+ if (size == SIZE_REMAINING) {
printk(KERN_ERR ERRP "no partitions allowed after a fill-up partition\n");
- return NULL;
+ return ERR_PTR(-EINVAL);
}
/* more partitions follow, parse them */
parts = newpart(s + 1, &s, num_parts, this_part + 1,
&extra_mem, extra_mem_size);
- if (!parts)
- return NULL;
- }
- else
- { /* this is the last partition: allocate space for all */
+ if (IS_ERR(parts))
+ return parts;
+ } else {
+ /* this is the last partition: allocate space for all */
int alloc_size;
*num_parts = this_part + 1;
alloc_size = *num_parts * sizeof(struct mtd_partition) +
extra_mem_size;
+
parts = kzalloc(alloc_size, GFP_KERNEL);
if (!parts)
- return NULL;
+ return ERR_PTR(-ENOMEM);
extra_mem = (unsigned char *)(parts + *num_parts);
}
+
/* enter this partition (offset will be calculated later if it is zero at this point) */
parts[this_part].size = size;
parts[this_part].offset = offset;
parts[this_part].mask_flags = mask_flags;
if (name)
- {
strlcpy(extra_mem, name, name_len + 1);
- }
else
- {
sprintf(extra_mem, "Partition_%03d", this_part);
- }
parts[this_part].name = extra_mem;
extra_mem += name_len + 1;
dbg(("partition %d: name <%s>, offset %llx, size %llx, mask flags %x\n",
- this_part,
- parts[this_part].name,
- parts[this_part].offset,
- parts[this_part].size,
- parts[this_part].mask_flags));
+ this_part, parts[this_part].name, parts[this_part].offset,
+ parts[this_part].size, parts[this_part].mask_flags));
/* return (updated) pointer to extra_mem memory */
if (extra_mem_ptr)
- *extra_mem_ptr = extra_mem;
+ *extra_mem_ptr = extra_mem;
/* return (updated) pointer command line string */
*retptr = s;
@@ -236,16 +216,16 @@ static int mtdpart_setup_real(char *s)
{
struct cmdline_mtd_partition *this_mtd;
struct mtd_partition *parts;
- int mtd_id_len;
- int num_parts;
+ int mtd_id_len, num_parts;
char *p, *mtd_id;
- mtd_id = s;
+ mtd_id = s;
+
/* fetch <mtd-id> */
- if (!(p = strchr(s, ':')))
- {
+ p = strchr(s, ':');
+ if (!p) {
printk(KERN_ERR ERRP "no mtd-id\n");
- return 0;
+ return -EINVAL;
}
mtd_id_len = p - mtd_id;
@@ -262,8 +242,7 @@ static int mtdpart_setup_real(char *s)
(unsigned char**)&this_mtd, /* out: extra mem */
mtd_id_len + 1 + sizeof(*this_mtd) +
sizeof(void*)-1 /*alignment*/);
- if(!parts)
- {
+ if (IS_ERR(parts)) {
/*
* An error occurred. We're either:
* a) out of memory, or
@@ -271,12 +250,12 @@ static int mtdpart_setup_real(char *s)
* Either way, this mtd is hosed and we're
* unlikely to succeed in parsing any more
*/
- return 0;
+ return PTR_ERR(parts);
}
/* align this_mtd */
this_mtd = (struct cmdline_mtd_partition *)
- ALIGN((unsigned long)this_mtd, sizeof(void*));
+ ALIGN((unsigned long)this_mtd, sizeof(void *));
/* enter results */
this_mtd->parts = parts;
this_mtd->num_parts = num_parts;
@@ -296,14 +275,14 @@ static int mtdpart_setup_real(char *s)
break;
/* does another spec follow? */
- if (*s != ';')
- {
+ if (*s != ';') {
printk(KERN_ERR ERRP "bad character after partition (%c)\n", *s);
- return 0;
+ return -EINVAL;
}
s++;
}
- return 1;
+
+ return 0;
}
/*
@@ -318,44 +297,58 @@ static int parse_cmdline_partitions(struct mtd_info *master,
struct mtd_part_parser_data *data)
{
unsigned long offset;
- int i;
+ int i, err;
struct cmdline_mtd_partition *part;
const char *mtd_id = master->name;
/* parse command line */
- if (!cmdline_parsed)
- mtdpart_setup_real(cmdline);
+ if (!cmdline_parsed) {
+ err = mtdpart_setup_real(cmdline);
+ if (err)
+ return err;
+ }
- for(part = partitions; part; part = part->next)
- {
- if ((!mtd_id) || (!strcmp(part->mtd_id, mtd_id)))
- {
- for(i = 0, offset = 0; i < part->num_parts; i++)
- {
+ for (part = partitions; part; part = part->next) {
+ if ((!mtd_id) || (!strcmp(part->mtd_id, mtd_id))) {
+ for (i = 0, offset = 0; i < part->num_parts; i++) {
if (part->parts[i].offset == OFFSET_CONTINUOUS)
- part->parts[i].offset = offset;
+ part->parts[i].offset = offset;
else
- offset = part->parts[i].offset;
+ offset = part->parts[i].offset;
+
if (part->parts[i].size == SIZE_REMAINING)
- part->parts[i].size = master->size - offset;
- if (offset + part->parts[i].size > master->size)
- {
+ part->parts[i].size = master->size - offset;
+
+ if (part->parts[i].size == 0) {
+ printk(KERN_WARNING ERRP
+ "%s: skipping zero sized partition\n",
+ part->mtd_id);
+ part->num_parts--;
+ memmove(&part->parts[i],
+ &part->parts[i + 1],
+ sizeof(*part->parts) * (part->num_parts - i));
+ continue;
+ }
+
+ if (offset + part->parts[i].size > master->size) {
printk(KERN_WARNING ERRP
"%s: partitioning exceeds flash size, truncating\n",
part->mtd_id);
part->parts[i].size = master->size - offset;
- part->num_parts = i;
}
offset += part->parts[i].size;
}
+
*pparts = kmemdup(part->parts,
sizeof(*part->parts) * part->num_parts,
GFP_KERNEL);
if (!*pparts)
return -ENOMEM;
+
return part->num_parts;
}
}
+
return 0;
}
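
For reference, the partition syntax this file parses, assembled from the tokens handled in newpart() above: memparse() sizes with k/m/g suffixes, an optional '@offset', a '(name)', the 'ro' and 'lk' flags, and '-' for all remaining space. The concrete layout below is only an illustration:

        mtdparts=phys_flash:512k(bootloader)ro,64k(env)lk,4m@1m(kernel),-(rootfs)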
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index 4cdb2af..27f80cd 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -97,7 +97,7 @@ config MTD_M25P80
doesn't support the JEDEC ID instruction.
config M25PXX_USE_FAST_READ
- bool "Use FAST_READ OPCode allowing SPI CLK <= 50MHz"
+ bool "Use FAST_READ OPCode allowing SPI CLK >= 50MHz"
depends on MTD_M25P80
default y
help
@@ -120,6 +120,14 @@ config MTD_SST25L
Set up your spi devices with the right board-specific platform data,
if you want to specify device partitioning.
+config MTD_BCM47XXSFLASH
+ tristate "R/O support for serial flash on BCMA bus"
+ depends on BCMA_SFLASH
+ help
+ The BCMA bus can have various flash memories attached; they are
+ registered by bcma as platform devices. This enables the driver for
+ serial flash memories (only read-only mode is implemented).
+
config MTD_SLRAM
tristate "Uncached system RAM"
help
diff --git a/drivers/mtd/devices/Makefile b/drivers/mtd/devices/Makefile
index a4dd1d8..395733a 100644
--- a/drivers/mtd/devices/Makefile
+++ b/drivers/mtd/devices/Makefile
@@ -19,5 +19,6 @@ obj-$(CONFIG_MTD_DATAFLASH) += mtd_dataflash.o
obj-$(CONFIG_MTD_M25P80) += m25p80.o
obj-$(CONFIG_MTD_SPEAR_SMI) += spear_smi.o
obj-$(CONFIG_MTD_SST25L) += sst25l.o
+obj-$(CONFIG_MTD_BCM47XXSFLASH) += bcm47xxsflash.o
CFLAGS_docg3.o += -I$(src)
\ No newline at end of file
diff --git a/drivers/mtd/devices/bcm47xxsflash.c b/drivers/mtd/devices/bcm47xxsflash.c
new file mode 100644
index 0000000..2dc5a6f
--- /dev/null
+++ b/drivers/mtd/devices/bcm47xxsflash.c
@@ -0,0 +1,105 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/platform_device.h>
+#include <linux/bcma/bcma.h>
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Serial flash driver for BCMA bus");
+
+static const char *probes[] = { "bcm47xxpart", NULL };
+
+static int bcm47xxsflash_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ struct bcma_sflash *sflash = mtd->priv;
+
+ /* Check address range */
+ if ((from + len) > mtd->size)
+ return -EINVAL;
+
+ memcpy_fromio(buf, (void __iomem *)KSEG0ADDR(sflash->window + from),
+ len);
+
+ return len;
+}
+
+static void bcm47xxsflash_fill_mtd(struct bcma_sflash *sflash,
+ struct mtd_info *mtd)
+{
+ mtd->priv = sflash;
+ mtd->name = "bcm47xxsflash";
+ mtd->owner = THIS_MODULE;
+ mtd->type = MTD_ROM;
+ mtd->size = sflash->size;
+ mtd->_read = bcm47xxsflash_read;
+
+ /* TODO: implement writing support and verify/change following code */
+ mtd->flags = MTD_CAP_ROM;
+ mtd->writebufsize = mtd->writesize = 1;
+}
+
+static int bcm47xxsflash_probe(struct platform_device *pdev)
+{
+ struct bcma_sflash *sflash = dev_get_platdata(&pdev->dev);
+ int err;
+
+ sflash->mtd = kzalloc(sizeof(struct mtd_info), GFP_KERNEL);
+ if (!sflash->mtd) {
+ err = -ENOMEM;
+ goto out;
+ }
+ bcm47xxsflash_fill_mtd(sflash, sflash->mtd);
+
+ err = mtd_device_parse_register(sflash->mtd, probes, NULL, NULL, 0);
+ if (err) {
+ pr_err("Failed to register MTD device: %d\n", err);
+ goto err_dev_reg;
+ }
+
+ return 0;
+
+err_dev_reg:
+ kfree(sflash->mtd);
+out:
+ return err;
+}
+
+static int __devexit bcm47xxsflash_remove(struct platform_device *pdev)
+{
+ struct bcma_sflash *sflash = dev_get_platdata(&pdev->dev);
+
+ mtd_device_unregister(sflash->mtd);
+ kfree(sflash->mtd);
+
+ return 0;
+}
+
+static struct platform_driver bcma_sflash_driver = {
+ .remove = __devexit_p(bcm47xxsflash_remove),
+ .driver = {
+ .name = "bcma_sflash",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init bcm47xxsflash_init(void)
+{
+ int err;
+
+ err = platform_driver_probe(&bcma_sflash_driver, bcm47xxsflash_probe);
+ if (err)
+ pr_err("Failed to register BCMA serial flash driver: %d\n",
+ err);
+
+ return err;
+}
+
+static void __exit bcm47xxsflash_exit(void)
+{
+ platform_driver_unregister(&bcma_sflash_driver);
+}
+
+module_init(bcm47xxsflash_init);
+module_exit(bcm47xxsflash_exit);
diff --git a/drivers/mtd/devices/doc2001plus.c b/drivers/mtd/devices/doc2001plus.c
index 04eb2e4..4f2220a 100644
--- a/drivers/mtd/devices/doc2001plus.c
+++ b/drivers/mtd/devices/doc2001plus.c
@@ -659,23 +659,15 @@ static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
#ifdef ECC_DEBUG
printk("%s(%d): Millennium Plus ECC error (from=0x%x:\n",
__FILE__, __LINE__, (int)from);
- printk(" syndrome= %02x:%02x:%02x:%02x:%02x:"
- "%02x\n",
- syndrome[0], syndrome[1], syndrome[2],
- syndrome[3], syndrome[4], syndrome[5]);
- printk(" eccbuf= %02x:%02x:%02x:%02x:%02x:"
- "%02x\n",
- eccbuf[0], eccbuf[1], eccbuf[2],
- eccbuf[3], eccbuf[4], eccbuf[5]);
+ printk(" syndrome= %*phC\n", 6, syndrome);
+ printk(" eccbuf= %*phC\n", 6, eccbuf);
#endif
ret = -EIO;
}
}
#ifdef PSYCHO_DEBUG
- printk("ECC DATA at %lx: %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n",
- (long)from, eccbuf[0], eccbuf[1], eccbuf[2], eccbuf[3],
- eccbuf[4], eccbuf[5]);
+ printk("ECC DATA at %lx: %*ph\n", (long)from, 6, eccbuf);
#endif
/* disable the ECC engine */
WriteDOC(DOC_ECC_DIS, docptr , Mplus_ECCConf);
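
Both this diff and the docg3 one below convert hand-rolled hex dumps to the kernel's %*ph printk extension, which prints a small buffer (up to 64 bytes; the field width gives the byte count) as hex. The variants differ only in the separator:

        u8 ecc[6] = { 0xde, 0xad, 0xbe, 0xef, 0x00, 0x42 };

        printk("%*ph\n",  6, ecc);      /* prints "de ad be ef 00 42" */
        printk("%*phC\n", 6, ecc);      /* prints "de:ad:be:ef:00:42" */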
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
index f70854d..d34d83b 100644
--- a/drivers/mtd/devices/docg3.c
+++ b/drivers/mtd/devices/docg3.c
@@ -919,19 +919,13 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t from,
eccconf1 = doc_register_readb(docg3, DOC_ECCCONF1);
if (nboob >= DOC_LAYOUT_OOB_SIZE) {
- doc_dbg("OOB - INFO: %02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
- oobbuf[0], oobbuf[1], oobbuf[2], oobbuf[3],
- oobbuf[4], oobbuf[5], oobbuf[6]);
+ doc_dbg("OOB - INFO: %*phC\n", 7, oobbuf);
doc_dbg("OOB - HAMMING: %02x\n", oobbuf[7]);
- doc_dbg("OOB - BCH_ECC: %02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
- oobbuf[8], oobbuf[9], oobbuf[10], oobbuf[11],
- oobbuf[12], oobbuf[13], oobbuf[14]);
+ doc_dbg("OOB - BCH_ECC: %*phC\n", 7, oobbuf + 8);
doc_dbg("OOB - UNUSED: %02x\n", oobbuf[15]);
}
doc_dbg("ECC checks: ECCConf1=%x\n", eccconf1);
- doc_dbg("ECC HW_ECC: %02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
- hwecc[0], hwecc[1], hwecc[2], hwecc[3], hwecc[4],
- hwecc[5], hwecc[6]);
+ doc_dbg("ECC HW_ECC: %*phC\n", 7, hwecc);
ret = -EIO;
if (is_prot_seq_error(docg3))
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 5d0d68c..03838ba 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -633,11 +633,14 @@ static const struct spi_device_id m25p_ids[] = {
{ "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) },
{ "at26df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) },
+ { "at45db081d", INFO(0x1f2500, 0, 64 * 1024, 16, SECT_4K) },
+
/* EON -- en25xxx */
{ "en25f32", INFO(0x1c3116, 0, 64 * 1024, 64, SECT_4K) },
{ "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) },
{ "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) },
{ "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) },
+ { "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) },
/* Everspin */
{ "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2) },
@@ -646,6 +649,7 @@ static const struct spi_device_id m25p_ids[] = {
{ "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) },
{ "320s33b", INFO(0x898912, 0, 64 * 1024, 64, 0) },
{ "640s33b", INFO(0x898913, 0, 64 * 1024, 128, 0) },
+ { "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, 0) },
/* Macronix */
{ "mx25l2005a", INFO(0xc22012, 0, 64 * 1024, 4, SECT_4K) },
@@ -659,15 +663,15 @@ static const struct spi_device_id m25p_ids[] = {
{ "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, 0) },
{ "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
+ /* Micron */
+ { "n25q128", INFO(0x20ba18, 0, 64 * 1024, 256, 0) },
+ { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K) },
+
/* Spansion -- single (large) sector size only, at least
* for the chips listed here (without boot sectors).
*/
- { "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8, 0) },
- { "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16, 0) },
- { "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32, 0) },
- { "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64, 0) },
- { "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64, SECT_4K) },
- { "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128, 0) },
+ { "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64, 0) },
+ { "s25sl064p", INFO(0x010216, 0x4d00, 64 * 1024, 128, 0) },
{ "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, 0) },
{ "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512, 0) },
{ "s25fl512s", INFO(0x010220, 0x4d00, 256 * 1024, 256, 0) },
@@ -676,6 +680,11 @@ static const struct spi_device_id m25p_ids[] = {
{ "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) },
{ "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, 0) },
{ "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, 0) },
+ { "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8, 0) },
+ { "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16, 0) },
+ { "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32, 0) },
+ { "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64, 0) },
+ { "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128, 0) },
{ "s25fl016k", INFO(0xef4015, 0, 64 * 1024, 32, SECT_4K) },
{ "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
@@ -699,6 +708,7 @@ static const struct spi_device_id m25p_ids[] = {
{ "m25p32", INFO(0x202016, 0, 64 * 1024, 64, 0) },
{ "m25p64", INFO(0x202017, 0, 64 * 1024, 128, 0) },
{ "m25p128", INFO(0x202018, 0, 256 * 1024, 64, 0) },
+ { "n25q032", INFO(0x20ba16, 0, 64 * 1024, 64, 0) },
{ "m25p05-nonjedec", INFO(0, 0, 32 * 1024, 2, 0) },
{ "m25p10-nonjedec", INFO(0, 0, 32 * 1024, 4, 0) },
@@ -714,6 +724,7 @@ static const struct spi_device_id m25p_ids[] = {
{ "m45pe80", INFO(0x204014, 0, 64 * 1024, 16, 0) },
{ "m45pe16", INFO(0x204015, 0, 64 * 1024, 32, 0) },
+ { "m25pe20", INFO(0x208012, 0, 64 * 1024, 4, 0) },
{ "m25pe80", INFO(0x208014, 0, 64 * 1024, 16, 0) },
{ "m25pe16", INFO(0x208015, 0, 64 * 1024, 32, SECT_4K) },
@@ -730,6 +741,7 @@ static const struct spi_device_id m25p_ids[] = {
{ "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) },
{ "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) },
{ "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) },
+ { "w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64, SECT_4K) },
{ "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
{ "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
{ "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) },
diff --git a/drivers/mtd/devices/spear_smi.c b/drivers/mtd/devices/spear_smi.c
index 6796036..dcc3c95 100644
--- a/drivers/mtd/devices/spear_smi.c
+++ b/drivers/mtd/devices/spear_smi.c
@@ -26,6 +26,7 @@
#include <linux/module.h>
#include <linux/param.h>
#include <linux/platform_device.h>
+#include <linux/pm.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/spear_smi.h>
@@ -240,8 +241,8 @@ static int spear_smi_read_sr(struct spear_smi *dev, u32 bank)
/* copy dev->status (lower 16 bits) in order to release lock */
if (ret > 0)
ret = dev->status & 0xffff;
- else
- ret = -EIO;
+ else if (ret == 0)
+ ret = -ETIMEDOUT;
/* restore the ctrl regs state */
writel(ctrlreg1, dev->io_base + SMI_CR1);
@@ -269,16 +270,19 @@ static int spear_smi_wait_till_ready(struct spear_smi *dev, u32 bank,
finish = jiffies + timeout;
do {
status = spear_smi_read_sr(dev, bank);
- if (status < 0)
- continue; /* try till timeout */
- else if (!(status & SR_WIP))
+ if (status < 0) {
+ if (status == -ETIMEDOUT)
+ continue; /* try till finish */
+ return status;
+ } else if (!(status & SR_WIP)) {
return 0;
+ }
cond_resched();
} while (!time_after_eq(jiffies, finish));
dev_err(&dev->pdev->dev, "smi controller is busy, timeout\n");
- return status;
+ return -EBUSY;
}
/**
@@ -335,6 +339,9 @@ static void spear_smi_hw_init(struct spear_smi *dev)
val = HOLD1 | BANK_EN | DSEL_TIME | (prescale << 8);
mutex_lock(&dev->lock);
+ /* clear all interrupt conditions */
+ writel(0, dev->io_base + SMI_SR);
+
writel(val, dev->io_base + SMI_CR1);
mutex_unlock(&dev->lock);
}
@@ -391,11 +398,11 @@ static int spear_smi_write_enable(struct spear_smi *dev, u32 bank)
writel(ctrlreg1, dev->io_base + SMI_CR1);
writel(0, dev->io_base + SMI_CR2);
- if (ret <= 0) {
+ if (ret == 0) {
ret = -EIO;
dev_err(&dev->pdev->dev,
"smi controller failed on write enable\n");
- } else {
+ } else if (ret > 0) {
/* check whether write mode status is set for required bank */
if (dev->status & (1 << (bank + WM_SHIFT)))
ret = 0;
@@ -462,10 +469,10 @@ static int spear_smi_erase_sector(struct spear_smi *dev,
ret = wait_event_interruptible_timeout(dev->cmd_complete,
dev->status & TFF, SMI_CMD_TIMEOUT);
- if (ret <= 0) {
+ if (ret == 0) {
ret = -EIO;
dev_err(&dev->pdev->dev, "sector erase failed\n");
- } else
+ } else if (ret > 0)
ret = 0; /* success */
/* restore ctrl regs */
@@ -820,7 +827,7 @@ static int spear_smi_setup_banks(struct platform_device *pdev,
if (!flash_info)
return -ENODEV;
- flash = kzalloc(sizeof(*flash), GFP_ATOMIC);
+ flash = devm_kzalloc(&pdev->dev, sizeof(*flash), GFP_ATOMIC);
if (!flash)
return -ENOMEM;
flash->bank = bank;
@@ -831,15 +838,13 @@ static int spear_smi_setup_banks(struct platform_device *pdev,
flash_index = spear_smi_probe_flash(dev, bank);
if (flash_index < 0) {
dev_info(&dev->pdev->dev, "smi-nor%d not found\n", bank);
- ret = flash_index;
- goto err_probe;
+ return flash_index;
}
/* map the memory for nor flash chip */
- flash->base_addr = ioremap(flash_info->mem_base, flash_info->size);
- if (!flash->base_addr) {
- ret = -EIO;
- goto err_probe;
- }
+ flash->base_addr = devm_ioremap(&pdev->dev, flash_info->mem_base,
+ flash_info->size);
+ if (!flash->base_addr)
+ return -EIO;
dev->flash[bank] = flash;
flash->mtd.priv = dev;
@@ -881,17 +886,10 @@ static int spear_smi_setup_banks(struct platform_device *pdev,
count);
if (ret) {
dev_err(&dev->pdev->dev, "Err MTD partition=%d\n", ret);
- goto err_map;
+ return ret;
}
return 0;
-
-err_map:
- iounmap(flash->base_addr);
-
-err_probe:
- kfree(flash);
- return ret;
}
/**
@@ -928,20 +926,13 @@ static int __devinit spear_smi_probe(struct platform_device *pdev)
}
} else {
pdata = dev_get_platdata(&pdev->dev);
- if (pdata < 0) {
+ if (!pdata) {
ret = -ENODEV;
dev_err(&pdev->dev, "no platform data\n");
goto err;
}
}
- smi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!smi_base) {
- ret = -ENODEV;
- dev_err(&pdev->dev, "invalid smi base address\n");
- goto err;
- }
-
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = -ENODEV;
@@ -949,32 +940,26 @@ static int __devinit spear_smi_probe(struct platform_device *pdev)
goto err;
}
- dev = kzalloc(sizeof(*dev), GFP_ATOMIC);
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_ATOMIC);
if (!dev) {
ret = -ENOMEM;
dev_err(&pdev->dev, "mem alloc fail\n");
goto err;
}
- smi_base = request_mem_region(smi_base->start, resource_size(smi_base),
- pdev->name);
- if (!smi_base) {
- ret = -EBUSY;
- dev_err(&pdev->dev, "request mem region fail\n");
- goto err_mem;
- }
+ smi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dev->io_base = ioremap(smi_base->start, resource_size(smi_base));
+ dev->io_base = devm_request_and_ioremap(&pdev->dev, smi_base);
if (!dev->io_base) {
ret = -EIO;
- dev_err(&pdev->dev, "ioremap fail\n");
- goto err_ioremap;
+ dev_err(&pdev->dev, "devm_request_and_ioremap fail\n");
+ goto err;
}
dev->pdev = pdev;
dev->clk_rate = pdata->clk_rate;
- if (dev->clk_rate < 0 || dev->clk_rate > SMI_MAX_CLOCK_FREQ)
+ if (dev->clk_rate > SMI_MAX_CLOCK_FREQ)
dev->clk_rate = SMI_MAX_CLOCK_FREQ;
dev->num_flashes = pdata->num_flashes;
@@ -984,17 +969,18 @@ static int __devinit spear_smi_probe(struct platform_device *pdev)
dev->num_flashes = MAX_NUM_FLASH_CHIP;
}
- dev->clk = clk_get(&pdev->dev, NULL);
+ dev->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(dev->clk)) {
ret = PTR_ERR(dev->clk);
- goto err_clk;
+ goto err;
}
ret = clk_prepare_enable(dev->clk);
if (ret)
- goto err_clk_prepare_enable;
+ goto err;
- ret = request_irq(irq, spear_smi_int_handler, 0, pdev->name, dev);
+ ret = devm_request_irq(&pdev->dev, irq, spear_smi_int_handler, 0,
+ pdev->name, dev);
if (ret) {
dev_err(&dev->pdev->dev, "SMI IRQ allocation failed\n");
goto err_irq;
@@ -1017,18 +1003,9 @@ static int __devinit spear_smi_probe(struct platform_device *pdev)
return 0;
err_bank_setup:
- free_irq(irq, dev);
platform_set_drvdata(pdev, NULL);
err_irq:
clk_disable_unprepare(dev->clk);
-err_clk_prepare_enable:
- clk_put(dev->clk);
-err_clk:
- iounmap(dev->io_base);
-err_ioremap:
- release_mem_region(smi_base->start, resource_size(smi_base));
-err_mem:
- kfree(dev);
err:
return ret;
}
@@ -1042,11 +1019,8 @@ err:
static int __devexit spear_smi_remove(struct platform_device *pdev)
{
struct spear_smi *dev;
- struct spear_smi_plat_data *pdata;
struct spear_snor_flash *flash;
- struct resource *smi_base;
- int ret;
- int i, irq;
+ int ret, i;
dev = platform_get_drvdata(pdev);
if (!dev) {
@@ -1054,8 +1028,6 @@ static int __devexit spear_smi_remove(struct platform_device *pdev)
return -ENODEV;
}
- pdata = dev_get_platdata(&pdev->dev);
-
/* clean up for all nor flash */
for (i = 0; i < dev->num_flashes; i++) {
flash = dev->flash[i];
@@ -1066,49 +1038,41 @@ static int __devexit spear_smi_remove(struct platform_device *pdev)
ret = mtd_device_unregister(&flash->mtd);
if (ret)
dev_err(&pdev->dev, "error removing mtd\n");
-
- iounmap(flash->base_addr);
- kfree(flash);
}
- irq = platform_get_irq(pdev, 0);
- free_irq(irq, dev);
-
clk_disable_unprepare(dev->clk);
- clk_put(dev->clk);
- iounmap(dev->io_base);
- kfree(dev);
-
- smi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(smi_base->start, resource_size(smi_base));
platform_set_drvdata(pdev, NULL);
return 0;
}
-int spear_smi_suspend(struct platform_device *pdev, pm_message_t state)
+#ifdef CONFIG_PM
+static int spear_smi_suspend(struct device *dev)
{
- struct spear_smi *dev = platform_get_drvdata(pdev);
+ struct spear_smi *sdev = dev_get_drvdata(dev);
- if (dev && dev->clk)
- clk_disable_unprepare(dev->clk);
+ if (sdev && sdev->clk)
+ clk_disable_unprepare(sdev->clk);
return 0;
}
-int spear_smi_resume(struct platform_device *pdev)
+static int spear_smi_resume(struct device *dev)
{
- struct spear_smi *dev = platform_get_drvdata(pdev);
+ struct spear_smi *sdev = dev_get_drvdata(dev);
int ret = -EPERM;
- if (dev && dev->clk)
- ret = clk_prepare_enable(dev->clk);
+ if (sdev && sdev->clk)
+ ret = clk_prepare_enable(sdev->clk);
if (!ret)
- spear_smi_hw_init(dev);
+ spear_smi_hw_init(sdev);
return ret;
}
+static SIMPLE_DEV_PM_OPS(spear_smi_pm_ops, spear_smi_suspend, spear_smi_resume);
+#endif
+
#ifdef CONFIG_OF
static const struct of_device_id spear_smi_id_table[] = {
{ .compatible = "st,spear600-smi" },
@@ -1123,11 +1087,12 @@ static struct platform_driver spear_smi_driver = {
.bus = &platform_bus_type,
.owner = THIS_MODULE,
.of_match_table = of_match_ptr(spear_smi_id_table),
+#ifdef CONFIG_PM
+ .pm = &spear_smi_pm_ops,
+#endif
},
.probe = spear_smi_probe,
.remove = __devexit_p(spear_smi_remove),
- .suspend = spear_smi_suspend,
- .resume = spear_smi_resume,
};
static int spear_smi_init(void)
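Several of the spear_smi fixes above hinge on the three-way return value of wait_event_interruptible_timeout(): 0 means the timeout expired with the condition still false, a negative value (-ERESTARTSYS) means a signal interrupted the wait, and a positive value is the number of jiffies remaining when the condition became true. A minimal sketch of the pattern the driver now follows, reusing the wait queue, TFF flag and timeout named above:

	long ret;

	ret = wait_event_interruptible_timeout(dev->cmd_complete,
					       dev->status & TFF,
					       SMI_CMD_TIMEOUT);
	if (ret == 0)		/* timed out, condition never became true */
		return -EIO;
	if (ret < 0)		/* -ERESTARTSYS: interrupted by a signal */
		return ret;
	return 0;		/* ret > 0: completed with jiffies to spare */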
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 5ba2458..2e47c2e 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -373,7 +373,7 @@ config MTD_FORTUNET
have such a board, say 'Y'.
config MTD_AUTCPU12
- tristate "NV-RAM mapping AUTCPU12 board"
+ bool "NV-RAM mapping AUTCPU12 board"
depends on ARCH_AUTCPU12
help
This enables access to the NV-RAM on autronix autcpu12 board.
@@ -443,22 +443,10 @@ config MTD_GPIO_ADDR
config MTD_UCLINUX
bool "Generic uClinux RAM/ROM filesystem support"
- depends on MTD_RAM=y && !MMU
+ depends on MTD_RAM=y && (!MMU || COLDFIRE)
help
Map driver to support image based filesystems for uClinux.
-config MTD_WRSBC8260
- tristate "Map driver for WindRiver PowerQUICC II MPC82xx board"
- depends on (SBC82xx || SBC8560)
- select MTD_MAP_BANK_WIDTH_4
- select MTD_MAP_BANK_WIDTH_1
- select MTD_CFI_I1
- select MTD_CFI_I4
- help
- Map driver for WindRiver PowerQUICC II MPC82xx board. Drives
- all three flash regions on CS0, CS1 and CS6 if they are configured
- correctly by the boot loader.
-
config MTD_DMV182
tristate "Map driver for Dy-4 SVME/DMV-182 board."
depends on DMV182
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index 68a9a91..deb43e9 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -47,7 +47,6 @@ obj-$(CONFIG_MTD_SCB2_FLASH) += scb2_flash.o
obj-$(CONFIG_MTD_H720X) += h720x-flash.o
obj-$(CONFIG_MTD_IXP4XX) += ixp4xx.o
obj-$(CONFIG_MTD_IXP2000) += ixp2000.o
-obj-$(CONFIG_MTD_WRSBC8260) += wr_sbc82xx_flash.o
obj-$(CONFIG_MTD_DMV182) += dmv182.o
obj-$(CONFIG_MTD_PLATRAM) += plat-ram.o
obj-$(CONFIG_MTD_INTEL_VR_NOR) += intel_vr_nor.o
diff --git a/drivers/mtd/maps/autcpu12-nvram.c b/drivers/mtd/maps/autcpu12-nvram.c
index e5bfd0e..76fb594 100644
--- a/drivers/mtd/maps/autcpu12-nvram.c
+++ b/drivers/mtd/maps/autcpu12-nvram.c
@@ -15,43 +15,54 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
+#include <linux/sizes.h>
-#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
-#include <linux/ioport.h>
#include <linux/init.h>
-#include <asm/io.h>
-#include <asm/sizes.h>
-#include <mach/hardware.h>
-#include <mach/autcpu12.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-
-
-static struct mtd_info *sram_mtd;
-struct map_info autcpu12_sram_map = {
- .name = "SRAM",
- .size = 32768,
- .bankwidth = 4,
- .phys = 0x12000000,
+struct autcpu12_nvram_priv {
+ struct mtd_info *mtd;
+ struct map_info map;
};
-static int __init init_autcpu12_sram (void)
+static int __devinit autcpu12_nvram_probe(struct platform_device *pdev)
{
- int err, save0, save1;
+ map_word tmp, save0, save1;
+ struct resource *res;
+ struct autcpu12_nvram_priv *priv;
- autcpu12_sram_map.virt = ioremap(0x12000000, SZ_128K);
- if (!autcpu12_sram_map.virt) {
- printk("Failed to ioremap autcpu12 NV-RAM space\n");
- err = -EIO;
- goto out;
+ priv = devm_kzalloc(&pdev->dev,
+ sizeof(struct autcpu12_nvram_priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, priv);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "failed to get memory resource\n");
+ return -ENOENT;
+ }
+
+ priv->map.bankwidth = 4;
+ priv->map.phys = res->start;
+ priv->map.size = resource_size(res);
+ priv->map.virt = devm_request_and_ioremap(&pdev->dev, res);
+ if (!priv->map.virt) {
+ dev_err(&pdev->dev, "failed to remap mem resource\n");
+ return -EBUSY;
+ }
+ /* map.name is const and never allocated here; point it at the
+ * resource name instead of strcpy()ing through a NULL pointer */
+ priv->map.name = res->name;
- simple_map_init(&autcpu_sram_map);
+
+ simple_map_init(&priv->map);
/*
* Check for 32K/128K
@@ -61,65 +72,59 @@ static int __init init_autcpu12_sram (void)
* Read and check result on ofs 0x0
* Restore contents
*/
- save0 = map_read32(&autcpu12_sram_map,0);
- save1 = map_read32(&autcpu12_sram_map,0x10000);
- map_write32(&autcpu12_sram_map,~save0,0x10000);
- /* if we find this pattern on 0x0, we have 32K size
- * restore contents and exit
- */
- if ( map_read32(&autcpu12_sram_map,0) != save0) {
- map_write32(&autcpu12_sram_map,save0,0x0);
- goto map;
+ save0 = map_read(&priv->map, 0);
+ save1 = map_read(&priv->map, 0x10000);
+ tmp.x[0] = ~save0.x[0];
+ map_write(&priv->map, tmp, 0x10000);
+ tmp = map_read(&priv->map, 0);
+ /* if we find this pattern on 0x0, we have 32K size */
+ if (!map_word_equal(&priv->map, tmp, save0)) {
+ map_write(&priv->map, save0, 0x0);
+ priv->map.size = SZ_32K;
+ } else
+ map_write(&priv->map, save1, 0x10000);
+
+ priv->mtd = do_map_probe("map_ram", &priv->map);
+ if (!priv->mtd) {
+ dev_err(&pdev->dev, "probing failed\n");
+ return -ENXIO;
}
- /* We have a 128K found, restore 0x10000 and set size
- * to 128K
- */
- map_write32(&autcpu12_sram_map,save1,0x10000);
- autcpu12_sram_map.size = SZ_128K;
-
-map:
- sram_mtd = do_map_probe("map_ram", &autcpu12_sram_map);
- if (!sram_mtd) {
- printk("NV-RAM probe failed\n");
- err = -ENXIO;
- goto out_ioremap;
- }
-
- sram_mtd->owner = THIS_MODULE;
- sram_mtd->erasesize = 16;
- if (mtd_device_register(sram_mtd, NULL, 0)) {
- printk("NV-RAM device addition failed\n");
- err = -ENOMEM;
- goto out_probe;
+ priv->mtd->owner = THIS_MODULE;
+ priv->mtd->erasesize = 16;
+ priv->mtd->dev.parent = &pdev->dev;
+ if (!mtd_device_register(priv->mtd, NULL, 0)) {
+ dev_info(&pdev->dev,
+ "NV-RAM device size %ldKiB registered on AUTCPU12\n",
+ priv->map.size / SZ_1K);
+ return 0;
}
- printk("NV-RAM device size %ldKiB registered on AUTCPU12\n",autcpu12_sram_map.size/SZ_1K);
-
- return 0;
-
-out_probe:
- map_destroy(sram_mtd);
- sram_mtd = 0;
-
-out_ioremap:
- iounmap((void *)autcpu12_sram_map.virt);
-out:
- return err;
+ map_destroy(priv->mtd);
+ dev_err(&pdev->dev, "NV-RAM device addition failed\n");
+ return -ENOMEM;
}
-static void __exit cleanup_autcpu12_maps(void)
+static int __devexit autcpu12_nvram_remove(struct platform_device *pdev)
{
- if (sram_mtd) {
- mtd_device_unregister(sram_mtd);
- map_destroy(sram_mtd);
- iounmap((void *)autcpu12_sram_map.virt);
- }
+ struct autcpu12_nvram_priv *priv = platform_get_drvdata(pdev);
+
+ mtd_device_unregister(priv->mtd);
+ map_destroy(priv->mtd);
+
+ return 0;
}
-module_init(init_autcpu12_sram);
-module_exit(cleanup_autcpu12_maps);
+static struct platform_driver autcpu12_nvram_driver = {
+ .driver = {
+ .name = "autcpu12_nvram",
+ .owner = THIS_MODULE,
+ },
+ .probe = autcpu12_nvram_probe,
+ .remove = __devexit_p(autcpu12_nvram_remove),
+};
+module_platform_driver(autcpu12_nvram_driver);
MODULE_AUTHOR("Thomas Gleixner");
-MODULE_DESCRIPTION("autcpu12 NV-RAM map driver");
+MODULE_DESCRIPTION("autcpu12 NVRAM map driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/maps/pci.c b/drivers/mtd/maps/pci.c
index f14ce0a..1c30c1a 100644
--- a/drivers/mtd/maps/pci.c
+++ b/drivers/mtd/maps/pci.c
@@ -43,26 +43,14 @@ static map_word mtd_pci_read8(struct map_info *_map, unsigned long ofs)
struct map_pci_info *map = (struct map_pci_info *)_map;
map_word val;
val.x[0]= readb(map->base + map->translate(map, ofs));
-// printk("read8 : %08lx => %02x\n", ofs, val.x[0]);
return val;
}
-#if 0
-static map_word mtd_pci_read16(struct map_info *_map, unsigned long ofs)
-{
- struct map_pci_info *map = (struct map_pci_info *)_map;
- map_word val;
- val.x[0] = readw(map->base + map->translate(map, ofs));
-// printk("read16: %08lx => %04x\n", ofs, val.x[0]);
- return val;
-}
-#endif
static map_word mtd_pci_read32(struct map_info *_map, unsigned long ofs)
{
struct map_pci_info *map = (struct map_pci_info *)_map;
map_word val;
val.x[0] = readl(map->base + map->translate(map, ofs));
-// printk("read32: %08lx => %08x\n", ofs, val.x[0]);
return val;
}
@@ -75,22 +63,12 @@ static void mtd_pci_copyfrom(struct map_info *_map, void *to, unsigned long from
static void mtd_pci_write8(struct map_info *_map, map_word val, unsigned long ofs)
{
struct map_pci_info *map = (struct map_pci_info *)_map;
-// printk("write8 : %08lx <= %02x\n", ofs, val.x[0]);
writeb(val.x[0], map->base + map->translate(map, ofs));
}
-#if 0
-static void mtd_pci_write16(struct map_info *_map, map_word val, unsigned long ofs)
-{
- struct map_pci_info *map = (struct map_pci_info *)_map;
-// printk("write16: %08lx <= %04x\n", ofs, val.x[0]);
- writew(val.x[0], map->base + map->translate(map, ofs));
-}
-#endif
static void mtd_pci_write32(struct map_info *_map, map_word val, unsigned long ofs)
{
struct map_pci_info *map = (struct map_pci_info *)_map;
-// printk("write32: %08lx <= %08x\n", ofs, val.x[0]);
writel(val.x[0], map->base + map->translate(map, ofs));
}
@@ -358,4 +336,3 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
MODULE_DESCRIPTION("Generic PCI map driver");
MODULE_DEVICE_TABLE(pci, mtd_pci_ids);
-
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index 2e6fb68..6f19aca 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -169,6 +169,7 @@ static int __devinit of_flash_probe(struct platform_device *dev)
struct mtd_info **mtd_list = NULL;
resource_size_t res_size;
struct mtd_part_parser_data ppdata;
+ bool map_indirect;
match = of_match_device(of_flash_match, &dev->dev);
if (!match)
@@ -192,6 +193,8 @@ static int __devinit of_flash_probe(struct platform_device *dev)
}
count /= reg_tuple_size;
+ map_indirect = of_property_read_bool(dp, "no-unaligned-direct-access");
+
err = -ENOMEM;
info = kzalloc(sizeof(struct of_flash) +
sizeof(struct of_flash_list) * count, GFP_KERNEL);
@@ -247,6 +250,17 @@ static int __devinit of_flash_probe(struct platform_device *dev)
simple_map_init(&info->list[i].map);
+ /*
+ * On some platforms (e.g. MPC5200) a direct 1:1 mapping
+ * may cause problems with JFFS2 usage, as the local bus (LPB)
+ * doesn't support unaligned accesses as implemented in the
+ * JFFS2 code via memcpy(). By setting NO_XIP, the
+ * flash will not be exposed directly to the MTD users
+ * (e.g. JFFS2) any more.
+ */
+ if (map_indirect)
+ info->list[i].map.phys = NO_XIP;
+
if (probe_type) {
info->list[i].mtd = do_map_probe(probe_type,
&info->list[i].map);
diff --git a/drivers/mtd/maps/rbtx4939-flash.c b/drivers/mtd/maps/rbtx4939-flash.c
index 6f52e1f..49c3fe7 100644
--- a/drivers/mtd/maps/rbtx4939-flash.c
+++ b/drivers/mtd/maps/rbtx4939-flash.c
@@ -100,8 +100,6 @@ static int rbtx4939_flash_probe(struct platform_device *dev)
goto err_out;
}
info->mtd->owner = THIS_MODULE;
- if (err)
- goto err_out;
err = mtd_device_parse_register(info->mtd, NULL, NULL, pdata->parts,
pdata->nr_parts);
diff --git a/drivers/mtd/maps/uclinux.c b/drivers/mtd/maps/uclinux.c
index c3bb304..299bf88 100644
--- a/drivers/mtd/maps/uclinux.c
+++ b/drivers/mtd/maps/uclinux.c
@@ -67,10 +67,16 @@ static int __init uclinux_mtd_init(void)
printk("uclinux[mtd]: RAM probe address=0x%x size=0x%x\n",
(int) mapp->phys, (int) mapp->size);
- mapp->virt = ioremap_nocache(mapp->phys, mapp->size);
+ /*
+ * The filesystem is guaranteed to be in direct mapped memory; it
+ * directly follows the kernel's own bss region. Following the same
+ * mechanism used by architectures when setting up traditional initrds,
+ * use phys_to_virt to get the virtual address of its start.
+ */
+ mapp->virt = phys_to_virt(mapp->phys);
if (mapp->virt == 0) {
- printk("uclinux[mtd]: ioremap_nocache() failed\n");
+ printk("uclinux[mtd]: no virtual mapping?\n");
return(-EIO);
}
@@ -79,7 +85,6 @@ static int __init uclinux_mtd_init(void)
mtd = do_map_probe("map_ram", mapp);
if (!mtd) {
printk("uclinux[mtd]: failed to find a mapping?\n");
- iounmap(mapp->virt);
return(-ENXIO);
}
@@ -102,10 +107,8 @@ static void __exit uclinux_mtd_cleanup(void)
map_destroy(uclinux_ram_mtdinfo);
uclinux_ram_mtdinfo = NULL;
}
- if (uclinux_ram_map.virt) {
- iounmap((void *) uclinux_ram_map.virt);
+ if (uclinux_ram_map.virt)
uclinux_ram_map.virt = 0;
- }
}
/****************************************************************************/
diff --git a/drivers/mtd/maps/wr_sbc82xx_flash.c b/drivers/mtd/maps/wr_sbc82xx_flash.c
deleted file mode 100644
index e7534c8..0000000
--- a/drivers/mtd/maps/wr_sbc82xx_flash.c
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
- * Map for flash chips on Wind River PowerQUICC II SBC82xx board.
- *
- * Copyright (C) 2004 Red Hat, Inc.
- *
- * Author: David Woodhouse <dwmw2@infradead.org>
- *
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <asm/io.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-
-#include <asm/immap_cpm2.h>
-
-static struct mtd_info *sbcmtd[3];
-
-struct map_info sbc82xx_flash_map[3] = {
- {.name = "Boot flash"},
- {.name = "Alternate boot flash"},
- {.name = "User flash"}
-};
-
-static struct mtd_partition smallflash_parts[] = {
- {
- .name = "space",
- .size = 0x100000,
- .offset = 0,
- }, {
- .name = "bootloader",
- .size = MTDPART_SIZ_FULL,
- .offset = MTDPART_OFS_APPEND,
- }
-};
-
-static struct mtd_partition bigflash_parts[] = {
- {
- .name = "bootloader",
- .size = 0x00100000,
- .offset = 0,
- }, {
- .name = "file system",
- .size = 0x01f00000,
- .offset = MTDPART_OFS_APPEND,
- }, {
- .name = "boot config",
- .size = 0x00100000,
- .offset = MTDPART_OFS_APPEND,
- }, {
- .name = "space",
- .size = 0x01f00000,
- .offset = MTDPART_OFS_APPEND,
- }
-};
-
-static const char *part_probes[] __initconst = {"cmdlinepart", "RedBoot", NULL};
-
-#define init_sbc82xx_one_flash(map, br, or) \
-do { \
- (map).phys = (br & 1) ? (br & 0xffff8000) : 0; \
- (map).size = (br & 1) ? (~(or & 0xffff8000) + 1) : 0; \
- switch (br & 0x00001800) { \
- case 0x00000000: \
- case 0x00000800: (map).bankwidth = 1; break; \
- case 0x00001000: (map).bankwidth = 2; break; \
- case 0x00001800: (map).bankwidth = 4; break; \
- } \
-} while (0);
-
-static int __init init_sbc82xx_flash(void)
-{
- volatile memctl_cpm2_t *mc = &cpm2_immr->im_memctl;
- int bigflash;
- int i;
-
-#ifdef CONFIG_SBC8560
- mc = ioremap(0xff700000 + 0x5000, sizeof(memctl_cpm2_t));
-#else
- mc = &cpm2_immr->im_memctl;
-#endif
-
- bigflash = 1;
- if ((mc->memc_br0 & 0x00001800) == 0x00001800)
- bigflash = 0;
-
- init_sbc82xx_one_flash(sbc82xx_flash_map[0], mc->memc_br0, mc->memc_or0);
- init_sbc82xx_one_flash(sbc82xx_flash_map[1], mc->memc_br6, mc->memc_or6);
- init_sbc82xx_one_flash(sbc82xx_flash_map[2], mc->memc_br1, mc->memc_or1);
-
-#ifdef CONFIG_SBC8560
- iounmap((void *) mc);
-#endif
-
- for (i=0; i<3; i++) {
- int8_t flashcs[3] = { 0, 6, 1 };
- int nr_parts;
- struct mtd_partition *defparts;
-
- printk(KERN_NOTICE "PowerQUICC II %s (%ld MiB on CS%d",
- sbc82xx_flash_map[i].name,
- (sbc82xx_flash_map[i].size >> 20),
- flashcs[i]);
- if (!sbc82xx_flash_map[i].phys) {
- /* We know it can't be at zero. */
- printk("): disabled by bootloader.\n");
- continue;
- }
- printk(" at %08lx)\n", sbc82xx_flash_map[i].phys);
-
- sbc82xx_flash_map[i].virt = ioremap(sbc82xx_flash_map[i].phys,
- sbc82xx_flash_map[i].size);
-
- if (!sbc82xx_flash_map[i].virt) {
- printk("Failed to ioremap\n");
- continue;
- }
-
- simple_map_init(&sbc82xx_flash_map[i]);
-
- sbcmtd[i] = do_map_probe("cfi_probe", &sbc82xx_flash_map[i]);
-
- if (!sbcmtd[i])
- continue;
-
- sbcmtd[i]->owner = THIS_MODULE;
-
- /* No partitioning detected. Use default */
- if (i == 2) {
- defparts = NULL;
- nr_parts = 0;
- } else if (i == bigflash) {
- defparts = bigflash_parts;
- nr_parts = ARRAY_SIZE(bigflash_parts);
- } else {
- defparts = smallflash_parts;
- nr_parts = ARRAY_SIZE(smallflash_parts);
- }
-
- mtd_device_parse_register(sbcmtd[i], part_probes, NULL,
- defparts, nr_parts);
- }
- return 0;
-}
-
-static void __exit cleanup_sbc82xx_flash(void)
-{
- int i;
-
- for (i=0; i<3; i++) {
- if (!sbcmtd[i])
- continue;
-
- mtd_device_unregister(sbcmtd[i]);
-
- map_destroy(sbcmtd[i]);
-
- iounmap((void *)sbc82xx_flash_map[i].virt);
- sbc82xx_flash_map[i].virt = 0;
- }
-}
-
-module_init(init_sbc82xx_flash);
-module_exit(cleanup_sbc82xx_flash);
-
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
-MODULE_DESCRIPTION("Flash map driver for WindRiver PowerQUICC II");
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index a6e7451..82c0616 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -1162,7 +1162,11 @@ static int mtdchar_mmap(struct file *file, struct vm_area_struct *vma)
resource_size_t start, off;
unsigned long len, vma_len;
- if (mtd->type == MTD_RAM || mtd->type == MTD_ROM) {
+ /* This is broken because it assumes the MTD device is map-based
+ * and that mtd->priv is a valid struct map_info. It should be
+ * replaced with something that uses the mtd_get_unmapped_area()
+ * operation properly. */
+ if (0 /*mtd->type == MTD_RAM || mtd->type == MTD_ROM*/) {
off = get_vm_offset(vma);
start = map->phys;
len = PAGE_ALIGN((start & ~PAGE_MASK) + map->size);
@@ -1182,7 +1186,7 @@ static int mtdchar_mmap(struct file *file, struct vm_area_struct *vma)
return -EINVAL;
if (set_vm_offset(vma, off) < 0)
return -EINVAL;
- vma->vm_flags |= VM_IO | VM_RESERVED;
+ vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
#ifdef pgprot_noncached
if (file->f_flags & O_DSYNC || off >= __pa(high_memory))
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 5757307..374c46d 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -858,6 +858,27 @@ int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
}
EXPORT_SYMBOL_GPL(mtd_panic_write);
+int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
+{
+ int ret_code;
+ ops->retlen = ops->oobretlen = 0;
+ if (!mtd->_read_oob)
+ return -EOPNOTSUPP;
+ /*
+ * In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics
+ * similar to mtd->_read(), returning a non-negative integer
+ * representing max bitflips. In other cases, mtd->_read_oob() may
+ * return -EUCLEAN. In all cases, perform similar logic to mtd_read().
+ */
+ ret_code = mtd->_read_oob(mtd, from, ops);
+ if (unlikely(ret_code < 0))
+ return ret_code;
+ if (mtd->ecc_strength == 0)
+ return 0; /* device lacks ecc */
+ return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
+}
+EXPORT_SYMBOL_GPL(mtd_read_oob);
+
/*
* Method to access the protection register area, present in some flash
* devices. The user data is one time programmable but the factory data is read
@@ -1056,8 +1077,7 @@ EXPORT_SYMBOL_GPL(mtd_writev);
* until the request succeeds or until the allocation size falls below
* the system page size. This attempts to make sure it does not adversely
* impact system performance, so when allocating more than one page, we
- * ask the memory allocator to avoid re-trying, swapping, writing back
- * or performing I/O.
+ * ask the memory allocator to avoid re-trying.
*
* Note, this function also makes sure that the allocated buffer is aligned to
* the MTD device's min. I/O unit, i.e. the "mtd->writesize" value.
@@ -1071,8 +1091,7 @@ EXPORT_SYMBOL_GPL(mtd_writev);
*/
void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
{
- gfp_t flags = __GFP_NOWARN | __GFP_WAIT |
- __GFP_NORETRY | __GFP_NO_KSWAPD;
+ gfp_t flags = __GFP_NOWARN | __GFP_WAIT | __GFP_NORETRY;
size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
void *kbuf;
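The new mtd_read_oob() wrapper centralizes bitflip accounting: a return of -EUCLEAN now means the read itself succeeded but the number of corrected bitflips reached mtd->bitflip_threshold. A hedged caller sketch (mtd, offs and oobbuf are assumed to exist in the caller):

	struct mtd_oob_ops ops = {
		.mode	= MTD_OPS_PLACE_OOB,
		.ooblen	= mtd->oobsize,
		.oobbuf	= oobbuf,
	};
	int err = mtd_read_oob(mtd, offs, &ops);

	if (err == -EUCLEAN) {
		/* data is usable but the block is wearing out:
		 * consider moving the data and scrubbing the block */
	} else if (err < 0) {
		/* hard failure, e.g. -EOPNOTSUPP or -EIO */
	}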
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index 438737a..f5b3f91 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -169,14 +169,7 @@ static void mtdoops_workfunc_erase(struct work_struct *work)
cxt->nextpage = 0;
}
- while (1) {
- ret = mtd_block_isbad(mtd, cxt->nextpage * record_size);
- if (!ret)
- break;
- if (ret < 0) {
- printk(KERN_ERR "mtdoops: block_isbad failed, aborting\n");
- return;
- }
+ while ((ret = mtd_block_isbad(mtd, cxt->nextpage * record_size)) > 0) {
badblock:
printk(KERN_WARNING "mtdoops: bad block at %08lx\n",
cxt->nextpage * record_size);
@@ -190,6 +183,11 @@ badblock:
}
}
+ if (ret < 0) {
+ printk(KERN_ERR "mtdoops: mtd_block_isbad failed, aborting\n");
+ return;
+ }
+
for (j = 0, ret = -1; (j < 3) && (ret < 0); j++)
ret = mtdoops_erase_block(cxt, cxt->nextpage * record_size);
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 3a49e6d..70fa70a 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -711,6 +711,8 @@ static const char *default_mtd_part_types[] = {
* partition parsers, specified in @types. However, if @types is %NULL, then
* the default list of parsers is used. The default list contains only the
* "cmdlinepart" and "ofpart" parsers ATM.
+ * Note: if there is more than one parser in @types, only the partitions
+ * found by the first parser that succeeds are used.
*
* This function may return:
* o a negative error code in case of failure
@@ -735,11 +737,12 @@ int parse_mtd_partitions(struct mtd_info *master, const char **types,
if (!parser)
continue;
ret = (*parser->parse_fn)(master, pparts, data);
+ put_partition_parser(parser);
if (ret > 0) {
printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n",
ret, parser->name, master->name);
+ break;
}
- put_partition_parser(parser);
}
return ret;
}
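With this change the @types array becomes a priority list. A minimal caller sketch using the default parsers named in the comment above (pparts and data as in the function's own signature):

	static const char *probes[] = { "cmdlinepart", "ofpart", NULL };

	/* "ofpart" is consulted only if "cmdlinepart" reports no
	 * partitions, i.e. its parse_fn returns <= 0 */
	ret = parse_mtd_partitions(master, probes, pparts, data);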
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 598cd0a..4883139 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -22,15 +22,6 @@ menuconfig MTD_NAND
if MTD_NAND
-config MTD_NAND_VERIFY_WRITE
- bool "Verify NAND page writes"
- help
- This adds an extra check when data is written to the flash. The
- NAND flash device internally checks only bits transitioning
- from 1 to 0. There is a rare possibility that even though the
- device thinks the write was successful, a bit could have been
- flipped accidentally due to device wear or something else.
-
config MTD_NAND_BCH
tristate
select BCH
@@ -267,22 +258,6 @@ config MTD_NAND_S3C2410_CLKSTOP
when the NAND chip is selected or released, but will save
approximately 5mA of power when there is nothing happening.
-config MTD_NAND_BCM_UMI
- tristate "NAND Flash support for BCM Reference Boards"
- depends on ARCH_BCMRING
- help
- This enables the NAND flash controller on the BCM UMI block.
-
- No board specific support is done by this driver, each board
- must advertise a platform_device for the driver to attach.
-
-config MTD_NAND_BCM_UMI_HWCS
- bool "BCM UMI NAND Hardware CS"
- depends on MTD_NAND_BCM_UMI
- help
- Enable the use of the BCM UMI block's internal CS using NAND.
- This should only be used if you know the external NAND CS can toggle.
-
config MTD_NAND_DISKONCHIP
tristate "DiskOnChip 2000, Millennium and Millennium Plus (NAND reimplementation) (EXPERIMENTAL)"
depends on EXPERIMENTAL
@@ -356,7 +331,7 @@ config MTD_NAND_DISKONCHIP_BBTWRITE
config MTD_NAND_DOCG4
tristate "Support for DiskOnChip G4 (EXPERIMENTAL)"
- depends on EXPERIMENTAL
+ depends on EXPERIMENTAL && HAS_IOMEM
select BCH
select BITREVERSE
help
@@ -414,6 +389,28 @@ config MTD_NAND_PXA3xx
This enables the driver for the NAND flash device found on
PXA3xx processors
+config MTD_NAND_SLC_LPC32XX
+ tristate "NXP LPC32xx SLC Controller"
+ depends on ARCH_LPC32XX
+ help
+ Enables support for NXP's LPC32XX SLC (i.e. for Single Level Cell
+ chips) NAND controller. This is the default for the PHYTEC 3250
+ reference board which contains a NAND256R3A2CZA6 chip.
+
+ Please check the actual NAND chip connected and its support
+ by the SLC NAND controller.
+
+config MTD_NAND_MLC_LPC32XX
+ tristate "NXP LPC32xx MLC Controller"
+ depends on ARCH_LPC32XX
+ help
+ Uses the LPC32XX MLC (i.e. for Multi Level Cell chips) NAND
+ controller. This is the default for the WORK92105 controller
+ board.
+
+ Please check the actual NAND chip connected and its support
+ by the MLC NAND controller.
+
config MTD_NAND_CM_X270
tristate "Support for NAND Flash on CM-X270 modules"
depends on MACH_ARMCORE
@@ -439,10 +436,10 @@ config MTD_NAND_NANDSIM
MTD nand layer.
config MTD_NAND_GPMI_NAND
- bool "GPMI NAND Flash Controller driver"
+ tristate "GPMI NAND Flash Controller driver"
depends on MTD_NAND && MXS_DMA
help
- Enables NAND Flash support for IMX23 or IMX28.
+ Enables NAND Flash support for IMX23, IMX28 or IMX6.
The GPMI controller is very powerful, with the help of BCH
module, it can do the hardware ECC. The GPMI supports several
NAND flashes at the same time. The GPMI may conflict with other
@@ -510,7 +507,7 @@ config MTD_NAND_MPC5121_NFC
config MTD_NAND_MXC
tristate "MXC NAND support"
- depends on IMX_HAVE_PLATFORM_MXC_NAND
+ depends on ARCH_MXC
help
This enables the driver for the NAND flash controller on the
MXC processors.
@@ -567,4 +564,12 @@ config MTD_NAND_FSMC
Enables support for NAND Flash chips on the ST Microelectronics
Flexible Static Memory Controller (FSMC)
+config MTD_NAND_XWAY
+ tristate "Support for NAND on Lantiq XWAY SoC"
+ depends on LANTIQ && SOC_TYPE_XWAY
+ select MTD_NAND_PLATFORM
+ help
+ Enables support for NAND Flash chips on Lantiq XWAY SoCs. NAND is attached
+ to the External Bus Unit (EBU).
+
endif # MTD_NAND
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index d4b4d87..2cbd091 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -40,16 +40,18 @@ obj-$(CONFIG_MTD_NAND_ORION) += orion_nand.o
obj-$(CONFIG_MTD_NAND_FSL_ELBC) += fsl_elbc_nand.o
obj-$(CONFIG_MTD_NAND_FSL_IFC) += fsl_ifc_nand.o
obj-$(CONFIG_MTD_NAND_FSL_UPM) += fsl_upm.o
+obj-$(CONFIG_MTD_NAND_SLC_LPC32XX) += lpc32xx_slc.o
+obj-$(CONFIG_MTD_NAND_MLC_LPC32XX) += lpc32xx_mlc.o
obj-$(CONFIG_MTD_NAND_SH_FLCTL) += sh_flctl.o
obj-$(CONFIG_MTD_NAND_MXC) += mxc_nand.o
obj-$(CONFIG_MTD_NAND_SOCRATES) += socrates_nand.o
obj-$(CONFIG_MTD_NAND_TXX9NDFMC) += txx9ndfmc.o
obj-$(CONFIG_MTD_NAND_NUC900) += nuc900_nand.o
obj-$(CONFIG_MTD_NAND_NOMADIK) += nomadik_nand.o
-obj-$(CONFIG_MTD_NAND_BCM_UMI) += bcm_umi_nand.o nand_bcm_umi.o
obj-$(CONFIG_MTD_NAND_MPC5121_NFC) += mpc5121_nfc.o
obj-$(CONFIG_MTD_NAND_RICOH) += r852.o
obj-$(CONFIG_MTD_NAND_JZ4740) += jz4740_nand.o
obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi-nand/
+obj-$(CONFIG_MTD_NAND_XWAY) += xway_nand.o
nand-objs := nand_base.o nand_bbt.o
diff --git a/drivers/mtd/nand/ams-delta.c b/drivers/mtd/nand/ams-delta.c
index a7040af..9e7723a 100644
--- a/drivers/mtd/nand/ams-delta.c
+++ b/drivers/mtd/nand/ams-delta.c
@@ -107,18 +107,6 @@ static void ams_delta_read_buf(struct mtd_info *mtd, u_char *buf, int len)
buf[i] = ams_delta_read_byte(mtd);
}
-static int ams_delta_verify_buf(struct mtd_info *mtd, const u_char *buf,
- int len)
-{
- int i;
-
- for (i=0; i<len; i++)
- if (buf[i] != ams_delta_read_byte(mtd))
- return -EFAULT;
-
- return 0;
-}
-
/*
* Command control function
*
@@ -237,7 +225,6 @@ static int __devinit ams_delta_init(struct platform_device *pdev)
this->read_byte = ams_delta_read_byte;
this->write_buf = ams_delta_write_buf;
this->read_buf = ams_delta_read_buf;
- this->verify_buf = ams_delta_verify_buf;
this->cmd_ctrl = ams_delta_hwcontrol;
if (gpio_request(AMS_DELTA_GPIO_PIN_NAND_RB, "nand_rdy") == 0) {
this->dev_ready = ams_delta_nand_ready;
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index 97ac671..9144557 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -1,20 +1,22 @@
/*
- * Copyright (C) 2003 Rick Bronson
+ * Copyright © 2003 Rick Bronson
*
* Derived from drivers/mtd/nand/autcpu12.c
- * Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de)
+ * Copyright © 2001 Thomas Gleixner (gleixner@autronix.de)
*
* Derived from drivers/mtd/spia.c
- * Copyright (C) 2000 Steven J. Hill (sjhill@cotw.com)
+ * Copyright © 2000 Steven J. Hill (sjhill@cotw.com)
*
*
* Add Hardware ECC support for AT91SAM9260 / AT91SAM9263
- * Richard Genoud (richard.genoud@gmail.com), Adeneo Copyright (C) 2007
+ * Richard Genoud (richard.genoud@gmail.com), Adeneo Copyright © 2007
*
* Derived from Das U-Boot source code
* (u-boot-1.1.5/board/atmel/at91sam9263ek/nand.c)
- * (C) Copyright 2006 ATMEL Rousset, Lacressonniere Nicolas
+ * © Copyright 2006 ATMEL Rousset, Lacressonniere Nicolas
*
+ * Add Programmable Multibit ECC support for various AT91 SoC
+ * © Copyright 2012 ATMEL, Hong Xu
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -93,8 +95,36 @@ struct atmel_nand_host {
struct completion comp;
struct dma_chan *dma_chan;
+
+ bool has_pmecc;
+ u8 pmecc_corr_cap;
+ u16 pmecc_sector_size;
+ u32 pmecc_lookup_table_offset;
+
+ int pmecc_bytes_per_sector;
+ int pmecc_sector_number;
+ int pmecc_degree; /* Degree of remainders */
+ int pmecc_cw_len; /* Length of codeword */
+
+ void __iomem *pmerrloc_base;
+ void __iomem *pmecc_rom_base;
+
+ /* lookup table for alpha_to and index_of */
+ void __iomem *pmecc_alpha_to;
+ void __iomem *pmecc_index_of;
+
+ /* data for pmecc computation */
+ int16_t *pmecc_partial_syn;
+ int16_t *pmecc_si;
+ int16_t *pmecc_smu; /* Sigma table */
+ int16_t *pmecc_lmu; /* polynomial order */
+ int *pmecc_mu;
+ int *pmecc_dmu;
+ int *pmecc_delta;
};
+static struct nand_ecclayout atmel_pmecc_oobinfo;
+
static int cpu_has_dma(void)
{
return cpu_is_at91sam9rl() || cpu_is_at91sam9g45();
@@ -288,6 +318,703 @@ static void atmel_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
}
/*
+ * Return number of ecc bytes per sector according to sector size and
+ * correction capability
+ *
+ * The following table shows what the at91 PMECC supports:
+ * Correction Capability Sector_512_bytes Sector_1024_bytes
+ * ===================== ================ =================
+ * 2-bits 4-bytes 4-bytes
+ * 4-bits 7-bytes 7-bytes
+ * 8-bits 13-bytes 14-bytes
+ * 12-bits 20-bytes 21-bytes
+ * 24-bits 39-bytes 42-bytes
+ */
+static int __devinit pmecc_get_ecc_bytes(int cap, int sector_size)
+{
+ int m = 12 + sector_size / 512;
+ return (m * cap + 7) / 8;
+}
+
+static void __devinit pmecc_config_ecc_layout(struct nand_ecclayout *layout,
+ int oobsize, int ecc_len)
+{
+ int i;
+
+ layout->eccbytes = ecc_len;
+
+ /* ECC will occupy the last ecc_len bytes continuously */
+ for (i = 0; i < ecc_len; i++)
+ layout->eccpos[i] = oobsize - ecc_len + i;
+
+ layout->oobfree[0].offset = 2;
+ layout->oobfree[0].length =
+ oobsize - ecc_len - layout->oobfree[0].offset;
+}
+
+static void __devinit __iomem *pmecc_get_alpha_to(struct atmel_nand_host *host)
+{
+ int table_size;
+
+ table_size = host->pmecc_sector_size == 512 ?
+ PMECC_LOOKUP_TABLE_SIZE_512 : PMECC_LOOKUP_TABLE_SIZE_1024;
+
+ return host->pmecc_rom_base + host->pmecc_lookup_table_offset +
+ table_size * sizeof(int16_t);
+}
+
+static void pmecc_data_free(struct atmel_nand_host *host)
+{
+ kfree(host->pmecc_partial_syn);
+ kfree(host->pmecc_si);
+ kfree(host->pmecc_lmu);
+ kfree(host->pmecc_smu);
+ kfree(host->pmecc_mu);
+ kfree(host->pmecc_dmu);
+ kfree(host->pmecc_delta);
+}
+
+static int __devinit pmecc_data_alloc(struct atmel_nand_host *host)
+{
+ const int cap = host->pmecc_corr_cap;
+
+ host->pmecc_partial_syn = kzalloc((2 * cap + 1) * sizeof(int16_t),
+ GFP_KERNEL);
+ host->pmecc_si = kzalloc((2 * cap + 1) * sizeof(int16_t), GFP_KERNEL);
+ host->pmecc_lmu = kzalloc((cap + 1) * sizeof(int16_t), GFP_KERNEL);
+ host->pmecc_smu = kzalloc((cap + 2) * (2 * cap + 1) * sizeof(int16_t),
+ GFP_KERNEL);
+ host->pmecc_mu = kzalloc((cap + 1) * sizeof(int), GFP_KERNEL);
+ host->pmecc_dmu = kzalloc((cap + 1) * sizeof(int), GFP_KERNEL);
+ host->pmecc_delta = kzalloc((cap + 1) * sizeof(int), GFP_KERNEL);
+
+ if (host->pmecc_partial_syn &&
+ host->pmecc_si &&
+ host->pmecc_lmu &&
+ host->pmecc_smu &&
+ host->pmecc_mu &&
+ host->pmecc_dmu &&
+ host->pmecc_delta)
+ return 0;
+
+ /* error happened */
+ pmecc_data_free(host);
+ return -ENOMEM;
+}
+
+static void pmecc_gen_syndrome(struct mtd_info *mtd, int sector)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct atmel_nand_host *host = nand_chip->priv;
+ int i;
+ uint32_t value;
+
+ /* Fill odd syndromes */
+ for (i = 0; i < host->pmecc_corr_cap; i++) {
+ value = pmecc_readl_rem_relaxed(host->ecc, sector, i / 2);
+ if (i & 1)
+ value >>= 16;
+ value &= 0xffff;
+ host->pmecc_partial_syn[(2 * i) + 1] = (int16_t)value;
+ }
+}
+
+static void pmecc_substitute(struct mtd_info *mtd)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct atmel_nand_host *host = nand_chip->priv;
+ int16_t __iomem *alpha_to = host->pmecc_alpha_to;
+ int16_t __iomem *index_of = host->pmecc_index_of;
+ int16_t *partial_syn = host->pmecc_partial_syn;
+ const int cap = host->pmecc_corr_cap;
+ int16_t *si;
+ int i, j;
+
+ /* si[] is a table that holds the current syndrome values;
+ * each element of the table belongs to the field.
+ */
+ si = host->pmecc_si;
+
+ memset(&si[1], 0, sizeof(int16_t) * (2 * cap - 1));
+
+ /* Compute the 2t syndromes based on S(x) */
+ /* Odd syndromes */
+ for (i = 1; i < 2 * cap; i += 2) {
+ for (j = 0; j < host->pmecc_degree; j++) {
+ if (partial_syn[i] & ((unsigned short)0x1 << j))
+ si[i] = readw_relaxed(alpha_to + i * j) ^ si[i];
+ }
+ }
+ /* Even syndrome = (Odd syndrome) ** 2 */
+ for (i = 2, j = 1; j <= cap; i = ++j << 1) {
+ if (si[j] == 0) {
+ si[i] = 0;
+ } else {
+ int16_t tmp;
+
+ tmp = readw_relaxed(index_of + si[j]);
+ tmp = (tmp * 2) % host->pmecc_cw_len;
+ si[i] = readw_relaxed(alpha_to + tmp);
+ }
+ }
+
+ return;
+}
+
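+/*
+ * Compute the error locator polynomial sigma(x) from the syndromes.
+ * This is essentially the Berlekamp-Massey iteration used in BCH
+ * decoding, tracking the discrepancy dmu[] and polynomial order lmu[].
+ */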
+static void pmecc_get_sigma(struct mtd_info *mtd)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct atmel_nand_host *host = nand_chip->priv;
+
+ int16_t *lmu = host->pmecc_lmu;
+ int16_t *si = host->pmecc_si;
+ int *mu = host->pmecc_mu;
+ int *dmu = host->pmecc_dmu; /* Discrepancy */
+ int *delta = host->pmecc_delta; /* Delta order */
+ int cw_len = host->pmecc_cw_len;
+ const int16_t cap = host->pmecc_corr_cap;
+ const int num = 2 * cap + 1;
+ int16_t __iomem *index_of = host->pmecc_index_of;
+ int16_t __iomem *alpha_to = host->pmecc_alpha_to;
+ int i, j, k;
+ uint32_t dmu_0_count, tmp;
+ int16_t *smu = host->pmecc_smu;
+
+ /* index of largest delta */
+ int ro;
+ int largest;
+ int diff;
+
+ dmu_0_count = 0;
+
+ /* First Row */
+
+ /* Mu */
+ mu[0] = -1;
+
+ memset(smu, 0, sizeof(int16_t) * num);
+ smu[0] = 1;
+
+ /* discrepancy set to 1 */
+ dmu[0] = 1;
+ /* polynomial order set to 0 */
+ lmu[0] = 0;
+ delta[0] = (mu[0] * 2 - lmu[0]) >> 1;
+
+ /* Second Row */
+
+ /* Mu */
+ mu[1] = 0;
+ /* Sigma(x) set to 1 */
+ memset(&smu[num], 0, sizeof(int16_t) * num);
+ smu[num] = 1;
+
+ /* discrepancy set to S1 */
+ dmu[1] = si[1];
+
+ /* polynomial order set to 0 */
+ lmu[1] = 0;
+
+ delta[1] = (mu[1] * 2 - lmu[1]) >> 1;
+
+ /* Init the Sigma(x) last row */
+ memset(&smu[(cap + 1) * num], 0, sizeof(int16_t) * num);
+
+ for (i = 1; i <= cap; i++) {
+ mu[i + 1] = i << 1;
+ /* Begin Computing Sigma (Mu+1) and L(mu) */
+ /* check if discrepancy is set to 0 */
+ if (dmu[i] == 0) {
+ dmu_0_count++;
+
+ tmp = ((cap - (lmu[i] >> 1) - 1) / 2);
+ if ((cap - (lmu[i] >> 1) - 1) & 0x1)
+ tmp += 2;
+ else
+ tmp += 1;
+
+ if (dmu_0_count == tmp) {
+ for (j = 0; j <= (lmu[i] >> 1) + 1; j++)
+ smu[(cap + 1) * num + j] =
+ smu[i * num + j];
+
+ lmu[cap + 1] = lmu[i];
+ return;
+ }
+
+ /* copy polynomial */
+ for (j = 0; j <= lmu[i] >> 1; j++)
+ smu[(i + 1) * num + j] = smu[i * num + j];
+
+ /* copy previous polynomial order to the next */
+ lmu[i + 1] = lmu[i];
+ } else {
+ ro = 0;
+ largest = -1;
+ /* find largest delta with dmu != 0 */
+ for (j = 0; j < i; j++) {
+ if ((dmu[j]) && (delta[j] > largest)) {
+ largest = delta[j];
+ ro = j;
+ }
+ }
+
+ /* compute difference */
+ diff = (mu[i] - mu[ro]);
+
+ /* Compute degree of the new smu polynomial */
+ if ((lmu[i] >> 1) > ((lmu[ro] >> 1) + diff))
+ lmu[i + 1] = lmu[i];
+ else
+ lmu[i + 1] = ((lmu[ro] >> 1) + diff) * 2;
+
+ /* Init smu[i+1] with 0 */
+ for (k = 0; k < num; k++)
+ smu[(i + 1) * num + k] = 0;
+
+ /* Compute smu[i+1] */
+ for (k = 0; k <= lmu[ro] >> 1; k++) {
+ int16_t a, b, c;
+
+ if (!(smu[ro * num + k] && dmu[i]))
+ continue;
+ a = readw_relaxed(index_of + dmu[i]);
+ b = readw_relaxed(index_of + dmu[ro]);
+ c = readw_relaxed(index_of + smu[ro * num + k]);
+ tmp = a + (cw_len - b) + c;
+ a = readw_relaxed(alpha_to + tmp % cw_len);
+ smu[(i + 1) * num + (k + diff)] = a;
+ }
+
+ for (k = 0; k <= lmu[i] >> 1; k++)
+ smu[(i + 1) * num + k] ^= smu[i * num + k];
+ }
+
+ /* End Computing Sigma (Mu+1) and L(mu) */
+ /* In either case compute delta */
+ delta[i + 1] = (mu[i + 1] * 2 - lmu[i + 1]) >> 1;
+
+ /* Do not compute discrepancy for the last iteration */
+ if (i >= cap)
+ continue;
+
+ for (k = 0; k <= (lmu[i + 1] >> 1); k++) {
+ tmp = 2 * (i - 1);
+ if (k == 0) {
+ dmu[i + 1] = si[tmp + 3];
+ } else if (smu[(i + 1) * num + k] && si[tmp + 3 - k]) {
+ int16_t a, b, c;
+ a = readw_relaxed(index_of +
+ smu[(i + 1) * num + k]);
+ b = si[2 * (i - 1) + 3 - k];
+ c = readw_relaxed(index_of + b);
+ tmp = a + c;
+ tmp %= cw_len;
+ dmu[i + 1] = readw_relaxed(alpha_to + tmp) ^
+ dmu[i + 1];
+ }
+ }
+ }
+
+ return;
+}
+
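+/*
+ * Program sigma(x) into the PMERRLOC engine and let the hardware
+ * search for its roots, i.e. the error positions. Returns the number
+ * of errors, or -1 when the root count does not match deg(sigma).
+ */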
+static int pmecc_err_location(struct mtd_info *mtd)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct atmel_nand_host *host = nand_chip->priv;
+ unsigned long end_time;
+ const int cap = host->pmecc_corr_cap;
+ const int num = 2 * cap + 1;
+ int sector_size = host->pmecc_sector_size;
+ int err_nbr = 0; /* number of errors */
+ int roots_nbr; /* number of roots */
+ int i;
+ uint32_t val;
+ int16_t *smu = host->pmecc_smu;
+
+ pmerrloc_writel(host->pmerrloc_base, ELDIS, PMERRLOC_DISABLE);
+
+ for (i = 0; i <= host->pmecc_lmu[cap + 1] >> 1; i++) {
+ pmerrloc_writel_sigma_relaxed(host->pmerrloc_base, i,
+ smu[(cap + 1) * num + i]);
+ err_nbr++;
+ }
+
+ val = (err_nbr - 1) << 16;
+ if (sector_size == 1024)
+ val |= 1;
+
+ pmerrloc_writel(host->pmerrloc_base, ELCFG, val);
+ pmerrloc_writel(host->pmerrloc_base, ELEN,
+ sector_size * 8 + host->pmecc_degree * cap);
+
+ end_time = jiffies + msecs_to_jiffies(PMECC_MAX_TIMEOUT_MS);
+ while (!(pmerrloc_readl_relaxed(host->pmerrloc_base, ELISR)
+ & PMERRLOC_CALC_DONE)) {
+ if (unlikely(time_after(jiffies, end_time))) {
+ dev_err(host->dev, "PMECC: Timeout to calculate error location.\n");
+ return -1;
+ }
+ cpu_relax();
+ }
+
+ roots_nbr = (pmerrloc_readl_relaxed(host->pmerrloc_base, ELISR)
+ & PMERRLOC_ERR_NUM_MASK) >> 8;
+ /* Number of roots == degree of smu hence <= cap */
+ if (roots_nbr == host->pmecc_lmu[cap + 1] >> 1)
+ return err_nbr - 1;
+
+ /* Number of roots does not match the degree of smu;
+ * unable to correct the errors */
+ return -1;
+}
+
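+/*
+ * Flip every erroneous bit reported by the error-location engine,
+ * whether it falls in the data area or in the ECC bytes of the OOB.
+ */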
+static void pmecc_correct_data(struct mtd_info *mtd, uint8_t *buf, uint8_t *ecc,
+ int sector_num, int extra_bytes, int err_nbr)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct atmel_nand_host *host = nand_chip->priv;
+ int i = 0;
+ int byte_pos, bit_pos, sector_size, pos;
+ uint32_t tmp;
+ uint8_t err_byte;
+
+ sector_size = host->pmecc_sector_size;
+
+ while (err_nbr) {
+ tmp = pmerrloc_readl_el_relaxed(host->pmerrloc_base, i) - 1;
+ byte_pos = tmp / 8;
+ bit_pos = tmp % 8;
+
+ if (byte_pos >= (sector_size + extra_bytes))
+ BUG(); /* should never happen */
+
+ if (byte_pos < sector_size) {
+ err_byte = *(buf + byte_pos);
+ *(buf + byte_pos) ^= (1 << bit_pos);
+
+ pos = sector_num * host->pmecc_sector_size + byte_pos;
+ dev_info(host->dev, "Bit flip in data area, byte_pos: %d, bit_pos: %d, 0x%02x -> 0x%02x\n",
+ pos, bit_pos, err_byte, *(buf + byte_pos));
+ } else {
+ /* Bit flip in OOB area */
+ tmp = sector_num * host->pmecc_bytes_per_sector
+ + (byte_pos - sector_size);
+ err_byte = ecc[tmp];
+ ecc[tmp] ^= (1 << bit_pos);
+
+ pos = tmp + nand_chip->ecc.layout->eccpos[0];
+ dev_info(host->dev, "Bit flip in OOB, oob_byte_pos: %d, bit_pos: %d, 0x%02x -> 0x%02x\n",
+ pos, bit_pos, err_byte, ecc[tmp]);
+ }
+
+ i++;
+ err_nbr--;
+ }
+
+ return;
+}
+
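+/*
+ * Walk the per-sector error flags in the PMECC status word and correct
+ * each flagged sector; a page whose ECC bytes are all 0xff is treated
+ * as erased and reported clean.
+ */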
+static int pmecc_correction(struct mtd_info *mtd, u32 pmecc_stat, uint8_t *buf,
+ u8 *ecc)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct atmel_nand_host *host = nand_chip->priv;
+ int i, err_nbr, eccbytes;
+ uint8_t *buf_pos;
+
+ eccbytes = nand_chip->ecc.bytes;
+ for (i = 0; i < eccbytes; i++)
+ if (ecc[i] != 0xff)
+ goto normal_check;
+ /* Erased page, return OK */
+ return 0;
+
+normal_check:
+ for (i = 0; i < host->pmecc_sector_number; i++) {
+ err_nbr = 0;
+ if (pmecc_stat & 0x1) {
+ buf_pos = buf + i * host->pmecc_sector_size;
+
+ pmecc_gen_syndrome(mtd, i);
+ pmecc_substitute(mtd);
+ pmecc_get_sigma(mtd);
+
+ err_nbr = pmecc_err_location(mtd);
+ if (err_nbr == -1) {
+ dev_err(host->dev, "PMECC: Too many errors\n");
+ mtd->ecc_stats.failed++;
+ return -EIO;
+ } else {
+ pmecc_correct_data(mtd, buf_pos, ecc, i,
+ host->pmecc_bytes_per_sector, err_nbr);
+ mtd->ecc_stats.corrected += err_nbr;
+ }
+ }
+ pmecc_stat >>= 1;
+ }
+
+ return 0;
+}
+
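+/*
+ * Read a page with the PMECC engine in auto mode: the syndromes are
+ * computed in hardware while the data and OOB bytes stream by, so only
+ * sectors flagged in the status register need software correction.
+ */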
+static int atmel_nand_pmecc_read_page(struct mtd_info *mtd,
+ struct nand_chip *chip, uint8_t *buf, int oob_required, int page)
+{
+ struct atmel_nand_host *host = chip->priv;
+ int eccsize = chip->ecc.size;
+ uint8_t *oob = chip->oob_poi;
+ uint32_t *eccpos = chip->ecc.layout->eccpos;
+ uint32_t stat;
+ unsigned long end_time;
+
+ pmecc_writel(host->ecc, CTRL, PMECC_CTRL_RST);
+ pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE);
+ pmecc_writel(host->ecc, CFG, (pmecc_readl_relaxed(host->ecc, CFG)
+ & ~PMECC_CFG_WRITE_OP) | PMECC_CFG_AUTO_ENABLE);
+
+ pmecc_writel(host->ecc, CTRL, PMECC_CTRL_ENABLE);
+ pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DATA);
+
+ chip->read_buf(mtd, buf, eccsize);
+ chip->read_buf(mtd, oob, mtd->oobsize);
+
+ end_time = jiffies + msecs_to_jiffies(PMECC_MAX_TIMEOUT_MS);
+ while ((pmecc_readl_relaxed(host->ecc, SR) & PMECC_SR_BUSY)) {
+ if (unlikely(time_after(jiffies, end_time))) {
+ dev_err(host->dev, "PMECC: Timeout to get error status.\n");
+ return -EIO;
+ }
+ cpu_relax();
+ }
+
+ stat = pmecc_readl_relaxed(host->ecc, ISR);
+ if (stat != 0)
+ if (pmecc_correction(mtd, stat, buf, &oob[eccpos[0]]) != 0)
+ return -EIO;
+
+ return 0;
+}
+
+static int atmel_nand_pmecc_write_page(struct mtd_info *mtd,
+ struct nand_chip *chip, const uint8_t *buf, int oob_required)
+{
+ struct atmel_nand_host *host = chip->priv;
+ uint32_t *eccpos = chip->ecc.layout->eccpos;
+ int i, j;
+ unsigned long end_time;
+
+ pmecc_writel(host->ecc, CTRL, PMECC_CTRL_RST);
+ pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE);
+
+ pmecc_writel(host->ecc, CFG, (pmecc_readl_relaxed(host->ecc, CFG) |
+ PMECC_CFG_WRITE_OP) & ~PMECC_CFG_AUTO_ENABLE);
+
+ pmecc_writel(host->ecc, CTRL, PMECC_CTRL_ENABLE);
+ pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DATA);
+
+ chip->write_buf(mtd, (u8 *)buf, mtd->writesize);
+
+ end_time = jiffies + msecs_to_jiffies(PMECC_MAX_TIMEOUT_MS);
+ while ((pmecc_readl_relaxed(host->ecc, SR) & PMECC_SR_BUSY)) {
+ if (unlikely(time_after(jiffies, end_time))) {
+ dev_err(host->dev, "PMECC: Timeout to get ECC value.\n");
+ return -EIO;
+ }
+ cpu_relax();
+ }
+
+ for (i = 0; i < host->pmecc_sector_number; i++) {
+ for (j = 0; j < host->pmecc_bytes_per_sector; j++) {
+ int pos;
+
+ pos = i * host->pmecc_bytes_per_sector + j;
+ chip->oob_poi[eccpos[pos]] =
+ pmecc_readb_ecc_relaxed(host->ecc, i, j);
+ }
+ }
+ chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ return 0;
+}
+
+static void atmel_pmecc_core_init(struct mtd_info *mtd)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct atmel_nand_host *host = nand_chip->priv;
+ uint32_t val = 0;
+ struct nand_ecclayout *ecc_layout;
+
+ pmecc_writel(host->ecc, CTRL, PMECC_CTRL_RST);
+ pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE);
+
+ switch (host->pmecc_corr_cap) {
+ case 2:
+ val = PMECC_CFG_BCH_ERR2;
+ break;
+ case 4:
+ val = PMECC_CFG_BCH_ERR4;
+ break;
+ case 8:
+ val = PMECC_CFG_BCH_ERR8;
+ break;
+ case 12:
+ val = PMECC_CFG_BCH_ERR12;
+ break;
+ case 24:
+ val = PMECC_CFG_BCH_ERR24;
+ break;
+ }
+
+ if (host->pmecc_sector_size == 512)
+ val |= PMECC_CFG_SECTOR512;
+ else if (host->pmecc_sector_size == 1024)
+ val |= PMECC_CFG_SECTOR1024;
+
+ switch (host->pmecc_sector_number) {
+ case 1:
+ val |= PMECC_CFG_PAGE_1SECTOR;
+ break;
+ case 2:
+ val |= PMECC_CFG_PAGE_2SECTORS;
+ break;
+ case 4:
+ val |= PMECC_CFG_PAGE_4SECTORS;
+ break;
+ case 8:
+ val |= PMECC_CFG_PAGE_8SECTORS;
+ break;
+ }
+
+ val |= (PMECC_CFG_READ_OP | PMECC_CFG_SPARE_DISABLE
+ | PMECC_CFG_AUTO_DISABLE);
+ pmecc_writel(host->ecc, CFG, val);
+
+ ecc_layout = nand_chip->ecc.layout;
+ pmecc_writel(host->ecc, SAREA, mtd->oobsize - 1);
+ pmecc_writel(host->ecc, SADDR, ecc_layout->eccpos[0]);
+ pmecc_writel(host->ecc, EADDR,
+ ecc_layout->eccpos[ecc_layout->eccbytes - 1]);
+ /* See datasheet about PMECC Clock Control Register */
+ pmecc_writel(host->ecc, CLK, 2);
+ pmecc_writel(host->ecc, IDR, 0xff);
+ pmecc_writel(host->ecc, CTRL, PMECC_CTRL_ENABLE);
+}
+
+static int __init atmel_pmecc_nand_init_params(struct platform_device *pdev,
+ struct atmel_nand_host *host)
+{
+ struct mtd_info *mtd = &host->mtd;
+ struct nand_chip *nand_chip = &host->nand_chip;
+ struct resource *regs, *regs_pmerr, *regs_rom;
+ int cap, sector_size, err_no;
+
+ cap = host->pmecc_corr_cap;
+ sector_size = host->pmecc_sector_size;
+	dev_info(host->dev, "Initializing PMECC params, cap: %d, sector size: %d\n",
+ cap, sector_size);
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!regs) {
+ dev_warn(host->dev,
+			"Can't get I/O resource regs for PMECC controller, falling back to software ECC\n");
+ nand_chip->ecc.mode = NAND_ECC_SOFT;
+ return 0;
+ }
+
+ host->ecc = ioremap(regs->start, resource_size(regs));
+ if (host->ecc == NULL) {
+ dev_err(host->dev, "ioremap failed\n");
+ err_no = -EIO;
+ goto err_pmecc_ioremap;
+ }
+
+ regs_pmerr = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ regs_rom = platform_get_resource(pdev, IORESOURCE_MEM, 3);
+ if (regs_pmerr && regs_rom) {
+ host->pmerrloc_base = ioremap(regs_pmerr->start,
+ resource_size(regs_pmerr));
+ host->pmecc_rom_base = ioremap(regs_rom->start,
+ resource_size(regs_rom));
+ }
+
+ if (!host->pmerrloc_base || !host->pmecc_rom_base) {
+ dev_err(host->dev,
+			"Cannot get I/O resource for PMECC ERRLOC controller or ROM!\n");
+ err_no = -EIO;
+ goto err_pmloc_ioremap;
+ }
+
+ /* ECC is calculated for the whole page (1 step) */
+ nand_chip->ecc.size = mtd->writesize;
+
+ /* set ECC page size and oob layout */
+ switch (mtd->writesize) {
+ case 2048:
+ host->pmecc_degree = PMECC_GF_DIMENSION_13;
+ host->pmecc_cw_len = (1 << host->pmecc_degree) - 1;
+ host->pmecc_sector_number = mtd->writesize / sector_size;
+ host->pmecc_bytes_per_sector = pmecc_get_ecc_bytes(
+ cap, sector_size);
+ host->pmecc_alpha_to = pmecc_get_alpha_to(host);
+ host->pmecc_index_of = host->pmecc_rom_base +
+ host->pmecc_lookup_table_offset;
+
+ nand_chip->ecc.steps = 1;
+ nand_chip->ecc.strength = cap;
+ nand_chip->ecc.bytes = host->pmecc_bytes_per_sector *
+ host->pmecc_sector_number;
+ if (nand_chip->ecc.bytes > mtd->oobsize - 2) {
+ dev_err(host->dev, "No room for ECC bytes\n");
+ err_no = -EINVAL;
+ goto err_no_ecc_room;
+ }
+ pmecc_config_ecc_layout(&atmel_pmecc_oobinfo,
+ mtd->oobsize,
+ nand_chip->ecc.bytes);
+ nand_chip->ecc.layout = &atmel_pmecc_oobinfo;
+ break;
+ case 512:
+ case 1024:
+ case 4096:
+ /* TODO */
+ dev_warn(host->dev,
+			"Unsupported page size for PMECC, using software ECC\n");
+ default:
+ /* page size not handled by HW ECC */
+ /* switching back to soft ECC */
+ nand_chip->ecc.mode = NAND_ECC_SOFT;
+ return 0;
+ }
+
+ /* Allocate data for PMECC computation */
+ err_no = pmecc_data_alloc(host);
+ if (err_no) {
+ dev_err(host->dev,
+ "Cannot allocate memory for PMECC computation!\n");
+ goto err_pmecc_data_alloc;
+ }
+
+ nand_chip->ecc.read_page = atmel_nand_pmecc_read_page;
+ nand_chip->ecc.write_page = atmel_nand_pmecc_write_page;
+
+ atmel_pmecc_core_init(mtd);
+
+ return 0;
+
+err_pmecc_data_alloc:
+err_no_ecc_room:
+err_pmloc_ioremap:
+ iounmap(host->ecc);
+ if (host->pmerrloc_base)
+ iounmap(host->pmerrloc_base);
+ if (host->pmecc_rom_base)
+ iounmap(host->pmecc_rom_base);
+err_pmecc_ioremap:
+ return err_no;
+}
+
+/*
* Calculate HW ECC
*
* function called after a write
@@ -481,7 +1208,8 @@ static void atmel_nand_hwctl(struct mtd_info *mtd, int mode)
static int __devinit atmel_of_init_port(struct atmel_nand_host *host,
struct device_node *np)
{
- u32 val;
+ u32 val, table_offset;
+ u32 offset[2];
int ecc_mode;
struct atmel_nand_data *board = &host->board;
enum of_gpio_flags flags;
@@ -517,6 +1245,50 @@ static int __devinit atmel_of_init_port(struct atmel_nand_host *host,
board->enable_pin = of_get_gpio(np, 1);
board->det_pin = of_get_gpio(np, 2);
+ host->has_pmecc = of_property_read_bool(np, "atmel,has-pmecc");
+
+	if (board->ecc_mode != NAND_ECC_HW || !host->has_pmecc)
+ return 0; /* Not using PMECC */
+
+	/* Using PMECC: get the correction capability, sector size and
+	 * lookup table offset from the device tree.
+	 */
+ if (of_property_read_u32(np, "atmel,pmecc-cap", &val) != 0) {
+		dev_err(host->dev, "Cannot determine PMECC correction capability\n");
+ return -EINVAL;
+ } else if ((val != 2) && (val != 4) && (val != 8) && (val != 12) &&
+ (val != 24)) {
+ dev_err(host->dev,
+ "Unsupported PMECC correction capability: %d; should be 2, 4, 8, 12 or 24\n",
+ val);
+ return -EINVAL;
+ }
+ host->pmecc_corr_cap = (u8)val;
+
+ if (of_property_read_u32(np, "atmel,pmecc-sector-size", &val) != 0) {
+		dev_err(host->dev, "Cannot determine PMECC sector size\n");
+ return -EINVAL;
+ } else if ((val != 512) && (val != 1024)) {
+ dev_err(host->dev,
+ "Unsupported PMECC sector size: %d; should be 512 or 1024 bytes\n",
+ val);
+ return -EINVAL;
+ }
+ host->pmecc_sector_size = (u16)val;
+
+ if (of_property_read_u32_array(np, "atmel,pmecc-lookup-table-offset",
+ offset, 2) != 0) {
+ dev_err(host->dev, "Cannot get PMECC lookup table offset\n");
+ return -EINVAL;
+ }
+ table_offset = host->pmecc_sector_size == 512 ? offset[0] : offset[1];
+
+ if (!table_offset) {
+ dev_err(host->dev, "Invalid PMECC lookup table offset\n");
+ return -EINVAL;
+ }
+ host->pmecc_lookup_table_offset = table_offset;
+
return 0;
}
#else
@@ -527,6 +1299,66 @@ static int __devinit atmel_of_init_port(struct atmel_nand_host *host,
}
#endif
+static int __init atmel_hw_nand_init_params(struct platform_device *pdev,
+ struct atmel_nand_host *host)
+{
+ struct mtd_info *mtd = &host->mtd;
+ struct nand_chip *nand_chip = &host->nand_chip;
+ struct resource *regs;
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!regs) {
+ dev_err(host->dev,
+			"Can't get I/O resource regs, using software ECC\n");
+ nand_chip->ecc.mode = NAND_ECC_SOFT;
+ return 0;
+ }
+
+ host->ecc = ioremap(regs->start, resource_size(regs));
+ if (host->ecc == NULL) {
+ dev_err(host->dev, "ioremap failed\n");
+ return -EIO;
+ }
+
+ /* ECC is calculated for the whole page (1 step) */
+ nand_chip->ecc.size = mtd->writesize;
+
+ /* set ECC page size and oob layout */
+ switch (mtd->writesize) {
+ case 512:
+ nand_chip->ecc.layout = &atmel_oobinfo_small;
+ ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_528);
+ break;
+ case 1024:
+ nand_chip->ecc.layout = &atmel_oobinfo_large;
+ ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_1056);
+ break;
+ case 2048:
+ nand_chip->ecc.layout = &atmel_oobinfo_large;
+ ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_2112);
+ break;
+ case 4096:
+ nand_chip->ecc.layout = &atmel_oobinfo_large;
+ ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_4224);
+ break;
+ default:
+ /* page size not handled by HW ECC */
+ /* switching back to soft ECC */
+ nand_chip->ecc.mode = NAND_ECC_SOFT;
+ return 0;
+ }
+
+ /* set up for HW ECC */
+ nand_chip->ecc.calculate = atmel_nand_calculate;
+ nand_chip->ecc.correct = atmel_nand_correct;
+ nand_chip->ecc.hwctl = atmel_nand_hwctl;
+ nand_chip->ecc.read_page = atmel_nand_read_page;
+ nand_chip->ecc.bytes = 4;
+ nand_chip->ecc.strength = 1;
+
+ return 0;
+}
+
/*
* Probe for the NAND device.
*/
@@ -535,7 +1367,6 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
struct atmel_nand_host *host;
struct mtd_info *mtd;
struct nand_chip *nand_chip;
- struct resource *regs;
struct resource *mem;
struct mtd_part_parser_data ppdata = {};
int res;
@@ -568,7 +1399,7 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
if (pdev->dev.of_node) {
res = atmel_of_init_port(host, pdev->dev.of_node);
if (res)
- goto err_nand_ioremap;
+ goto err_ecc_ioremap;
} else {
memcpy(&host->board, pdev->dev.platform_data,
sizeof(struct atmel_nand_data));
@@ -583,33 +1414,45 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
nand_chip->IO_ADDR_W = host->io_base;
nand_chip->cmd_ctrl = atmel_nand_cmd_ctrl;
- if (gpio_is_valid(host->board.rdy_pin))
- nand_chip->dev_ready = atmel_nand_device_ready;
+ if (gpio_is_valid(host->board.rdy_pin)) {
+ res = gpio_request(host->board.rdy_pin, "nand_rdy");
+ if (res < 0) {
+ dev_err(&pdev->dev,
+ "can't request rdy gpio %d\n",
+ host->board.rdy_pin);
+ goto err_ecc_ioremap;
+ }
- nand_chip->ecc.mode = host->board.ecc_mode;
+ res = gpio_direction_input(host->board.rdy_pin);
+ if (res < 0) {
+ dev_err(&pdev->dev,
+				"can't set input direction on rdy gpio %d\n",
+ host->board.rdy_pin);
+ goto err_ecc_ioremap;
+ }
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!regs && nand_chip->ecc.mode == NAND_ECC_HW) {
- printk(KERN_ERR "atmel_nand: can't get I/O resource "
- "regs\nFalling back on software ECC\n");
- nand_chip->ecc.mode = NAND_ECC_SOFT;
+ nand_chip->dev_ready = atmel_nand_device_ready;
}
- if (nand_chip->ecc.mode == NAND_ECC_HW) {
- host->ecc = ioremap(regs->start, resource_size(regs));
- if (host->ecc == NULL) {
- printk(KERN_ERR "atmel_nand: ioremap failed\n");
- res = -EIO;
+ if (gpio_is_valid(host->board.enable_pin)) {
+ res = gpio_request(host->board.enable_pin, "nand_enable");
+ if (res < 0) {
+ dev_err(&pdev->dev,
+ "can't request enable gpio %d\n",
+ host->board.enable_pin);
+ goto err_ecc_ioremap;
+ }
+
+ res = gpio_direction_output(host->board.enable_pin, 1);
+ if (res < 0) {
+ dev_err(&pdev->dev,
+				"can't set output direction on enable gpio %d\n",
+ host->board.enable_pin);
goto err_ecc_ioremap;
}
- nand_chip->ecc.calculate = atmel_nand_calculate;
- nand_chip->ecc.correct = atmel_nand_correct;
- nand_chip->ecc.hwctl = atmel_nand_hwctl;
- nand_chip->ecc.read_page = atmel_nand_read_page;
- nand_chip->ecc.bytes = 4;
- nand_chip->ecc.strength = 1;
}
+ nand_chip->ecc.mode = host->board.ecc_mode;
nand_chip->chip_delay = 20; /* 20us command delay time */
if (host->board.bus_width_16) /* 16-bit bus width */
@@ -622,6 +1465,22 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
atmel_nand_enable(host);
if (gpio_is_valid(host->board.det_pin)) {
+ res = gpio_request(host->board.det_pin, "nand_det");
+ if (res < 0) {
+ dev_err(&pdev->dev,
+ "can't request det gpio %d\n",
+ host->board.det_pin);
+ goto err_no_card;
+ }
+
+ res = gpio_direction_input(host->board.det_pin);
+ if (res < 0) {
+ dev_err(&pdev->dev,
+				"can't set input direction on det gpio %d\n",
+ host->board.det_pin);
+ goto err_no_card;
+ }
+
if (gpio_get_value(host->board.det_pin)) {
printk(KERN_INFO "No SmartMedia card inserted.\n");
res = -ENXIO;
@@ -661,40 +1520,13 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
}
if (nand_chip->ecc.mode == NAND_ECC_HW) {
- /* ECC is calculated for the whole page (1 step) */
- nand_chip->ecc.size = mtd->writesize;
-
- /* set ECC page size and oob layout */
- switch (mtd->writesize) {
- case 512:
- nand_chip->ecc.layout = &atmel_oobinfo_small;
- ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_528);
- break;
- case 1024:
- nand_chip->ecc.layout = &atmel_oobinfo_large;
- ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_1056);
- break;
- case 2048:
- nand_chip->ecc.layout = &atmel_oobinfo_large;
- ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_2112);
- break;
- case 4096:
- nand_chip->ecc.layout = &atmel_oobinfo_large;
- ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_4224);
- break;
- default:
- /* page size not handled by HW ECC */
- /* switching back to soft ECC */
- nand_chip->ecc.mode = NAND_ECC_SOFT;
- nand_chip->ecc.calculate = NULL;
- nand_chip->ecc.correct = NULL;
- nand_chip->ecc.hwctl = NULL;
- nand_chip->ecc.read_page = NULL;
- nand_chip->ecc.postpad = 0;
- nand_chip->ecc.prepad = 0;
- nand_chip->ecc.bytes = 0;
- break;
- }
+ if (host->has_pmecc)
+ res = atmel_pmecc_nand_init_params(pdev, host);
+ else
+ res = atmel_hw_nand_init_params(pdev, host);
+
+ if (res != 0)
+ goto err_hw_ecc;
}
/* second phase scan */
@@ -711,14 +1543,23 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
return res;
err_scan_tail:
+ if (host->has_pmecc && host->nand_chip.ecc.mode == NAND_ECC_HW) {
+ pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE);
+ pmecc_data_free(host);
+ }
+ if (host->ecc)
+ iounmap(host->ecc);
+ if (host->pmerrloc_base)
+ iounmap(host->pmerrloc_base);
+ if (host->pmecc_rom_base)
+ iounmap(host->pmecc_rom_base);
+err_hw_ecc:
err_scan_ident:
err_no_card:
atmel_nand_disable(host);
platform_set_drvdata(pdev, NULL);
if (host->dma_chan)
dma_release_channel(host->dma_chan);
- if (host->ecc)
- iounmap(host->ecc);
err_ecc_ioremap:
iounmap(host->io_base);
err_nand_ioremap:
@@ -738,8 +1579,28 @@ static int __exit atmel_nand_remove(struct platform_device *pdev)
atmel_nand_disable(host);
+ if (host->has_pmecc && host->nand_chip.ecc.mode == NAND_ECC_HW) {
+ pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE);
+ pmerrloc_writel(host->pmerrloc_base, ELDIS,
+ PMERRLOC_DISABLE);
+ pmecc_data_free(host);
+ }
+
+ if (gpio_is_valid(host->board.det_pin))
+ gpio_free(host->board.det_pin);
+
+ if (gpio_is_valid(host->board.enable_pin))
+ gpio_free(host->board.enable_pin);
+
+ if (gpio_is_valid(host->board.rdy_pin))
+ gpio_free(host->board.rdy_pin);
+
if (host->ecc)
iounmap(host->ecc);
+ if (host->pmecc_rom_base)
+ iounmap(host->pmecc_rom_base);
+ if (host->pmerrloc_base)
+ iounmap(host->pmerrloc_base);
if (host->dma_chan)
dma_release_channel(host->dma_chan);
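
Both PMECC data paths added above (atmel_nand_pmecc_read_page and atmel_nand_pmecc_write_page) poll the status register under a jiffies deadline before trusting the controller's output. A minimal sketch of that idiom, factored into a helper built only from definitions this patch introduces — the helper name is hypothetical; the driver itself open-codes the loop at each call site:

static int pmecc_wait_ready(struct atmel_nand_host *host)
{
	unsigned long end_time = jiffies + msecs_to_jiffies(PMECC_MAX_TIMEOUT_MS);

	/* Spin until the controller drops its busy bit or we give up. */
	while (pmecc_readl_relaxed(host->ecc, SR) & PMECC_SR_BUSY) {
		if (unlikely(time_after(jiffies, end_time)))
			return -EIO;	/* controller never went idle */
		cpu_relax();
	}
	return 0;
}

The relaxed read is enough here because the loop only samples a status bit; the ordered writel (via pmecc_writel) is reserved for control and configuration writes.
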
diff --git a/drivers/mtd/nand/atmel_nand_ecc.h b/drivers/mtd/nand/atmel_nand_ecc.h
index 578c776..8a1e9a6 100644
--- a/drivers/mtd/nand/atmel_nand_ecc.h
+++ b/drivers/mtd/nand/atmel_nand_ecc.h
@@ -3,7 +3,7 @@
* Based on AT91SAM9260 datasheet revision B.
*
* Copyright (C) 2007 Andrew Victor
- * Copyright (C) 2007 Atmel Corporation.
+ * Copyright (C) 2007 - 2012 Atmel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -36,4 +36,116 @@
#define ATMEL_ECC_NPR 0x10 /* NParity register */
#define ATMEL_ECC_NPARITY (0xffff << 0) /* NParity */
+/* PMECC Register Definitions */
+#define ATMEL_PMECC_CFG 0x000 /* Configuration Register */
+#define PMECC_CFG_BCH_ERR2 (0 << 0)
+#define PMECC_CFG_BCH_ERR4 (1 << 0)
+#define PMECC_CFG_BCH_ERR8 (2 << 0)
+#define PMECC_CFG_BCH_ERR12 (3 << 0)
+#define PMECC_CFG_BCH_ERR24 (4 << 0)
+
+#define PMECC_CFG_SECTOR512 (0 << 4)
+#define PMECC_CFG_SECTOR1024 (1 << 4)
+
+#define PMECC_CFG_PAGE_1SECTOR (0 << 8)
+#define PMECC_CFG_PAGE_2SECTORS (1 << 8)
+#define PMECC_CFG_PAGE_4SECTORS (2 << 8)
+#define PMECC_CFG_PAGE_8SECTORS (3 << 8)
+
+#define PMECC_CFG_READ_OP (0 << 12)
+#define PMECC_CFG_WRITE_OP (1 << 12)
+
+#define PMECC_CFG_SPARE_ENABLE (1 << 16)
+#define PMECC_CFG_SPARE_DISABLE (0 << 16)
+
+#define PMECC_CFG_AUTO_ENABLE (1 << 20)
+#define PMECC_CFG_AUTO_DISABLE (0 << 20)
+
+#define ATMEL_PMECC_SAREA 0x004 /* Spare area size */
+#define ATMEL_PMECC_SADDR 0x008 /* PMECC starting address */
+#define ATMEL_PMECC_EADDR 0x00c /* PMECC ending address */
+#define ATMEL_PMECC_CLK 0x010 /* PMECC clock control */
+#define PMECC_CLK_133MHZ (2 << 0)
+
+#define ATMEL_PMECC_CTRL 0x014 /* PMECC control register */
+#define PMECC_CTRL_RST (1 << 0)
+#define PMECC_CTRL_DATA (1 << 1)
+#define PMECC_CTRL_USER (1 << 2)
+#define PMECC_CTRL_ENABLE (1 << 4)
+#define PMECC_CTRL_DISABLE (1 << 5)
+
+#define ATMEL_PMECC_SR 0x018 /* PMECC status register */
+#define PMECC_SR_BUSY (1 << 0)
+#define PMECC_SR_ENABLE (1 << 4)
+
+#define ATMEL_PMECC_IER 0x01c /* PMECC interrupt enable */
+#define PMECC_IER_ENABLE (1 << 0)
+#define ATMEL_PMECC_IDR 0x020 /* PMECC interrupt disable */
+#define PMECC_IER_DISABLE (1 << 0)
+#define ATMEL_PMECC_IMR 0x024 /* PMECC interrupt mask */
+#define PMECC_IER_MASK (1 << 0)
+#define ATMEL_PMECC_ISR 0x028 /* PMECC interrupt status */
+#define ATMEL_PMECC_ECCx 0x040 /* PMECC ECC x */
+#define ATMEL_PMECC_REMx 0x240 /* PMECC REM x */
+
+/* PMERRLOC Register Definitions */
+#define ATMEL_PMERRLOC_ELCFG 0x000 /* Error location config */
+#define PMERRLOC_ELCFG_SECTOR_512 (0 << 0)
+#define PMERRLOC_ELCFG_SECTOR_1024 (1 << 0)
+#define PMERRLOC_ELCFG_NUM_ERRORS(n) ((n) << 16)
+
+#define ATMEL_PMERRLOC_ELPRIM 0x004 /* Error location primitive */
+#define ATMEL_PMERRLOC_ELEN 0x008 /* Error location enable */
+#define ATMEL_PMERRLOC_ELDIS 0x00c /* Error location disable */
+#define PMERRLOC_DISABLE (1 << 0)
+
+#define ATMEL_PMERRLOC_ELSR 0x010 /* Error location status */
+#define PMERRLOC_ELSR_BUSY (1 << 0)
+#define ATMEL_PMERRLOC_ELIER 0x014 /* Error location int enable */
+#define ATMEL_PMERRLOC_ELIDR 0x018 /* Error location int disable */
+#define ATMEL_PMERRLOC_ELIMR 0x01c /* Error location int mask */
+#define ATMEL_PMERRLOC_ELISR 0x020 /* Error location int status */
+#define PMERRLOC_ERR_NUM_MASK (0x1f << 8)
+#define PMERRLOC_CALC_DONE (1 << 0)
+#define ATMEL_PMERRLOC_SIGMAx 0x028 /* Error location SIGMA x */
+#define ATMEL_PMERRLOC_ELx 0x08c /* Error location x */
+
+/* Register access macros for PMECC */
+#define pmecc_readl_relaxed(addr, reg) \
+ readl_relaxed((addr) + ATMEL_PMECC_##reg)
+
+#define pmecc_writel(addr, reg, value) \
+ writel((value), (addr) + ATMEL_PMECC_##reg)
+
+#define pmecc_readb_ecc_relaxed(addr, sector, n) \
+ readb_relaxed((addr) + ATMEL_PMECC_ECCx + ((sector) * 0x40) + (n))
+
+#define pmecc_readl_rem_relaxed(addr, sector, n) \
+ readl_relaxed((addr) + ATMEL_PMECC_REMx + ((sector) * 0x40) + ((n) * 4))
+
+#define pmerrloc_readl_relaxed(addr, reg) \
+ readl_relaxed((addr) + ATMEL_PMERRLOC_##reg)
+
+#define pmerrloc_writel(addr, reg, value) \
+ writel((value), (addr) + ATMEL_PMERRLOC_##reg)
+
+#define pmerrloc_writel_sigma_relaxed(addr, n, value) \
+ writel_relaxed((value), (addr) + ATMEL_PMERRLOC_SIGMAx + ((n) * 4))
+
+#define pmerrloc_readl_sigma_relaxed(addr, n) \
+ readl_relaxed((addr) + ATMEL_PMERRLOC_SIGMAx + ((n) * 4))
+
+#define pmerrloc_readl_el_relaxed(addr, n) \
+ readl_relaxed((addr) + ATMEL_PMERRLOC_ELx + ((n) * 4))
+
+/* Galois field dimension */
+#define PMECC_GF_DIMENSION_13 13
+#define PMECC_GF_DIMENSION_14 14
+
+#define PMECC_LOOKUP_TABLE_SIZE_512 0x2000
+#define PMECC_LOOKUP_TABLE_SIZE_1024 0x4000
+
+/* Timeout value for reading the PMECC status register */
+#define PMECC_MAX_TIMEOUT_MS 100
+
#endif
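
The per-sector access macros above encode a simple bank layout: ATMEL_PMECC_ECCx (0x040) is an array of 0x40-byte slots, one per sector, and the ATMEL_PMECC_REMx bank uses the same 0x40 stride read in 32-bit words. The lookup-table sizes also line up with the field sizes: 0x2000 = 2^13 entries for GF(2^13) (512-byte sectors) and 0x4000 = 2^14 for GF(2^14) (1024-byte sectors). A sketch of pulling one sector's ECC bytes out of the bank with these macros — the helper is hypothetical; atmel_nand_pmecc_write_page open-codes the same loop:

static void pmecc_copy_sector_ecc(void __iomem *pmecc_base, int sector,
				  u8 *dst, int nbytes)
{
	int i;

	/* Byte n of sector s sits at base + ATMEL_PMECC_ECCx + s * 0x40 + n,
	 * which is exactly the address pmecc_readb_ecc_relaxed() forms. */
	for (i = 0; i < nbytes; i++)
		dst[i] = pmecc_readb_ecc_relaxed(pmecc_base, sector, i);
}
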
diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c
index 9f609d2..5c47b20 100644
--- a/drivers/mtd/nand/au1550nd.c
+++ b/drivers/mtd/nand/au1550nd.c
@@ -141,28 +141,6 @@ static void au_read_buf(struct mtd_info *mtd, u_char *buf, int len)
}
/**
- * au_verify_buf - Verify chip data against buffer
- * @mtd: MTD device structure
- * @buf: buffer containing the data to compare
- * @len: number of bytes to compare
- *
- * verify function for 8bit buswidth
- */
-static int au_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
-{
- int i;
- struct nand_chip *this = mtd->priv;
-
- for (i = 0; i < len; i++) {
- if (buf[i] != readb(this->IO_ADDR_R))
- return -EFAULT;
- au_sync();
- }
-
- return 0;
-}
-
-/**
* au_write_buf16 - write buffer to chip
* @mtd: MTD device structure
* @buf: data buffer
@@ -205,29 +183,6 @@ static void au_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
}
}
-/**
- * au_verify_buf16 - Verify chip data against buffer
- * @mtd: MTD device structure
- * @buf: buffer containing the data to compare
- * @len: number of bytes to compare
- *
- * verify function for 16bit buswidth
- */
-static int au_verify_buf16(struct mtd_info *mtd, const u_char *buf, int len)
-{
- int i;
- struct nand_chip *this = mtd->priv;
- u16 *p = (u16 *) buf;
- len >>= 1;
-
- for (i = 0; i < len; i++) {
- if (p[i] != readw(this->IO_ADDR_R))
- return -EFAULT;
- au_sync();
- }
- return 0;
-}
-
/* Select the chip by setting nCE to low */
#define NAND_CTL_SETNCE 1
/* Deselect the chip by setting nCE to high */
@@ -516,7 +471,6 @@ static int __devinit au1550nd_probe(struct platform_device *pdev)
this->read_word = au_read_word;
this->write_buf = (pd->devwidth) ? au_write_buf16 : au_write_buf;
this->read_buf = (pd->devwidth) ? au_read_buf16 : au_read_buf;
- this->verify_buf = (pd->devwidth) ? au_verify_buf16 : au_verify_buf;
ret = nand_scan(&ctx->info, 1);
if (ret) {
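
The au_verify_buf/au_verify_buf16 deletions here (and the matching ones in the drivers below) remove implementations of the verify_buf hook that this series drops from the NAND API. If readback verification is still wanted, it has to live above the driver; a sketch at the MTD API level, where mtd_verify_write is a hypothetical helper, not an existing kernel function:

static int mtd_verify_write(struct mtd_info *mtd, loff_t addr,
			    size_t len, const u_char *buf)
{
	size_t retlen;
	u_char *tmp;
	int ret;

	tmp = kmalloc(len, GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	/* Read back through the normal ECC path; a real version would
	 * decide separately how to treat -EUCLEAN (corrected bitflips). */
	ret = mtd_read(mtd, addr, len, &retlen, tmp);
	if (!ret && (retlen != len || memcmp(tmp, buf, len)))
		ret = -EIO;

	kfree(tmp);
	return ret;
}
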
diff --git a/drivers/mtd/nand/bcm_umi_bch.c b/drivers/mtd/nand/bcm_umi_bch.c
deleted file mode 100644
index 5914bb3..0000000
--- a/drivers/mtd/nand/bcm_umi_bch.c
+++ /dev/null
@@ -1,217 +0,0 @@
-/*****************************************************************************
-* Copyright 2004 - 2009 Broadcom Corporation. All rights reserved.
-*
-* Unless you and Broadcom execute a separate written software license
-* agreement governing use of this software, this software is licensed to you
-* under the terms of the GNU General Public License version 2, available at
-* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
-*
-* Notwithstanding the above, under no circumstances may you combine this
-* software in any way with any other Broadcom software provided under a
-* license other than the GPL, without Broadcom's express prior written
-* consent.
-*****************************************************************************/
-
-/* ---- Include Files ---------------------------------------------------- */
-#include "nand_bcm_umi.h"
-
-/* ---- External Variable Declarations ----------------------------------- */
-/* ---- External Function Prototypes ------------------------------------- */
-/* ---- Public Variables ------------------------------------------------- */
-/* ---- Private Constants and Types -------------------------------------- */
-
-/* ---- Private Function Prototypes -------------------------------------- */
-static int bcm_umi_bch_read_page_hwecc(struct mtd_info *mtd,
- struct nand_chip *chip, uint8_t *buf, int oob_required, int page);
-static void bcm_umi_bch_write_page_hwecc(struct mtd_info *mtd,
- struct nand_chip *chip, const uint8_t *buf, int oob_required);
-
-/* ---- Private Variables ------------------------------------------------ */
-
-/*
-** nand_hw_eccoob
-** New oob placement block for use with hardware ecc generation.
-*/
-static struct nand_ecclayout nand_hw_eccoob_512 = {
- /* Reserve 5 for BI indicator */
- .oobfree = {
-#if (NAND_ECC_NUM_BYTES > 3)
- {.offset = 0, .length = 2}
-#else
- {.offset = 0, .length = 5},
- {.offset = 6, .length = 7}
-#endif
- }
-};
-
-/*
-** We treat the OOB for a 2K page as if it were 4 512 byte oobs,
-** except the BI is at byte 0.
-*/
-static struct nand_ecclayout nand_hw_eccoob_2048 = {
- /* Reserve 0 as BI indicator */
- .oobfree = {
-#if (NAND_ECC_NUM_BYTES > 10)
- {.offset = 1, .length = 2},
-#elif (NAND_ECC_NUM_BYTES > 7)
- {.offset = 1, .length = 5},
- {.offset = 16, .length = 6},
- {.offset = 32, .length = 6},
- {.offset = 48, .length = 6}
-#else
- {.offset = 1, .length = 8},
- {.offset = 16, .length = 9},
- {.offset = 32, .length = 9},
- {.offset = 48, .length = 9}
-#endif
- }
-};
-
-/* We treat the OOB for a 4K page as if it were 8 512 byte oobs,
- * except the BI is at byte 0. */
-static struct nand_ecclayout nand_hw_eccoob_4096 = {
- /* Reserve 0 as BI indicator */
- .oobfree = {
-#if (NAND_ECC_NUM_BYTES > 10)
- {.offset = 1, .length = 2},
- {.offset = 16, .length = 3},
- {.offset = 32, .length = 3},
- {.offset = 48, .length = 3},
- {.offset = 64, .length = 3},
- {.offset = 80, .length = 3},
- {.offset = 96, .length = 3},
- {.offset = 112, .length = 3}
-#else
- {.offset = 1, .length = 5},
- {.offset = 16, .length = 6},
- {.offset = 32, .length = 6},
- {.offset = 48, .length = 6},
- {.offset = 64, .length = 6},
- {.offset = 80, .length = 6},
- {.offset = 96, .length = 6},
- {.offset = 112, .length = 6}
-#endif
- }
-};
-
-/* ---- Private Functions ------------------------------------------------ */
-/* ==== Public Functions ================================================= */
-
-/****************************************************************************
-*
-* bcm_umi_bch_read_page_hwecc - hardware ecc based page read function
-* @mtd: mtd info structure
-* @chip: nand chip info structure
-* @buf: buffer to store read data
-* @oob_required: caller expects OOB data read to chip->oob_poi
-*
-***************************************************************************/
-static int bcm_umi_bch_read_page_hwecc(struct mtd_info *mtd,
- struct nand_chip *chip, uint8_t * buf,
- int oob_required, int page)
-{
- int sectorIdx = 0;
- int eccsize = chip->ecc.size;
- int eccsteps = chip->ecc.steps;
- uint8_t *datap = buf;
- uint8_t eccCalc[NAND_ECC_NUM_BYTES];
- int sectorOobSize = mtd->oobsize / eccsteps;
- int stat;
- unsigned int max_bitflips = 0;
-
- for (sectorIdx = 0; sectorIdx < eccsteps;
- sectorIdx++, datap += eccsize) {
- if (sectorIdx > 0) {
- /* Seek to page location within sector */
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT, sectorIdx * eccsize,
- -1);
- }
-
- /* Enable hardware ECC before reading the buf */
- nand_bcm_umi_bch_enable_read_hwecc();
-
- /* Read in data */
- bcm_umi_nand_read_buf(mtd, datap, eccsize);
-
- /* Pause hardware ECC after reading the buf */
- nand_bcm_umi_bch_pause_read_ecc_calc();
-
- /* Read the OOB ECC */
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
- mtd->writesize + sectorIdx * sectorOobSize, -1);
- nand_bcm_umi_bch_read_oobEcc(mtd->writesize, eccCalc,
- NAND_ECC_NUM_BYTES,
- chip->oob_poi +
- sectorIdx * sectorOobSize);
-
- /* Correct any ECC detected errors */
- stat =
- nand_bcm_umi_bch_correct_page(datap, eccCalc,
- NAND_ECC_NUM_BYTES);
-
- /* Update Stats */
- if (stat < 0) {
-#if defined(NAND_BCM_UMI_DEBUG)
- printk(KERN_WARNING "%s uncorr_err sectorIdx=%d\n",
- __func__, sectorIdx);
- printk(KERN_WARNING
- "%s data %02x %02x %02x %02x "
- "%02x %02x %02x %02x\n",
- __func__, datap[0], datap[1], datap[2], datap[3],
- datap[4], datap[5], datap[6], datap[7]);
- printk(KERN_WARNING
- "%s ecc %02x %02x %02x %02x "
- "%02x %02x %02x %02x %02x %02x "
- "%02x %02x %02x\n",
- __func__, eccCalc[0], eccCalc[1], eccCalc[2],
- eccCalc[3], eccCalc[4], eccCalc[5], eccCalc[6],
- eccCalc[7], eccCalc[8], eccCalc[9], eccCalc[10],
- eccCalc[11], eccCalc[12]);
- BUG();
-#endif
- mtd->ecc_stats.failed++;
- } else {
-#if defined(NAND_BCM_UMI_DEBUG)
- if (stat > 0) {
- printk(KERN_INFO
- "%s %d correctable_errors detected\n",
- __func__, stat);
- }
-#endif
- mtd->ecc_stats.corrected += stat;
- max_bitflips = max_t(unsigned int, max_bitflips, stat);
- }
- }
- return max_bitflips;
-}
-
-/****************************************************************************
-*
-* bcm_umi_bch_write_page_hwecc - hardware ecc based page write function
-* @mtd: mtd info structure
-* @chip: nand chip info structure
-* @buf: data buffer
-* @oob_required: must write chip->oob_poi to OOB
-*
-***************************************************************************/
-static void bcm_umi_bch_write_page_hwecc(struct mtd_info *mtd,
- struct nand_chip *chip, const uint8_t *buf, int oob_required)
-{
- int sectorIdx = 0;
- int eccsize = chip->ecc.size;
- int eccsteps = chip->ecc.steps;
- const uint8_t *datap = buf;
- uint8_t *oobp = chip->oob_poi;
- int sectorOobSize = mtd->oobsize / eccsteps;
-
- for (sectorIdx = 0; sectorIdx < eccsteps;
- sectorIdx++, datap += eccsize, oobp += sectorOobSize) {
- /* Enable hardware ECC before writing the buf */
- nand_bcm_umi_bch_enable_write_hwecc();
- bcm_umi_nand_write_buf(mtd, datap, eccsize);
- nand_bcm_umi_bch_write_oobEcc(mtd->writesize, oobp,
- NAND_ECC_NUM_BYTES);
- }
-
- bcm_umi_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize);
-}
diff --git a/drivers/mtd/nand/bcm_umi_nand.c b/drivers/mtd/nand/bcm_umi_nand.c
deleted file mode 100644
index d0d1bd4..0000000
--- a/drivers/mtd/nand/bcm_umi_nand.c
+++ /dev/null
@@ -1,555 +0,0 @@
-/*****************************************************************************
-* Copyright 2004 - 2009 Broadcom Corporation. All rights reserved.
-*
-* Unless you and Broadcom execute a separate written software license
-* agreement governing use of this software, this software is licensed to you
-* under the terms of the GNU General Public License version 2, available at
-* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
-*
-* Notwithstanding the above, under no circumstances may you combine this
-* software in any way with any other Broadcom software provided under a
-* license other than the GPL, without Broadcom's express prior written
-* consent.
-*****************************************************************************/
-
-/* ---- Include Files ---------------------------------------------------- */
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/ioport.h>
-#include <linux/device.h>
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/platform_device.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand.h>
-#include <linux/mtd/nand_ecc.h>
-#include <linux/mtd/partitions.h>
-
-#include <asm/mach-types.h>
-
-#include <mach/reg_nand.h>
-#include <mach/reg_umi.h>
-
-#include "nand_bcm_umi.h"
-
-#include <mach/memory_settings.h>
-
-#define USE_DMA 1
-#include <mach/dma.h>
-#include <linux/dma-mapping.h>
-#include <linux/completion.h>
-
-/* ---- External Variable Declarations ----------------------------------- */
-/* ---- External Function Prototypes ------------------------------------- */
-/* ---- Public Variables ------------------------------------------------- */
-/* ---- Private Constants and Types -------------------------------------- */
-static const __devinitconst char gBanner[] = KERN_INFO \
- "BCM UMI MTD NAND Driver: 1.00\n";
-
-#if NAND_ECC_BCH
-static uint8_t scan_ff_pattern[] = { 0xff };
-
-static struct nand_bbt_descr largepage_bbt = {
- .options = 0,
- .offs = 0,
- .len = 1,
- .pattern = scan_ff_pattern
-};
-#endif
-
-/*
-** Preallocate a buffer to avoid having to do this every dma operation.
-** This is the size of the preallocated coherent DMA buffer.
-*/
-#if USE_DMA
-#define DMA_MIN_BUFLEN 512
-#define DMA_MAX_BUFLEN PAGE_SIZE
-#define USE_DIRECT_IO(len) (((len) < DMA_MIN_BUFLEN) || \
- ((len) > DMA_MAX_BUFLEN))
-
-/*
- * The current NAND data space goes from 0x80001900 to 0x80001FFF,
- * which is only 0x700 = 1792 bytes long. This is too small for 2K, 4K page
- * size NAND flash. Need to break the DMA down to multiple 1Ks.
- *
- * Need to make sure REG_NAND_DATA_PADDR + DMA_MAX_LEN < 0x80002000
- */
-#define DMA_MAX_LEN 1024
-
-#else /* !USE_DMA */
-#define DMA_MIN_BUFLEN 0
-#define DMA_MAX_BUFLEN 0
-#define USE_DIRECT_IO(len) 1
-#endif
-/* ---- Private Function Prototypes -------------------------------------- */
-static void bcm_umi_nand_read_buf(struct mtd_info *mtd, u_char * buf, int len);
-static void bcm_umi_nand_write_buf(struct mtd_info *mtd, const u_char * buf,
- int len);
-
-/* ---- Private Variables ------------------------------------------------ */
-static struct mtd_info *board_mtd;
-static void __iomem *bcm_umi_io_base;
-static void *virtPtr;
-static dma_addr_t physPtr;
-static struct completion nand_comp;
-
-/* ---- Private Functions ------------------------------------------------ */
-#if NAND_ECC_BCH
-#include "bcm_umi_bch.c"
-#else
-#include "bcm_umi_hamming.c"
-#endif
-
-#if USE_DMA
-
-/* Handler called when the DMA finishes. */
-static void nand_dma_handler(DMA_Device_t dev, int reason, void *userData)
-{
- complete(&nand_comp);
-}
-
-static int nand_dma_init(void)
-{
- int rc;
-
- rc = dma_set_device_handler(DMA_DEVICE_NAND_MEM_TO_MEM,
- nand_dma_handler, NULL);
- if (rc != 0) {
- printk(KERN_ERR "dma_set_device_handler failed: %d\n", rc);
- return rc;
- }
-
- virtPtr =
- dma_alloc_coherent(NULL, DMA_MAX_BUFLEN, &physPtr, GFP_KERNEL);
- if (virtPtr == NULL) {
- printk(KERN_ERR "NAND - Failed to allocate memory for DMA buffer\n");
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static void nand_dma_term(void)
-{
- if (virtPtr != NULL)
- dma_free_coherent(NULL, DMA_MAX_BUFLEN, virtPtr, physPtr);
-}
-
-static void nand_dma_read(void *buf, int len)
-{
- int offset = 0;
- int tmp_len = 0;
- int len_left = len;
- DMA_Handle_t hndl;
-
- if (virtPtr == NULL)
- panic("nand_dma_read: virtPtr == NULL\n");
-
- if ((void *)physPtr == NULL)
- panic("nand_dma_read: physPtr == NULL\n");
-
- hndl = dma_request_channel(DMA_DEVICE_NAND_MEM_TO_MEM);
- if (hndl < 0) {
- printk(KERN_ERR
- "nand_dma_read: unable to allocate dma channel: %d\n",
- (int)hndl);
- panic("\n");
- }
-
- while (len_left > 0) {
- if (len_left > DMA_MAX_LEN) {
- tmp_len = DMA_MAX_LEN;
- len_left -= DMA_MAX_LEN;
- } else {
- tmp_len = len_left;
- len_left = 0;
- }
-
- init_completion(&nand_comp);
- dma_transfer_mem_to_mem(hndl, REG_NAND_DATA_PADDR,
- physPtr + offset, tmp_len);
- wait_for_completion(&nand_comp);
-
- offset += tmp_len;
- }
-
- dma_free_channel(hndl);
-
- if (buf != NULL)
- memcpy(buf, virtPtr, len);
-}
-
-static void nand_dma_write(const void *buf, int len)
-{
- int offset = 0;
- int tmp_len = 0;
- int len_left = len;
- DMA_Handle_t hndl;
-
- if (buf == NULL)
- panic("nand_dma_write: buf == NULL\n");
-
- if (virtPtr == NULL)
- panic("nand_dma_write: virtPtr == NULL\n");
-
- if ((void *)physPtr == NULL)
- panic("nand_dma_write: physPtr == NULL\n");
-
- memcpy(virtPtr, buf, len);
-
-
- hndl = dma_request_channel(DMA_DEVICE_NAND_MEM_TO_MEM);
- if (hndl < 0) {
- printk(KERN_ERR
- "nand_dma_write: unable to allocate dma channel: %d\n",
- (int)hndl);
- panic("\n");
- }
-
- while (len_left > 0) {
- if (len_left > DMA_MAX_LEN) {
- tmp_len = DMA_MAX_LEN;
- len_left -= DMA_MAX_LEN;
- } else {
- tmp_len = len_left;
- len_left = 0;
- }
-
- init_completion(&nand_comp);
- dma_transfer_mem_to_mem(hndl, physPtr + offset,
- REG_NAND_DATA_PADDR, tmp_len);
- wait_for_completion(&nand_comp);
-
- offset += tmp_len;
- }
-
- dma_free_channel(hndl);
-}
-
-#endif
-
-static int nand_dev_ready(struct mtd_info *mtd)
-{
- return nand_bcm_umi_dev_ready();
-}
-
-/****************************************************************************
-*
-* bcm_umi_nand_inithw
-*
-* This routine does the necessary hardware (board-specific)
-* initializations. This includes setting up the timings, etc.
-*
-***************************************************************************/
-int bcm_umi_nand_inithw(void)
-{
- /* Configure nand timing parameters */
- writel(readl(&REG_UMI_NAND_TCR) & ~0x7ffff, &REG_UMI_NAND_TCR);
- writel(readl(&REG_UMI_NAND_TCR) | HW_CFG_NAND_TCR, &REG_UMI_NAND_TCR);
-
-#if !defined(CONFIG_MTD_NAND_BCM_UMI_HWCS)
- /* enable software control of CS */
- writel(readl(&REG_UMI_NAND_TCR) | REG_UMI_NAND_TCR_CS_SWCTRL, &REG_UMI_NAND_TCR);
-#endif
-
- /* keep NAND chip select asserted */
- writel(readl(&REG_UMI_NAND_RCSR) | REG_UMI_NAND_RCSR_CS_ASSERTED, &REG_UMI_NAND_RCSR);
-
- writel(readl(&REG_UMI_NAND_TCR) & ~REG_UMI_NAND_TCR_WORD16, &REG_UMI_NAND_TCR);
- /* enable writes to flash */
- writel(readl(&REG_UMI_MMD_ICR) | REG_UMI_MMD_ICR_FLASH_WP, &REG_UMI_MMD_ICR);
-
- writel(NAND_CMD_RESET, bcm_umi_io_base + REG_NAND_CMD_OFFSET);
- nand_bcm_umi_wait_till_ready();
-
-#if NAND_ECC_BCH
- nand_bcm_umi_bch_config_ecc(NAND_ECC_NUM_BYTES);
-#endif
-
- return 0;
-}
-
-/* Used to turn latch the proper register for access. */
-static void bcm_umi_nand_hwcontrol(struct mtd_info *mtd, int cmd,
- unsigned int ctrl)
-{
- /* send command to hardware */
- struct nand_chip *chip = mtd->priv;
- if (ctrl & NAND_CTRL_CHANGE) {
- if (ctrl & NAND_CLE) {
- chip->IO_ADDR_W = bcm_umi_io_base + REG_NAND_CMD_OFFSET;
- goto CMD;
- }
- if (ctrl & NAND_ALE) {
- chip->IO_ADDR_W =
- bcm_umi_io_base + REG_NAND_ADDR_OFFSET;
- goto CMD;
- }
- chip->IO_ADDR_W = bcm_umi_io_base + REG_NAND_DATA8_OFFSET;
- }
-
-CMD:
- /* Send command to chip directly */
- if (cmd != NAND_CMD_NONE)
- writeb(cmd, chip->IO_ADDR_W);
-}
-
-static void bcm_umi_nand_write_buf(struct mtd_info *mtd, const u_char * buf,
- int len)
-{
- if (USE_DIRECT_IO(len)) {
- /* Do it the old way if the buffer is small or too large.
- * Probably quicker than starting and checking dma. */
- int i;
- struct nand_chip *this = mtd->priv;
-
- for (i = 0; i < len; i++)
- writeb(buf[i], this->IO_ADDR_W);
- }
-#if USE_DMA
- else
- nand_dma_write(buf, len);
-#endif
-}
-
-static void bcm_umi_nand_read_buf(struct mtd_info *mtd, u_char * buf, int len)
-{
- if (USE_DIRECT_IO(len)) {
- int i;
- struct nand_chip *this = mtd->priv;
-
- for (i = 0; i < len; i++)
- buf[i] = readb(this->IO_ADDR_R);
- }
-#if USE_DMA
- else
- nand_dma_read(buf, len);
-#endif
-}
-
-static uint8_t readbackbuf[NAND_MAX_PAGESIZE];
-static int bcm_umi_nand_verify_buf(struct mtd_info *mtd, const u_char * buf,
- int len)
-{
- /*
- * Try to readback page with ECC correction. This is necessary
- * for MLC parts which may have permanently stuck bits.
- */
- struct nand_chip *chip = mtd->priv;
- int ret = chip->ecc.read_page(mtd, chip, readbackbuf, 0, 0);
- if (ret < 0)
- return -EFAULT;
- else {
- if (memcmp(readbackbuf, buf, len) == 0)
- return 0;
-
- return -EFAULT;
- }
- return 0;
-}
-
-static int __devinit bcm_umi_nand_probe(struct platform_device *pdev)
-{
- struct nand_chip *this;
- struct resource *r;
- int err = 0;
-
- printk(gBanner);
-
- /* Allocate memory for MTD device structure and private data */
- board_mtd =
- kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip),
- GFP_KERNEL);
- if (!board_mtd) {
- printk(KERN_WARNING
- "Unable to allocate NAND MTD device structure.\n");
- return -ENOMEM;
- }
-
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- if (!r) {
- err = -ENXIO;
- goto out_free;
- }
-
- /* map physical address */
- bcm_umi_io_base = ioremap(r->start, resource_size(r));
-
- if (!bcm_umi_io_base) {
- printk(KERN_ERR "ioremap to access BCM UMI NAND chip failed\n");
- err = -EIO;
- goto out_free;
- }
-
- /* Get pointer to private data */
- this = (struct nand_chip *)(&board_mtd[1]);
-
- /* Initialize structures */
- memset((char *)board_mtd, 0, sizeof(struct mtd_info));
- memset((char *)this, 0, sizeof(struct nand_chip));
-
- /* Link the private data with the MTD structure */
- board_mtd->priv = this;
-
- /* Initialize the NAND hardware. */
- if (bcm_umi_nand_inithw() < 0) {
- printk(KERN_ERR "BCM UMI NAND chip could not be initialized\n");
- err = -EIO;
- goto out_unmap;
- }
-
- /* Set address of NAND IO lines */
- this->IO_ADDR_W = bcm_umi_io_base + REG_NAND_DATA8_OFFSET;
- this->IO_ADDR_R = bcm_umi_io_base + REG_NAND_DATA8_OFFSET;
-
- /* Set command delay time, see datasheet for correct value */
- this->chip_delay = 0;
- /* Assign the device ready function, if available */
- this->dev_ready = nand_dev_ready;
- this->options = 0;
-
- this->write_buf = bcm_umi_nand_write_buf;
- this->read_buf = bcm_umi_nand_read_buf;
- this->verify_buf = bcm_umi_nand_verify_buf;
-
- this->cmd_ctrl = bcm_umi_nand_hwcontrol;
- this->ecc.mode = NAND_ECC_HW;
- this->ecc.size = 512;
- this->ecc.bytes = NAND_ECC_NUM_BYTES;
-#if NAND_ECC_BCH
- this->ecc.read_page = bcm_umi_bch_read_page_hwecc;
- this->ecc.write_page = bcm_umi_bch_write_page_hwecc;
-#else
- this->ecc.correct = nand_correct_data512;
- this->ecc.calculate = bcm_umi_hamming_get_hw_ecc;
- this->ecc.hwctl = bcm_umi_hamming_enable_hwecc;
-#endif
-
-#if USE_DMA
- err = nand_dma_init();
- if (err != 0)
- goto out_unmap;
-#endif
-
- /* Figure out the size of the device that we have.
- * We need to do this to figure out which ECC
- * layout we'll be using.
- */
-
- err = nand_scan_ident(board_mtd, 1, NULL);
- if (err) {
- printk(KERN_ERR "nand_scan failed: %d\n", err);
- goto out_unmap;
- }
-
- /* Now that we know the nand size, we can setup the ECC layout */
-
- switch (board_mtd->writesize) { /* writesize is the pagesize */
- case 4096:
- this->ecc.layout = &nand_hw_eccoob_4096;
- break;
- case 2048:
- this->ecc.layout = &nand_hw_eccoob_2048;
- break;
- case 512:
- this->ecc.layout = &nand_hw_eccoob_512;
- break;
- default:
- {
- printk(KERN_ERR "NAND - Unrecognized pagesize: %d\n",
- board_mtd->writesize);
- err = -EINVAL;
- goto out_unmap;
- }
- }
-
-#if NAND_ECC_BCH
- if (board_mtd->writesize > 512) {
- if (this->bbt_options & NAND_BBT_USE_FLASH)
- largepage_bbt.options = NAND_BBT_SCAN2NDPAGE;
- this->badblock_pattern = &largepage_bbt;
- }
-
- this->ecc.strength = 8;
-
-#endif
-
- /* Now finish off the scan, now that ecc.layout has been initialized. */
-
- err = nand_scan_tail(board_mtd);
- if (err) {
- printk(KERN_ERR "nand_scan failed: %d\n", err);
- goto out_unmap;
- }
-
- /* Register the partitions */
- board_mtd->name = "bcm_umi-nand";
- mtd_device_parse_register(board_mtd, NULL, NULL, NULL, 0);
-
- /* Return happy */
- return 0;
-out_unmap:
- iounmap(bcm_umi_io_base);
-out_free:
- kfree(board_mtd);
- return err;
-}
-
-static int bcm_umi_nand_remove(struct platform_device *pdev)
-{
-#if USE_DMA
- nand_dma_term();
-#endif
-
- /* Release resources, unregister device */
- nand_release(board_mtd);
-
- /* unmap physical address */
- iounmap(bcm_umi_io_base);
-
- /* Free the MTD device structure */
- kfree(board_mtd);
-
- return 0;
-}
-
-#ifdef CONFIG_PM
-static int bcm_umi_nand_suspend(struct platform_device *pdev,
- pm_message_t state)
-{
- printk(KERN_ERR "MTD NAND suspend is being called\n");
- return 0;
-}
-
-static int bcm_umi_nand_resume(struct platform_device *pdev)
-{
- printk(KERN_ERR "MTD NAND resume is being called\n");
- return 0;
-}
-#else
-#define bcm_umi_nand_suspend NULL
-#define bcm_umi_nand_resume NULL
-#endif
-
-static struct platform_driver nand_driver = {
- .driver = {
- .name = "bcm-nand",
- .owner = THIS_MODULE,
- },
- .probe = bcm_umi_nand_probe,
- .remove = bcm_umi_nand_remove,
- .suspend = bcm_umi_nand_suspend,
- .resume = bcm_umi_nand_resume,
-};
-
-module_platform_driver(nand_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Broadcom");
-MODULE_DESCRIPTION("BCM UMI MTD NAND driver");
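
The removed driver's DMA comment carries the key arithmetic: the NAND data window 0x80001900-0x80001FFF is only 0x700 = 1792 bytes, too small for a 2K or 4K page, so transfers were split at DMA_MAX_LEN = 1024 (0x80001900 + 0x400 = 0x80001d00, safely below 0x80002000). That chunking pattern, reduced to a sketch — the xfer callback is a hypothetical stand-in for dma_transfer_mem_to_mem plus the completion wait:

#define DMA_MAX_LEN	1024	/* from the removed driver */

static void dma_in_chunks(size_t len,
			  void (*xfer)(size_t offset, size_t nbytes))
{
	size_t offset = 0;

	while (len > 0) {
		size_t n = len > DMA_MAX_LEN ? DMA_MAX_LEN : len;

		xfer(offset, n);	/* one transfer within the window */
		offset += n;
		len -= n;
	}
}
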
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index 3f1c185..ab0caa7 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -566,11 +566,13 @@ static int bf5xx_nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip
return 0;
}
-static void bf5xx_nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf, int oob_required)
+static int bf5xx_nand_write_page_raw(struct mtd_info *mtd,
+ struct nand_chip *chip, const uint8_t *buf, int oob_required)
{
bf5xx_nand_write_buf(mtd, buf, mtd->writesize);
bf5xx_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ return 0;
}
/*
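
The bf5xx hunk above is the simplest instance of a conversion applied across this series: ecc.write_page and ecc.write_page_raw now return int instead of void so controller-level write failures can propagate, and cafe_nand below is a caller updated to check that status. The converted callback contract as a minimal illustrative sketch, not a real driver:

static int example_write_page_raw(struct mtd_info *mtd,
		struct nand_chip *chip, const uint8_t *buf, int oob_required)
{
	/* Push the page and OOB out; plain PIO writes have no failure
	 * to detect at this point, so report success unconditionally. */
	chip->write_buf(mtd, buf, mtd->writesize);
	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);

	return 0;
}
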
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index f3f6cfe..2bb7170 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -377,7 +377,7 @@ static int cafe_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
* @buf: buffer to store read data
* @oob_required: caller expects OOB data read to chip->oob_poi
*
- * The hw generator calculates the error syndrome automatically. Therefor
+ * The hw generator calculates the error syndrome automatically. Therefore
* we need a special oob layout and handling.
*/
static int cafe_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
@@ -520,7 +520,7 @@ static struct nand_bbt_descr cafe_bbt_mirror_descr_512 = {
};
-static void cafe_nand_write_page_lowlevel(struct mtd_info *mtd,
+static int cafe_nand_write_page_lowlevel(struct mtd_info *mtd,
struct nand_chip *chip,
const uint8_t *buf, int oob_required)
{
@@ -531,6 +531,8 @@ static void cafe_nand_write_page_lowlevel(struct mtd_info *mtd,
/* Set up ECC autogeneration */
cafe->ctl2 |= (1<<30);
+
+ return 0;
}
static int cafe_nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
@@ -542,9 +544,12 @@ static int cafe_nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
if (unlikely(raw))
- chip->ecc.write_page_raw(mtd, chip, buf, oob_required);
+ status = chip->ecc.write_page_raw(mtd, chip, buf, oob_required);
else
- chip->ecc.write_page(mtd, chip, buf, oob_required);
+ status = chip->ecc.write_page(mtd, chip, buf, oob_required);
+
+ if (status < 0)
+ return status;
/*
* Cached programming disabled for now, not sure if it's worth the
@@ -571,13 +576,6 @@ static int cafe_nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
status = chip->waitfunc(mtd, chip);
}
-#ifdef CONFIG_MTD_NAND_VERIFY_WRITE
- /* Send command to read back the data */
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
-
- if (chip->verify_buf(mtd, buf, mtd->writesize))
- return -EIO;
-#endif
return 0;
}
diff --git a/drivers/mtd/nand/cmx270_nand.c b/drivers/mtd/nand/cmx270_nand.c
index 1024bfc..39b2ef8 100644
--- a/drivers/mtd/nand/cmx270_nand.c
+++ b/drivers/mtd/nand/cmx270_nand.c
@@ -76,18 +76,6 @@ static void cmx270_read_buf(struct mtd_info *mtd, u_char *buf, int len)
*buf++ = readl(this->IO_ADDR_R) >> 16;
}
-static int cmx270_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
-{
- int i;
- struct nand_chip *this = mtd->priv;
-
- for (i=0; i<len; i++)
- if (buf[i] != (u_char)(readl(this->IO_ADDR_R) >> 16))
- return -EFAULT;
-
- return 0;
-}
-
static inline void nand_cs_on(void)
{
gpio_set_value(GPIO_NAND_CS, 0);
@@ -209,7 +197,6 @@ static int __init cmx270_init(void)
this->read_byte = cmx270_read_byte;
this->read_buf = cmx270_read_buf;
this->write_buf = cmx270_write_buf;
- this->verify_buf = cmx270_verify_buf;
/* Scan to find existence of the device */
if (nand_scan (cmx270_nand_mtd, 1)) {
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index f1deb1e..945047a 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -33,6 +33,7 @@
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/slab.h>
+#include <linux/of_device.h>
#include <linux/platform_data/mtd-davinci.h>
#include <linux/platform_data/mtd-davinci-aemif.h>
@@ -518,9 +519,75 @@ static struct nand_ecclayout hwecc4_2048 __initconst = {
},
};
+#if defined(CONFIG_OF)
+static const struct of_device_id davinci_nand_of_match[] = {
+ {.compatible = "ti,davinci-nand", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, davinci_nand_of_match);
+
+static struct davinci_nand_pdata
+ *nand_davinci_get_pdata(struct platform_device *pdev)
+{
+ if (!pdev->dev.platform_data && pdev->dev.of_node) {
+ struct davinci_nand_pdata *pdata;
+ const char *mode;
+ u32 prop;
+ int len;
+
+ pdata = devm_kzalloc(&pdev->dev,
+ sizeof(struct davinci_nand_pdata),
+ GFP_KERNEL);
+ pdev->dev.platform_data = pdata;
+ if (!pdata)
+ return NULL;
+ if (!of_property_read_u32(pdev->dev.of_node,
+ "ti,davinci-chipselect", &prop))
+ pdev->id = prop;
+ if (!of_property_read_u32(pdev->dev.of_node,
+ "ti,davinci-mask-ale", &prop))
+ pdata->mask_ale = prop;
+ if (!of_property_read_u32(pdev->dev.of_node,
+ "ti,davinci-mask-cle", &prop))
+ pdata->mask_cle = prop;
+ if (!of_property_read_u32(pdev->dev.of_node,
+ "ti,davinci-mask-chipsel", &prop))
+ pdata->mask_chipsel = prop;
+ if (!of_property_read_string(pdev->dev.of_node,
+ "ti,davinci-ecc-mode", &mode)) {
+ if (!strncmp("none", mode, 4))
+ pdata->ecc_mode = NAND_ECC_NONE;
+ if (!strncmp("soft", mode, 4))
+ pdata->ecc_mode = NAND_ECC_SOFT;
+ if (!strncmp("hw", mode, 2))
+ pdata->ecc_mode = NAND_ECC_HW;
+ }
+ if (!of_property_read_u32(pdev->dev.of_node,
+ "ti,davinci-ecc-bits", &prop))
+ pdata->ecc_bits = prop;
+ if (!of_property_read_u32(pdev->dev.of_node,
+ "ti,davinci-nand-buswidth", &prop))
+ if (prop == 16)
+ pdata->options |= NAND_BUSWIDTH_16;
+ if (of_find_property(pdev->dev.of_node,
+ "ti,davinci-nand-use-bbt", &len))
+ pdata->bbt_options = NAND_BBT_USE_FLASH;
+ }
+
+ return pdev->dev.platform_data;
+}
+#else
+#define davinci_nand_of_match NULL
+static struct davinci_nand_pdata
+ *nand_davinci_get_pdata(struct platform_device *pdev)
+{
+ return pdev->dev.platform_data;
+}
+#endif
+
static int __init nand_davinci_probe(struct platform_device *pdev)
{
- struct davinci_nand_pdata *pdata = pdev->dev.platform_data;
+ struct davinci_nand_pdata *pdata;
struct davinci_nand_info *info;
struct resource *res1;
struct resource *res2;
@@ -530,6 +597,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
uint32_t val;
nand_ecc_modes_t ecc_mode;
+ pdata = nand_davinci_get_pdata(pdev);
/* insist on board-specific configuration */
if (!pdata)
return -ENODEV;
@@ -656,7 +724,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
goto err_clk;
}
- ret = clk_enable(info->clk);
+ ret = clk_prepare_enable(info->clk);
if (ret < 0) {
dev_dbg(&pdev->dev, "unable to enable AEMIF clock, err %d\n",
ret);
@@ -767,7 +835,7 @@ syndrome_done:
err_scan:
err_timing:
- clk_disable(info->clk);
+ clk_disable_unprepare(info->clk);
err_clk_enable:
clk_put(info->clk);
@@ -804,7 +872,7 @@ static int __exit nand_davinci_remove(struct platform_device *pdev)
nand_release(&info->mtd);
- clk_disable(info->clk);
+ clk_disable_unprepare(info->clk);
clk_put(info->clk);
kfree(info);
@@ -816,6 +884,8 @@ static struct platform_driver nand_davinci_driver = {
.remove = __exit_p(nand_davinci_remove),
.driver = {
.name = "davinci_nand",
+ .owner = THIS_MODULE,
+ .of_match_table = davinci_nand_of_match,
},
};
MODULE_ALIAS("platform:davinci_nand");
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
index 0650aaf..e706a23 100644
--- a/drivers/mtd/nand/denali.c
+++ b/drivers/mtd/nand/denali.c
@@ -1028,7 +1028,7 @@ static void denali_setup_dma(struct denali_nand_info *denali, int op)
/* writes a page. user specifies type, and this function handles the
* configuration details. */
-static void write_page(struct mtd_info *mtd, struct nand_chip *chip,
+static int write_page(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *buf, bool raw_xfer)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
@@ -1078,6 +1078,8 @@ static void write_page(struct mtd_info *mtd, struct nand_chip *chip,
denali_enable_dma(denali, false);
dma_sync_single_for_cpu(denali->dev, addr, size, DMA_TO_DEVICE);
+
+ return 0;
}
/* NAND core entry points */
@@ -1086,24 +1088,24 @@ static void write_page(struct mtd_info *mtd, struct nand_chip *chip,
* writing a page with ECC or without is similar, all the work is done
* by write_page above.
* */
-static void denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+static int denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *buf, int oob_required)
{
/* for regular page writes, we let HW handle all the ECC
* data written to the device. */
- write_page(mtd, chip, buf, false);
+ return write_page(mtd, chip, buf, false);
}
/* This is the callback that the NAND core calls to write a page without ECC.
* raw access is similar to ECC page writes, so all the work is done in the
* write_page() function above.
*/
-static void denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *buf, int oob_required)
{
/* for raw page writes, we want to disable ECC and simply write
whatever data is in the buffer. */
- write_page(mtd, chip, buf, true);
+ return write_page(mtd, chip, buf, true);
}
static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c
index e2ca067..256eb30 100644
--- a/drivers/mtd/nand/diskonchip.c
+++ b/drivers/mtd/nand/diskonchip.c
@@ -376,19 +376,6 @@ static void doc2000_readbuf_dword(struct mtd_info *mtd, u_char *buf, int len)
}
}
-static int doc2000_verifybuf(struct mtd_info *mtd, const u_char *buf, int len)
-{
- struct nand_chip *this = mtd->priv;
- struct doc_priv *doc = this->priv;
- void __iomem *docptr = doc->virtadr;
- int i;
-
- for (i = 0; i < len; i++)
- if (buf[i] != ReadDOC(docptr, 2k_CDSN_IO))
- return -EFAULT;
- return 0;
-}
-
static uint16_t __init doc200x_ident_chip(struct mtd_info *mtd, int nr)
{
struct nand_chip *this = mtd->priv;
@@ -526,26 +513,6 @@ static void doc2001_readbuf(struct mtd_info *mtd, u_char *buf, int len)
buf[i] = ReadDOC(docptr, LastDataRead);
}
-static int doc2001_verifybuf(struct mtd_info *mtd, const u_char *buf, int len)
-{
- struct nand_chip *this = mtd->priv;
- struct doc_priv *doc = this->priv;
- void __iomem *docptr = doc->virtadr;
- int i;
-
- /* Start read pipeline */
- ReadDOC(docptr, ReadPipeInit);
-
- for (i = 0; i < len - 1; i++)
- if (buf[i] != ReadDOC(docptr, Mil_CDSN_IO)) {
- ReadDOC(docptr, LastDataRead);
- return i;
- }
- if (buf[i] != ReadDOC(docptr, LastDataRead))
- return i;
- return 0;
-}
-
static u_char doc2001plus_read_byte(struct mtd_info *mtd)
{
struct nand_chip *this = mtd->priv;
@@ -610,33 +577,6 @@ static void doc2001plus_readbuf(struct mtd_info *mtd, u_char *buf, int len)
printk("\n");
}
-static int doc2001plus_verifybuf(struct mtd_info *mtd, const u_char *buf, int len)
-{
- struct nand_chip *this = mtd->priv;
- struct doc_priv *doc = this->priv;
- void __iomem *docptr = doc->virtadr;
- int i;
-
- if (debug)
- printk("verifybuf of %d bytes: ", len);
-
- /* Start read pipeline */
- ReadDOC(docptr, Mplus_ReadPipeInit);
- ReadDOC(docptr, Mplus_ReadPipeInit);
-
- for (i = 0; i < len - 2; i++)
- if (buf[i] != ReadDOC(docptr, Mil_CDSN_IO)) {
- ReadDOC(docptr, Mplus_LastDataRead);
- ReadDOC(docptr, Mplus_LastDataRead);
- return i;
- }
- if (buf[len - 2] != ReadDOC(docptr, Mplus_LastDataRead))
- return len - 2;
- if (buf[len - 1] != ReadDOC(docptr, Mplus_LastDataRead))
- return len - 1;
- return 0;
-}
-
static void doc2001plus_select_chip(struct mtd_info *mtd, int chip)
{
struct nand_chip *this = mtd->priv;
@@ -1432,7 +1372,6 @@ static inline int __init doc2000_init(struct mtd_info *mtd)
this->read_byte = doc2000_read_byte;
this->write_buf = doc2000_writebuf;
this->read_buf = doc2000_readbuf;
- this->verify_buf = doc2000_verifybuf;
this->scan_bbt = nftl_scan_bbt;
doc->CDSNControl = CDSN_CTRL_FLASH_IO | CDSN_CTRL_ECC_IO;
@@ -1449,7 +1388,6 @@ static inline int __init doc2001_init(struct mtd_info *mtd)
this->read_byte = doc2001_read_byte;
this->write_buf = doc2001_writebuf;
this->read_buf = doc2001_readbuf;
- this->verify_buf = doc2001_verifybuf;
ReadDOC(doc->virtadr, ChipID);
ReadDOC(doc->virtadr, ChipID);
@@ -1480,7 +1418,6 @@ static inline int __init doc2001plus_init(struct mtd_info *mtd)
this->read_byte = doc2001plus_read_byte;
this->write_buf = doc2001plus_writebuf;
this->read_buf = doc2001plus_readbuf;
- this->verify_buf = doc2001plus_verifybuf;
this->scan_bbt = inftl_scan_bbt;
this->cmd_ctrl = NULL;
this->select_chip = doc2001plus_select_chip;
diff --git a/drivers/mtd/nand/docg4.c b/drivers/mtd/nand/docg4.c
index a225e49..799da5d 100644
--- a/drivers/mtd/nand/docg4.c
+++ b/drivers/mtd/nand/docg4.c
@@ -378,9 +378,9 @@ static int correct_data(struct mtd_info *mtd, uint8_t *buf, int page)
* bit flips(s) are not reported in stats.
*/
- if (doc->oob_buf[15]) {
+ if (nand->oob_poi[15]) {
int bit, numsetbits = 0;
- unsigned long written_flag = doc->oob_buf[15];
+ unsigned long written_flag = nand->oob_poi[15];
for_each_set_bit(bit, &written_flag, 8)
numsetbits++;
if (numsetbits > 4) { /* assume blank */
@@ -428,7 +428,7 @@ static int correct_data(struct mtd_info *mtd, uint8_t *buf, int page)
/* if error within oob area preceding ecc bytes... */
if (errpos[i] > DOCG4_PAGE_SIZE * 8)
change_bit(errpos[i] - DOCG4_PAGE_SIZE * 8,
- (unsigned long *)doc->oob_buf);
+ (unsigned long *)nand->oob_poi);
else /* error in page data */
change_bit(errpos[i], (unsigned long *)buf);
@@ -748,18 +748,12 @@ static int read_page(struct mtd_info *mtd, struct nand_chip *nand,
docg4_read_buf(mtd, buf, DOCG4_PAGE_SIZE); /* read the page data */
- /*
- * Diskonchips read oob immediately after a page read. Mtd
- * infrastructure issues a separate command for reading oob after the
- * page is read. So we save the oob bytes in a local buffer and just
- * copy it if the next command reads oob from the same page.
- */
-
+ /* this device always reads oob after page data */
/* first 14 oob bytes read from I/O reg */
- docg4_read_buf(mtd, doc->oob_buf, 14);
+ docg4_read_buf(mtd, nand->oob_poi, 14);
/* last 2 read from another reg */
- buf16 = (uint16_t *)(doc->oob_buf + 14);
+ buf16 = (uint16_t *)(nand->oob_poi + 14);
*buf16 = readw(docptr + DOCG4_MYSTERY_REG);
write_nop(docptr);
@@ -782,6 +776,8 @@ static int read_page(struct mtd_info *mtd, struct nand_chip *nand,
}
writew(0, docptr + DOC_DATAEND);
+ if (bits_corrected == -EBADMSG) /* uncorrectable errors */
+ return 0;
return bits_corrected;
}
@@ -807,21 +803,6 @@ static int docg4_read_oob(struct mtd_info *mtd, struct nand_chip *nand,
dev_dbg(doc->dev, "%s: page %x\n", __func__, page);
- /*
- * Oob bytes are read as part of a normal page read. If the previous
- * nand command was a read of the page whose oob is now being read, just
- * copy the oob bytes that we saved in a local buffer and avoid a
- * separate oob read.
- */
- if (doc->last_command.command == NAND_CMD_READ0 &&
- doc->last_command.page == page) {
- memcpy(nand->oob_poi, doc->oob_buf, 16);
- return 0;
- }
-
- /*
- * Separate read of oob data only.
- */
docg4_command(mtd, NAND_CMD_READ0, nand->ecc.size, page);
writew(DOC_ECCCONF0_READ_MODE | DOCG4_OOB_SIZE, docptr + DOC_ECCCONF0);
@@ -898,7 +879,7 @@ static void docg4_erase_block(struct mtd_info *mtd, int page)
write_nop(docptr);
}
-static void write_page(struct mtd_info *mtd, struct nand_chip *nand,
+static int write_page(struct mtd_info *mtd, struct nand_chip *nand,
const uint8_t *buf, bool use_ecc)
{
struct docg4_priv *doc = nand->priv;
@@ -950,15 +931,17 @@ static void write_page(struct mtd_info *mtd, struct nand_chip *nand,
write_nop(docptr);
writew(0, docptr + DOC_DATAEND);
write_nop(docptr);
+
+ return 0;
}
-static void docg4_write_page_raw(struct mtd_info *mtd, struct nand_chip *nand,
+static int docg4_write_page_raw(struct mtd_info *mtd, struct nand_chip *nand,
const uint8_t *buf, int oob_required)
{
return write_page(mtd, nand, buf, false);
}
-static void docg4_write_page(struct mtd_info *mtd, struct nand_chip *nand,
+static int docg4_write_page(struct mtd_info *mtd, struct nand_chip *nand,
const uint8_t *buf, int oob_required)
{
return write_page(mtd, nand, buf, true);
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index 7842938..cc1480a 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -614,41 +614,6 @@ static void fsl_elbc_read_buf(struct mtd_info *mtd, u8 *buf, int len)
len, avail);
}
-/*
- * Verify buffer against the FCM Controller Data Buffer
- */
-static int fsl_elbc_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
-{
- struct nand_chip *chip = mtd->priv;
- struct fsl_elbc_mtd *priv = chip->priv;
- struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand;
- int i;
-
- if (len < 0) {
- dev_err(priv->dev, "write_buf of %d bytes", len);
- return -EINVAL;
- }
-
- if ((unsigned int)len >
- elbc_fcm_ctrl->read_bytes - elbc_fcm_ctrl->index) {
- dev_err(priv->dev,
- "verify_buf beyond end of buffer "
- "(%d requested, %u available)\n",
- len, elbc_fcm_ctrl->read_bytes - elbc_fcm_ctrl->index);
-
- elbc_fcm_ctrl->index = elbc_fcm_ctrl->read_bytes;
- return -EINVAL;
- }
-
- for (i = 0; i < len; i++)
- if (in_8(&elbc_fcm_ctrl->addr[elbc_fcm_ctrl->index + i])
- != buf[i])
- break;
-
- elbc_fcm_ctrl->index += len;
- return i == len && elbc_fcm_ctrl->status == LTESR_CC ? 0 : -EIO;
-}
-
/* This function is called after Program and Erase Operations to
* check for success or failure.
*/
@@ -766,11 +731,13 @@ static int fsl_elbc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
/* ECC will be calculated automatically, and errors will be detected in
* waitfunc.
*/
-static void fsl_elbc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+static int fsl_elbc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *buf, int oob_required)
{
fsl_elbc_write_buf(mtd, buf, mtd->writesize);
fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ return 0;
}
static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
@@ -796,7 +763,6 @@ static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
chip->read_byte = fsl_elbc_read_byte;
chip->write_buf = fsl_elbc_write_buf;
chip->read_buf = fsl_elbc_read_buf;
- chip->verify_buf = fsl_elbc_verify_buf;
chip->select_chip = fsl_elbc_select_chip;
chip->cmdfunc = fsl_elbc_cmdfunc;
chip->waitfunc = fsl_elbc_wait;
@@ -805,7 +771,6 @@ static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
chip->bbt_md = &bbt_mirror_descr;
/* set up nand options */
- chip->options = NAND_NO_READRDY;
chip->bbt_options = NAND_BBT_USE_FLASH;
chip->controller = &elbc_fcm_ctrl->controller;
@@ -916,7 +881,8 @@ static int __devinit fsl_elbc_nand_probe(struct platform_device *pdev)
elbc_fcm_ctrl->chips[bank] = priv;
priv->bank = bank;
priv->ctrl = fsl_lbc_ctrl_dev;
- priv->dev = dev;
+ priv->dev = &pdev->dev;
+ dev_set_drvdata(priv->dev, priv);
priv->vbase = ioremap(res.start, resource_size(&res));
if (!priv->vbase) {
@@ -963,11 +929,10 @@ err:
static int fsl_elbc_nand_remove(struct platform_device *pdev)
{
- int i;
struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = fsl_lbc_ctrl_dev->nand;
- for (i = 0; i < MAX_BANKS; i++)
- if (elbc_fcm_ctrl->chips[i])
- fsl_elbc_chip_remove(elbc_fcm_ctrl->chips[i]);
+ struct fsl_elbc_mtd *priv = dev_get_drvdata(&pdev->dev);
+
+ fsl_elbc_chip_remove(priv);
mutex_lock(&fsl_elbc_nand_mutex);
elbc_fcm_ctrl->counter--;
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
index 01e2f2e..3551a99 100644
--- a/drivers/mtd/nand/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/fsl_ifc_nand.c
@@ -194,7 +194,7 @@ static int is_blank(struct mtd_info *mtd, unsigned int bufnum)
struct nand_chip *chip = mtd->priv;
struct fsl_ifc_mtd *priv = chip->priv;
u8 __iomem *addr = priv->vbase + bufnum * (mtd->writesize * 2);
- u32 __iomem *mainarea = (u32 *)addr;
+ u32 __iomem *mainarea = (u32 __iomem *)addr;
u8 __iomem *oob = addr + mtd->writesize;
int i;
@@ -592,8 +592,8 @@ static uint8_t fsl_ifc_read_byte16(struct mtd_info *mtd)
* next byte.
*/
if (ifc_nand_ctrl->index < ifc_nand_ctrl->read_bytes) {
- data = in_be16((uint16_t *)&ifc_nand_ctrl->
- addr[ifc_nand_ctrl->index]);
+ data = in_be16((uint16_t __iomem *)&ifc_nand_ctrl->
+ addr[ifc_nand_ctrl->index]);
ifc_nand_ctrl->index += 2;
return (uint8_t) data;
}
@@ -628,46 +628,6 @@ static void fsl_ifc_read_buf(struct mtd_info *mtd, u8 *buf, int len)
}
/*
- * Verify buffer against the IFC Controller Data Buffer
- */
-static int fsl_ifc_verify_buf(struct mtd_info *mtd,
- const u_char *buf, int len)
-{
- struct nand_chip *chip = mtd->priv;
- struct fsl_ifc_mtd *priv = chip->priv;
- struct fsl_ifc_ctrl *ctrl = priv->ctrl;
- struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl;
- int i;
-
- if (len < 0) {
- dev_err(priv->dev, "%s: write_buf of %d bytes", __func__, len);
- return -EINVAL;
- }
-
- if ((unsigned int)len > nctrl->read_bytes - nctrl->index) {
- dev_err(priv->dev,
- "%s: beyond end of buffer (%d requested, %u available)\n",
- __func__, len, nctrl->read_bytes - nctrl->index);
-
- nctrl->index = nctrl->read_bytes;
- return -EINVAL;
- }
-
- for (i = 0; i < len; i++)
- if (in_8(&nctrl->addr[nctrl->index + i]) != buf[i])
- break;
-
- nctrl->index += len;
-
- if (i != len)
- return -EIO;
- if (ctrl->nand_stat != IFC_NAND_EVTER_STAT_OPC)
- return -EIO;
-
- return 0;
-}
-
-/*
* This function is called after Program and Erase Operations to
* check for success or failure.
*/
@@ -722,11 +682,13 @@ static int fsl_ifc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
/* ECC will be calculated automatically, and errors will be detected in
* waitfunc.
*/
-static void fsl_ifc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+static int fsl_ifc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *buf, int oob_required)
{
fsl_ifc_write_buf(mtd, buf, mtd->writesize);
fsl_ifc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ return 0;
}
static int fsl_ifc_chip_init_tail(struct mtd_info *mtd)
@@ -844,7 +806,6 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
chip->write_buf = fsl_ifc_write_buf;
chip->read_buf = fsl_ifc_read_buf;
- chip->verify_buf = fsl_ifc_verify_buf;
chip->select_chip = fsl_ifc_select_chip;
chip->cmdfunc = fsl_ifc_cmdfunc;
chip->waitfunc = fsl_ifc_wait;
@@ -855,7 +816,6 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
out_be32(&ifc->ifc_nand.ncfgr, 0x0);
/* set up nand options */
- chip->options = NAND_NO_READRDY;
chip->bbt_options = NAND_BBT_USE_FLASH;
diff --git a/drivers/mtd/nand/gpio.c b/drivers/mtd/nand/gpio.c
index 27000a5..bc73bc5 100644
--- a/drivers/mtd/nand/gpio.c
+++ b/drivers/mtd/nand/gpio.c
@@ -100,23 +100,6 @@ static void gpio_nand_readbuf(struct mtd_info *mtd, u_char *buf, int len)
readsb(this->IO_ADDR_R, buf, len);
}
-static int gpio_nand_verifybuf(struct mtd_info *mtd, const u_char *buf, int len)
-{
- struct nand_chip *this = mtd->priv;
- unsigned char read, *p = (unsigned char *) buf;
- int i, err = 0;
-
- for (i = 0; i < len; i++) {
- read = readb(this->IO_ADDR_R);
- if (read != p[i]) {
- pr_debug("%s: err at %d (read %04x vs %04x)\n",
- __func__, i, read, p[i]);
- err = -EFAULT;
- }
- }
- return err;
-}
-
static void gpio_nand_writebuf16(struct mtd_info *mtd, const u_char *buf,
int len)
{
@@ -148,26 +131,6 @@ static void gpio_nand_readbuf16(struct mtd_info *mtd, u_char *buf, int len)
}
}
-static int gpio_nand_verifybuf16(struct mtd_info *mtd, const u_char *buf,
- int len)
-{
- struct nand_chip *this = mtd->priv;
- unsigned short read, *p = (unsigned short *) buf;
- int i, err = 0;
- len >>= 1;
-
- for (i = 0; i < len; i++) {
- read = readw(this->IO_ADDR_R);
- if (read != p[i]) {
- pr_debug("%s: err at %d (read %04x vs %04x)\n",
- __func__, i, read, p[i]);
- err = -EFAULT;
- }
- }
- return err;
-}
-
-
static int gpio_nand_devready(struct mtd_info *mtd)
{
struct gpiomtd *gpiomtd = gpio_nand_getpriv(mtd);
@@ -391,11 +354,9 @@ static int __devinit gpio_nand_probe(struct platform_device *dev)
if (this->options & NAND_BUSWIDTH_16) {
this->read_buf = gpio_nand_readbuf16;
this->write_buf = gpio_nand_writebuf16;
- this->verify_buf = gpio_nand_verifybuf16;
} else {
this->read_buf = gpio_nand_readbuf;
this->write_buf = gpio_nand_writebuf;
- this->verify_buf = gpio_nand_verifybuf;
}
/* set the mtd private data for the nand driver */
@@ -456,20 +417,7 @@ static struct platform_driver gpio_nand_driver = {
},
};
-static int __init gpio_nand_init(void)
-{
- printk(KERN_INFO "GPIO NAND driver, © 2004 Simtec Electronics\n");
-
- return platform_driver_register(&gpio_nand_driver);
-}
-
-static void __exit gpio_nand_exit(void)
-{
- platform_driver_unregister(&gpio_nand_driver);
-}
-
-module_init(gpio_nand_init);
-module_exit(gpio_nand_exit);
+module_platform_driver(gpio_nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
index a1f4332..3502acc 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
@@ -26,7 +26,7 @@
#include "gpmi-regs.h"
#include "bch-regs.h"
-struct timing_threshod timing_default_threshold = {
+static struct timing_threshod timing_default_threshold = {
.max_data_setup_cycles = (BM_GPMI_TIMING0_DATA_SETUP >>
BP_GPMI_TIMING0_DATA_SETUP),
.internal_data_setup_in_ns = 0,
@@ -124,12 +124,42 @@ error:
return -ETIMEDOUT;
}
+static int __gpmi_enable_clk(struct gpmi_nand_data *this, bool v)
+{
+ struct clk *clk;
+ int ret;
+ int i;
+
+ for (i = 0; i < GPMI_CLK_MAX; i++) {
+ clk = this->resources.clock[i];
+ if (!clk)
+ break;
+
+ if (v) {
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ goto err_clk;
+ } else {
+ clk_disable_unprepare(clk);
+ }
+ }
+ return 0;
+
+err_clk:
+ for (; i > 0; i--)
+ clk_disable_unprepare(this->resources.clock[i - 1]);
+ return ret;
+}
+
+#define gpmi_enable_clk(x) __gpmi_enable_clk(x, true)
+#define gpmi_disable_clk(x) __gpmi_enable_clk(x, false)
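Note the unwind in __gpmi_enable_clk(): if clk_prepare_enable() fails for
clock[i], the err_clk loop disables clocks [i - 1 .. 0] in reverse order, so a
partial enable never leaks prepared clocks. For example, if clock[2] fails to
prepare, clock[1] and then clock[0] are disabled before the error from
clk_prepare_enable() is propagated.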
+
int gpmi_init(struct gpmi_nand_data *this)
{
struct resources *r = &this->resources;
int ret;
- ret = clk_prepare_enable(r->clock);
+ ret = gpmi_enable_clk(this);
if (ret)
goto err_out;
ret = gpmi_reset_block(r->gpmi_regs, false);
@@ -149,7 +179,7 @@ int gpmi_init(struct gpmi_nand_data *this)
/* Select BCH ECC. */
writel(BM_GPMI_CTRL1_BCH_MODE, r->gpmi_regs + HW_GPMI_CTRL1_SET);
- clk_disable_unprepare(r->clock);
+ gpmi_disable_clk(this);
return 0;
err_out:
return ret;
@@ -205,7 +235,7 @@ int bch_set_geometry(struct gpmi_nand_data *this)
ecc_strength = bch_geo->ecc_strength >> 1;
page_size = bch_geo->page_size;
- ret = clk_prepare_enable(r->clock);
+ ret = gpmi_enable_clk(this);
if (ret)
goto err_out;
@@ -240,7 +270,7 @@ int bch_set_geometry(struct gpmi_nand_data *this)
writel(BM_BCH_CTRL_COMPLETE_IRQ_EN,
r->bch_regs + HW_BCH_CTRL_SET);
- clk_disable_unprepare(r->clock);
+ gpmi_disable_clk(this);
return 0;
err_out:
return ret;
@@ -263,6 +293,7 @@ static int gpmi_nfc_compute_hardware_timing(struct gpmi_nand_data *this,
struct gpmi_nfc_hardware_timing *hw)
{
struct timing_threshod *nfc = &timing_default_threshold;
+ struct resources *r = &this->resources;
struct nand_chip *nand = &this->nand;
struct nand_timing target = this->timing;
bool improved_timing_is_available;
@@ -302,8 +333,9 @@ static int gpmi_nfc_compute_hardware_timing(struct gpmi_nand_data *this,
(target.tRHOH_in_ns >= 0) ;
/* Inspect the clock. */
+ nfc->clock_frequency_in_hz = clk_get_rate(r->clock[0]);
clock_frequency_in_hz = nfc->clock_frequency_in_hz;
- clock_period_in_ns = 1000000000 / clock_frequency_in_hz;
+ clock_period_in_ns = NSEC_PER_SEC / clock_frequency_in_hz;
/*
* The NFC quantizes setup and hold parameters in terms of clock cycles.
@@ -698,17 +730,230 @@ return_results:
hw->address_setup_in_cycles = address_setup_in_cycles;
hw->use_half_periods = dll_use_half_periods;
hw->sample_delay_factor = sample_delay_factor;
+ hw->device_busy_timeout = GPMI_DEFAULT_BUSY_TIMEOUT;
+ hw->wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_4_TO_8NS;
/* Return success. */
return 0;
}
+/*
+ * <1> First, we should know what the GPMI-clock means.
+ * The GPMI-clock is the internal clock of the gpmi nand controller.
+ * If the gpmi nand controller is clocked at 100MHz, the GPMI-clock's
+ * period is 10ns. Call the GPMI-clock's period the GPMI-clock-period.
+ *
+ * <2> Second, we should know the frequency on the nand chip pins.
+ * The frequency on the nand chip pins is derived from the GPMI-clock.
+ * We can get it from the following equation:
+ *
+ * F = G / (DS + DH)
+ *
+ * F : the frequency on the nand chip pins.
+ * G : the GPMI clock, such as 100MHz.
+ * DS : GPMI_HW_GPMI_TIMING0:DATA_SETUP
+ * DH : GPMI_HW_GPMI_TIMING0:DATA_HOLD
+ *
+ * <3> Third, when the frequency on the nand chip pins is above 33MHz,
+ * the nand EDO (Extended Data Out) timing can be applied.
+ * The GPMI implements a feedback read strobe to sample the read data.
+ * The feedback read strobe can be delayed to support the nand EDO timing,
+ * where the read strobe may deassert before the read data is valid, and
+ * the read data remains valid for some time after the read strobe.
+ *
+ * The following figure illustrates some aspects of a NAND Flash read:
+ *
+ * |<---tREA---->|
+ * | |
+ * | | |
+ * |<--tRP-->| |
+ * | | |
+ * __ ___|__________________________________
+ * RDN \________/ |
+ * |
+ * /---------\
+ * Read Data --------------< >---------
+ * \---------/
+ * | |
+ * |<-D->|
+ * FeedbackRDN ________ ____________
+ * \___________/
+ *
+ * D stands for delay, set in the HW_GPMI_CTRL1:RDN_DELAY.
+ *
+ *
+ * <4> Now, we begin to describe how to compute the right RDN_DELAY.
+ *
+ * 4.1) From the aspect of the nand chip pins:
+ * Delay = (tREA + C - tRP) {1}
+ *
+ * tREA : the maximum read access time. From the ONFI nand standards,
+ * we know that tREA is 16ns in mode 5 and 20ns in mode 4.
+ * Please check it at: www.onfi.org
+ * C : a constant to adjust the delay; the default is 4.
+ * tRP : the read pulse width.
+ * Specified by the HW_GPMI_TIMING0:DATA_SETUP:
+ * tRP = (GPMI-clock-period) * DATA_SETUP
+ *
+ * 4.2) From the aspect of the GPMI nand controller:
+ * Delay = RDN_DELAY * 0.125 * RP {2}
+ *
+ * RP : the DLL reference period.
+ * if (GPMI-clock-period > DLL_THRESHOLD)
+ * RP = GPMI-clock-period / 2;
+ * else
+ * RP = GPMI-clock-period;
+ *
+ * Set the HW_GPMI_CTRL1:HALF_PERIOD if GPMI-clock-period
+ * is greater than DLL_THRESHOLD. On other SoCs, the DLL_THRESHOLD
+ * is 16ns, but on mx6q, we use 12ns.
+ *
+ * 4.3) Since {1} equals {2}, we get:
+ *
+ * (tREA + 4 - tRP) * 8
+ * RDN_DELAY = --------------------- {3}
+ * RP
+ *
+ * 4.4) We only support the fastest asynchronous mode of ONFI nand.
+ * For some ONFI nand chips, mode 4 is the fastest mode;
+ * for others, mode 5 is the fastest mode.
+ * So we only support modes 4 and 5; there is no need to
+ * support the other modes.
+ */
+static void gpmi_compute_edo_timing(struct gpmi_nand_data *this,
+ struct gpmi_nfc_hardware_timing *hw)
+{
+ struct resources *r = &this->resources;
+ unsigned long rate = clk_get_rate(r->clock[0]);
+ int mode = this->timing_mode;
+ int dll_threshold = 16; /* in ns */
+ unsigned long delay;
+ unsigned long clk_period;
+ int t_rea;
+ int c = 4;
+ int t_rp;
+ int rp;
+
+ /*
+ * [1] for GPMI_HW_GPMI_TIMING0:
+ * The async mode requires 40MHz for mode 4, 50MHz for mode 5.
+ * The GPMI can support 100MHz at most. So to get
+ * 40MHz or 50MHz, we have to set DS=1, DH=1.
+ * Set the ADDRESS_SETUP to 0 in mode 4.
+ */
+ hw->data_setup_in_cycles = 1;
+ hw->data_hold_in_cycles = 1;
+ hw->address_setup_in_cycles = ((mode == 5) ? 1 : 0);
+
+ /* [2] for GPMI_HW_GPMI_TIMING1 */
+ hw->device_busy_timeout = 0x9000;
+
+ /* [3] for GPMI_HW_GPMI_CTRL1 */
+ hw->wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY;
+
+ if (GPMI_IS_MX6Q(this))
+ dll_threshold = 12;
+
+ /*
+ * Scale the numerator and denominator in {3} by 10.
+ * This gives us a more accurate result.
+ */
+ clk_period = NSEC_PER_SEC / (rate / 10);
+ dll_threshold *= 10;
+ t_rea = ((mode == 5) ? 16 : 20) * 10;
+ c *= 10;
+
+ t_rp = clk_period * 1; /* DATA_SETUP is 1 */
+
+ if (clk_period > dll_threshold) {
+ hw->use_half_periods = 1;
+ rp = clk_period / 2;
+ } else {
+ hw->use_half_periods = 0;
+ rp = clk_period;
+ }
+
+ /*
+ * With the numerator multiplied by 10, we can round off:
+ * 7.8 rounds up to 8; 7.4 rounds down to 7.
+ */
+ delay = (((t_rea + c - t_rp) * 8) * 10) / rp;
+ delay = (delay + 5) / 10;
+
+ hw->sample_delay_factor = delay;
+}
+
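A worked instance of {3}, using the mode-5 numbers from the comment above and
the 100MHz rate that enable_edo_mode() sets below: GPMI-clock-period = 10ns,
so with DS = DH = 1 the pin frequency is F = 100MHz / (1 + 1) = 50MHz.
tREA = 16ns, C = 4, tRP = 10ns * 1 = 10ns. Since 10ns is below the 12ns mx6q
threshold, RP = 10ns and HALF_PERIOD stays clear, giving:

        RDN_DELAY = (16 + 4 - 10) * 8 / 10 = 8

In the x10 integer arithmetic of the function:
delay = ((160 + 40 - 100) * 8 * 10) / 100 = 80, then (80 + 5) / 10 = 8.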
+static int enable_edo_mode(struct gpmi_nand_data *this, int mode)
+{
+ struct resources *r = &this->resources;
+ struct nand_chip *nand = &this->nand;
+ struct mtd_info *mtd = &this->mtd;
+ uint8_t feature[ONFI_SUBFEATURE_PARAM_LEN] = {};
+ unsigned long rate;
+ int ret;
+
+ nand->select_chip(mtd, 0);
+
+ /* [1] send SET FEATURE command to NAND */
+ feature[0] = mode;
+ ret = nand->onfi_set_features(mtd, nand,
+ ONFI_FEATURE_ADDR_TIMING_MODE, feature);
+ if (ret)
+ goto err_out;
+
+ /* [2] send GET FEATURE command to double-check the timing mode */
+ memset(feature, 0, ONFI_SUBFEATURE_PARAM_LEN);
+ ret = nand->onfi_get_features(mtd, nand,
+ ONFI_FEATURE_ADDR_TIMING_MODE, feature);
+ if (ret || feature[0] != mode)
+ goto err_out;
+
+ nand->select_chip(mtd, -1);
+
+ /* [3] set the main IO clock, 100MHz for mode 5, 80MHz for mode 4. */
+ rate = (mode == 5) ? 100000000 : 80000000;
+ clk_set_rate(r->clock[0], rate);
+
+ /* Let gpmi_begin() recompute the timing. */
+ this->flags &= ~GPMI_TIMING_INIT_OK;
+
+ this->flags |= GPMI_ASYNC_EDO_ENABLED;
+ this->timing_mode = mode;
+ dev_info(this->dev, "enabled the asynchronous EDO mode %d\n", mode);
+ return 0;
+
+err_out:
+ nand->select_chip(mtd, -1);
+ dev_err(this->dev, "mode %d: failed to set feature.\n", mode);
+ return -EINVAL;
+}
+
+int gpmi_extra_init(struct gpmi_nand_data *this)
+{
+ struct nand_chip *chip = &this->nand;
+
+ /* Enable the asynchronous EDO feature. */
+ if (GPMI_IS_MX6Q(this) && chip->onfi_version) {
+ int mode = onfi_get_async_timing_mode(chip);
+
+ /* We only support timing modes 4 and 5. */
+ if (mode & ONFI_TIMING_MODE_5)
+ mode = 5;
+ else if (mode & ONFI_TIMING_MODE_4)
+ mode = 4;
+ else
+ return 0;
+
+ return enable_edo_mode(this, mode);
+ }
+ return 0;
+}
+
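onfi_get_async_timing_mode() returns the ONFI parameter page's asynchronous
timing-mode field, a bitmask with bit n set for each supported mode n; a chip
supporting modes 0-5 reports 0x3f, for example, so the ONFI_TIMING_MODE_5 test
above wins and mode 5 is chosen.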
/* Begin the I/O */
void gpmi_begin(struct gpmi_nand_data *this)
{
struct resources *r = &this->resources;
- struct timing_threshod *nfc = &timing_default_threshold;
- unsigned char *gpmi_regs = r->gpmi_regs;
+ void __iomem *gpmi_regs = r->gpmi_regs;
unsigned int clock_period_in_ns;
uint32_t reg;
unsigned int dll_wait_time_in_us;
@@ -716,60 +961,66 @@ void gpmi_begin(struct gpmi_nand_data *this)
int ret;
/* Enable the clock. */
- ret = clk_prepare_enable(r->clock);
+ ret = gpmi_enable_clk(this);
if (ret) {
pr_err("We failed in enable the clk\n");
goto err_out;
}
- /* set ready/busy timeout */
- writel(0x500 << BP_GPMI_TIMING1_BUSY_TIMEOUT,
- gpmi_regs + HW_GPMI_TIMING1);
-
- /* Get the timing information we need. */
- nfc->clock_frequency_in_hz = clk_get_rate(r->clock);
- clock_period_in_ns = 1000000000 / nfc->clock_frequency_in_hz;
+ /* Only initialize the timing once */
+ if (this->flags & GPMI_TIMING_INIT_OK)
+ return;
+ this->flags |= GPMI_TIMING_INIT_OK;
- gpmi_nfc_compute_hardware_timing(this, &hw);
+ if (this->flags & GPMI_ASYNC_EDO_ENABLED)
+ gpmi_compute_edo_timing(this, &hw);
+ else
+ gpmi_nfc_compute_hardware_timing(this, &hw);
- /* Set up all the simple timing parameters. */
+ /* [1] Set HW_GPMI_TIMING0 */
reg = BF_GPMI_TIMING0_ADDRESS_SETUP(hw.address_setup_in_cycles) |
BF_GPMI_TIMING0_DATA_HOLD(hw.data_hold_in_cycles) |
BF_GPMI_TIMING0_DATA_SETUP(hw.data_setup_in_cycles) ;
writel(reg, gpmi_regs + HW_GPMI_TIMING0);
- /*
- * DLL_ENABLE must be set to 0 when setting RDN_DELAY or HALF_PERIOD.
- */
+ /* [2] Set HW_GPMI_TIMING1 */
+ writel(BF_GPMI_TIMING1_BUSY_TIMEOUT(hw.device_busy_timeout),
+ gpmi_regs + HW_GPMI_TIMING1);
+
+ /* [3] The following code is to set the HW_GPMI_CTRL1. */
+
+ /* Set the WRN_DLY_SEL */
+ writel(BM_GPMI_CTRL1_WRN_DLY_SEL, gpmi_regs + HW_GPMI_CTRL1_CLR);
+ writel(BF_GPMI_CTRL1_WRN_DLY_SEL(hw.wrn_dly_sel),
+ gpmi_regs + HW_GPMI_CTRL1_SET);
+
+ /* DLL_ENABLE must be set to 0 when setting RDN_DELAY or HALF_PERIOD. */
writel(BM_GPMI_CTRL1_DLL_ENABLE, gpmi_regs + HW_GPMI_CTRL1_CLR);
/* Clear out the DLL control fields. */
- writel(BM_GPMI_CTRL1_RDN_DELAY, gpmi_regs + HW_GPMI_CTRL1_CLR);
- writel(BM_GPMI_CTRL1_HALF_PERIOD, gpmi_regs + HW_GPMI_CTRL1_CLR);
+ reg = BM_GPMI_CTRL1_RDN_DELAY | BM_GPMI_CTRL1_HALF_PERIOD;
+ writel(reg, gpmi_regs + HW_GPMI_CTRL1_CLR);
/* If no sample delay is called for, return immediately. */
if (!hw.sample_delay_factor)
return;
- /* Configure the HALF_PERIOD flag. */
- if (hw.use_half_periods)
- writel(BM_GPMI_CTRL1_HALF_PERIOD,
- gpmi_regs + HW_GPMI_CTRL1_SET);
+ /* Set RDN_DELAY or HALF_PERIOD. */
+ reg = ((hw.use_half_periods) ? BM_GPMI_CTRL1_HALF_PERIOD : 0)
+ | BF_GPMI_CTRL1_RDN_DELAY(hw.sample_delay_factor);
- /* Set the delay factor. */
- writel(BF_GPMI_CTRL1_RDN_DELAY(hw.sample_delay_factor),
- gpmi_regs + HW_GPMI_CTRL1_SET);
+ writel(reg, gpmi_regs + HW_GPMI_CTRL1_SET);
- /* Enable the DLL. */
+ /* At last, we enable the DLL. */
writel(BM_GPMI_CTRL1_DLL_ENABLE, gpmi_regs + HW_GPMI_CTRL1_SET);
/*
* After we enable the GPMI DLL, we have to wait 64 clock cycles before
- * we can use the GPMI.
- *
- * Calculate the amount of time we need to wait, in microseconds.
+ * we can use the GPMI. Calculate the amount of time we need to wait,
+ * in microseconds.
*/
+ clock_period_in_ns = NSEC_PER_SEC / clk_get_rate(r->clock[0]);
dll_wait_time_in_us = (clock_period_in_ns * 64) / 1000;
if (!dll_wait_time_in_us)
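As a concrete case: at 100MHz the clock period is 10ns, so 10 * 64 / 1000
truncates to 0us; the zero check above presumably rounds the wait up to a
minimum of 1us in the (elided) body, since waiting zero time after enabling
the DLL would violate the 64-cycle requirement.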
@@ -784,8 +1035,7 @@ err_out:
void gpmi_end(struct gpmi_nand_data *this)
{
- struct resources *r = &this->resources;
- clk_disable_unprepare(r->clock);
+ gpmi_disable_clk(this);
}
/* Clears a BCH interrupt. */
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index a6cad5c..d79696b 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -18,6 +18,9 @@
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
@@ -27,6 +30,7 @@
#include <linux/pinctrl/consumer.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/of_mtd.h>
#include "gpmi-nand.h"
/* add our own bbt descriptor */
@@ -113,7 +117,7 @@ int common_nfc_set_geometry(struct gpmi_nand_data *this)
/* We use the same ECC strength for all chunks. */
geo->ecc_strength = get_ecc_strength(this);
if (!geo->ecc_strength) {
- pr_err("We get a wrong ECC strength.\n");
+ pr_err("wrong ECC strength.\n");
return -EINVAL;
}
@@ -316,7 +320,7 @@ acquire_register_block(struct gpmi_nand_data *this, const char *res_name)
struct platform_device *pdev = this->pdev;
struct resources *res = &this->resources;
struct resource *r;
- void *p;
+ void __iomem *p;
r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res_name);
if (!r) {
@@ -423,8 +427,8 @@ static int __devinit acquire_dma_channels(struct gpmi_nand_data *this)
struct platform_device *pdev = this->pdev;
struct resource *r_dma;
struct device_node *dn;
- int dma_channel;
- unsigned int ret;
+ u32 dma_channel;
+ int ret;
struct dma_chan *dma_chan;
dma_cap_mask_t mask;
@@ -464,9 +468,73 @@ acquire_err:
return -EINVAL;
}
+static void gpmi_put_clks(struct gpmi_nand_data *this)
+{
+ struct resources *r = &this->resources;
+ struct clk *clk;
+ int i;
+
+ for (i = 0; i < GPMI_CLK_MAX; i++) {
+ clk = r->clock[i];
+ if (clk) {
+ clk_put(clk);
+ r->clock[i] = NULL;
+ }
+ }
+}
+
+static char *extra_clks_for_mx6q[GPMI_CLK_MAX] = {
+ "gpmi_apb", "gpmi_bch", "gpmi_bch_apb", "per1_bch",
+};
+
+static int __devinit gpmi_get_clks(struct gpmi_nand_data *this)
+{
+ struct resources *r = &this->resources;
+ char **extra_clks = NULL;
+ struct clk *clk;
+ int i;
+
+ /* The main clock is stored in the first slot. */
+ r->clock[0] = clk_get(this->dev, "gpmi_io");
+ if (IS_ERR(r->clock[0]))
+ goto err_clock;
+
+ /* Get extra clocks */
+ if (GPMI_IS_MX6Q(this))
+ extra_clks = extra_clks_for_mx6q;
+ if (!extra_clks)
+ return 0;
+
+ for (i = 1; i < GPMI_CLK_MAX; i++) {
+ if (extra_clks[i - 1] == NULL)
+ break;
+
+ clk = clk_get(this->dev, extra_clks[i - 1]);
+ if (IS_ERR(clk))
+ goto err_clock;
+
+ r->clock[i] = clk;
+ }
+
+ if (GPMI_IS_MX6Q(this))
+ /*
+ * Set the default rate for the gpmi clock on mx6q:
+ *
+ * To use an ONFI nand chip in Synchronous Mode,
+ * change the clock rate as needed.
+ */
+ clk_set_rate(r->clock[0], 22000000);
+
+ return 0;
+
+err_clock:
+ dev_dbg(this->dev, "failed to find the clocks.\n");
+ gpmi_put_clks(this);
+ return -ENOMEM;
+}
+
static int __devinit acquire_resources(struct gpmi_nand_data *this)
{
- struct resources *res = &this->resources;
struct pinctrl *pinctrl;
int ret;
@@ -492,12 +560,9 @@ static int __devinit acquire_resources(struct gpmi_nand_data *this)
goto exit_pin;
}
- res->clock = clk_get(&this->pdev->dev, NULL);
- if (IS_ERR(res->clock)) {
- pr_err("can not get the clock\n");
- ret = -ENOENT;
+ ret = gpmi_get_clks(this);
+ if (ret)
goto exit_clock;
- }
return 0;
exit_clock:
@@ -512,9 +577,7 @@ exit_regs:
static void release_resources(struct gpmi_nand_data *this)
{
- struct resources *r = &this->resources;
-
- clk_put(r->clock);
+ gpmi_put_clks(this);
release_register_block(this);
release_bch_irq(this);
release_dma_channels(this);
@@ -667,12 +730,12 @@ static int gpmi_alloc_dma_buffer(struct gpmi_nand_data *this)
struct device *dev = this->dev;
/* [1] Allocate a command buffer. PAGE_SIZE is enough. */
- this->cmd_buffer = kzalloc(PAGE_SIZE, GFP_DMA);
+ this->cmd_buffer = kzalloc(PAGE_SIZE, GFP_DMA | GFP_KERNEL);
if (this->cmd_buffer == NULL)
goto error_alloc;
/* [2] Allocate a read/write data buffer. PAGE_SIZE is enough. */
- this->data_buffer_dma = kzalloc(PAGE_SIZE, GFP_DMA);
+ this->data_buffer_dma = kzalloc(PAGE_SIZE, GFP_DMA | GFP_KERNEL);
if (this->data_buffer_dma == NULL)
goto error_alloc;
@@ -930,7 +993,7 @@ exit_nfc:
return ret;
}
-static void gpmi_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+static int gpmi_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *buf, int oob_required)
{
struct gpmi_nand_data *this = chip->priv;
@@ -972,7 +1035,7 @@ static void gpmi_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
&payload_virt, &payload_phys);
if (ret) {
pr_err("Inadequate payload DMA buffer\n");
- return;
+ return 0;
}
ret = send_page_prepare(this,
@@ -1002,6 +1065,8 @@ exit_auxiliary:
nfc_geo->payload_size,
payload_virt, payload_phys);
}
+
+ return 0;
}
/*
@@ -1064,6 +1129,9 @@ exit_auxiliary:
* ECC-based or raw view of the page is implicit in which function it calls
* (there is a similar pair of ECC-based/raw functions for writing).
*
+ * FIXME: The following paragraph is incorrect, now that there exist
+ * ecc.read_oob_raw and ecc.write_oob_raw functions.
+ *
* Since MTD assumes the OOB is not covered by ECC, there is no pair of
* ECC-based/raw functions for reading or writing the OOB. The fact that the
* caller wants an ECC-based or raw view of the page is not propagated down to
@@ -1190,7 +1258,6 @@ static int mx23_check_transcription_stamp(struct gpmi_nand_data *this)
unsigned int search_area_size_in_strides;
unsigned int stride;
unsigned int page;
- loff_t byte;
uint8_t *buffer = chip->buffers->databuf;
int saved_chip_number;
int found_an_ncb_fingerprint = false;
@@ -1207,9 +1274,8 @@ static int mx23_check_transcription_stamp(struct gpmi_nand_data *this)
dev_dbg(dev, "Scanning for an NCB fingerprint...\n");
for (stride = 0; stride < search_area_size_in_strides; stride++) {
- /* Compute the page and byte addresses. */
+ /* Compute the page addresses. */
page = stride * rom_geo->stride_size_in_pages;
- byte = page * mtd->writesize;
dev_dbg(dev, "Looking for a fingerprint in page 0x%x\n", page);
@@ -1251,7 +1317,6 @@ static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
unsigned int block;
unsigned int stride;
unsigned int page;
- loff_t byte;
uint8_t *buffer = chip->buffers->databuf;
int saved_chip_number;
int status;
@@ -1300,9 +1365,8 @@ static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
/* Loop through the first search area, writing NCB fingerprints. */
dev_dbg(dev, "Writing NCB fingerprints...\n");
for (stride = 0; stride < search_area_size_in_strides; stride++) {
- /* Compute the page and byte addresses. */
+ /* Compute the page addresses. */
page = stride * rom_geo->stride_size_in_pages;
- byte = page * mtd->writesize;
/* Write the first page of the current stride. */
dev_dbg(dev, "Writing an NCB fingerprint in page 0x%x\n", page);
@@ -1436,6 +1500,7 @@ static int gpmi_pre_bbt_scan(struct gpmi_nand_data *this)
/* Adjust the ECC strength according to the chip. */
this->nand.ecc.strength = this->bch_geometry.ecc_strength;
this->mtd.ecc_strength = this->bch_geometry.ecc_strength;
+ this->mtd.bitflip_threshold = this->bch_geometry.ecc_strength;
/* NAND boot init, depends on the gpmi_set_geometry(). */
return nand_boot_init(this);
@@ -1452,11 +1517,19 @@ static int gpmi_scan_bbt(struct mtd_info *mtd)
if (ret)
return ret;
+ /*
+ * Try to enable the extra features, such as EDO or Sync mode.
+ *
+ * We do not check the return value here: if enabling the extra
+ * features fails, we can still run in the normal way.
+ */
+ gpmi_extra_init(this);
+
/* use the default BBT implementation */
return nand_default_bbt(mtd);
}
-void gpmi_nfc_exit(struct gpmi_nand_data *this)
+static void gpmi_nfc_exit(struct gpmi_nand_data *this)
{
nand_release(&this->mtd);
gpmi_free_dma_buffer(this);
@@ -1497,6 +1570,8 @@ static int __devinit gpmi_nfc_init(struct gpmi_nand_data *this)
chip->ecc.size = 1;
chip->ecc.strength = 8;
chip->ecc.layout = &gpmi_hw_ecclayout;
+ if (of_get_nand_on_flash_bbt(this->dev->of_node))
+ chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
/* Allocate a temporary DMA buffer for reading ID in the nand_scan() */
this->bch_geometry.payload_size = 1024;
@@ -1579,6 +1654,8 @@ static int __devinit gpmi_nand_probe(struct platform_device *pdev)
if (ret)
goto exit_nfc_init;
+ dev_info(this->dev, "driver registered.\n");
+
return 0;
exit_nfc_init:
@@ -1586,10 +1663,12 @@ exit_nfc_init:
exit_acquire_resources:
platform_set_drvdata(pdev, NULL);
kfree(this);
+ dev_err(this->dev, "driver registration failed: %d\n", ret);
+
return ret;
}
-static int __exit gpmi_nand_remove(struct platform_device *pdev)
+static int __devexit gpmi_nand_remove(struct platform_device *pdev)
{
struct gpmi_nand_data *this = platform_get_drvdata(pdev);
@@ -1606,29 +1685,10 @@ static struct platform_driver gpmi_nand_driver = {
.of_match_table = gpmi_nand_id_table,
},
.probe = gpmi_nand_probe,
- .remove = __exit_p(gpmi_nand_remove),
+ .remove = __devexit_p(gpmi_nand_remove),
.id_table = gpmi_ids,
};
-
-static int __init gpmi_nand_init(void)
-{
- int err;
-
- err = platform_driver_register(&gpmi_nand_driver);
- if (err == 0)
- printk(KERN_INFO "GPMI NAND driver registered. (IMX)\n");
- else
- pr_err("i.MX GPMI NAND driver registration failed\n");
- return err;
-}
-
-static void __exit gpmi_nand_exit(void)
-{
- platform_driver_unregister(&gpmi_nand_driver);
-}
-
-module_init(gpmi_nand_init);
-module_exit(gpmi_nand_exit);
+module_platform_driver(gpmi_nand_driver);
MODULE_AUTHOR("Freescale Semiconductor, Inc.");
MODULE_DESCRIPTION("i.MX GPMI NAND Flash Controller Driver");
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
index ce5daa1..7ac25c1 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
@@ -22,14 +22,15 @@
#include <linux/dma-mapping.h>
#include <linux/fsl/mxs-dma.h>
+#define GPMI_CLK_MAX 5 /* MX6Q needs five clocks */
struct resources {
- void *gpmi_regs;
- void *bch_regs;
+ void __iomem *gpmi_regs;
+ void __iomem *bch_regs;
unsigned int bch_low_interrupt;
unsigned int bch_high_interrupt;
unsigned int dma_low_channel;
unsigned int dma_high_channel;
- struct clk *clock;
+ struct clk *clock[GPMI_CLK_MAX];
};
/**
@@ -121,6 +122,11 @@ struct nand_timing {
};
struct gpmi_nand_data {
+ /* flags */
+#define GPMI_ASYNC_EDO_ENABLED (1 << 0)
+#define GPMI_TIMING_INIT_OK (1 << 1)
+ int flags;
+
/* System Interface */
struct device *dev;
struct platform_device *pdev;
@@ -131,6 +137,7 @@ struct gpmi_nand_data {
/* Flash Hardware */
struct nand_timing timing;
+ int timing_mode;
/* BCH */
struct bch_geometry bch_geometry;
@@ -188,16 +195,28 @@ struct gpmi_nand_data {
* @data_setup_in_cycles: The data setup time, in cycles.
* @data_hold_in_cycles: The data hold time, in cycles.
* @address_setup_in_cycles: The address setup time, in cycles.
+ * @device_busy_timeout: The timeout waiting for NAND Ready/Busy,
+ * this value is the number of cycles multiplied
+ * by 4096.
* @use_half_periods: Indicates the clock is running slowly, so the
* NFC DLL should use half-periods.
* @sample_delay_factor: The sample delay factor.
+ * @wrn_dly_sel: The delay on the GPMI write strobe.
*/
struct gpmi_nfc_hardware_timing {
+ /* for HW_GPMI_TIMING0 */
uint8_t data_setup_in_cycles;
uint8_t data_hold_in_cycles;
uint8_t address_setup_in_cycles;
+
+ /* for HW_GPMI_TIMING1 */
+ uint16_t device_busy_timeout;
+#define GPMI_DEFAULT_BUSY_TIMEOUT 0x500 /* default busy timeout value. */
+
+ /* for HW_GPMI_CTRL1 */
bool use_half_periods;
uint8_t sample_delay_factor;
+ uint8_t wrn_dly_sel;
};
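To put the busy-timeout units in perspective: the default of 0x500 (1280)
corresponds to 1280 * 4096 = 5,242,880 GPMI cycles, roughly 52ms at a 100MHz
GPMI clock; the EDO path's 0x9000 (36864) corresponds to about 151 million
cycles, around 1.5s at the same rate.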
/**
@@ -246,6 +265,7 @@ extern int start_dma_with_bch_irq(struct gpmi_nand_data *,
/* GPMI-NAND helper function library */
extern int gpmi_init(struct gpmi_nand_data *);
+extern int gpmi_extra_init(struct gpmi_nand_data *);
extern void gpmi_clear_bch(struct gpmi_nand_data *);
extern void gpmi_dump_info(struct gpmi_nand_data *);
extern int bch_set_geometry(struct gpmi_nand_data *);
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-regs.h b/drivers/mtd/nand/gpmi-nand/gpmi-regs.h
index 8343124..53397cc 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-regs.h
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-regs.h
@@ -108,6 +108,15 @@
#define HW_GPMI_CTRL1_CLR 0x00000068
#define HW_GPMI_CTRL1_TOG 0x0000006c
+#define BP_GPMI_CTRL1_WRN_DLY_SEL 22
+#define BM_GPMI_CTRL1_WRN_DLY_SEL (0x3 << BP_GPMI_CTRL1_WRN_DLY_SEL)
+#define BF_GPMI_CTRL1_WRN_DLY_SEL(v) \
+ (((v) << BP_GPMI_CTRL1_WRN_DLY_SEL) & BM_GPMI_CTRL1_WRN_DLY_SEL)
+#define BV_GPMI_CTRL1_WRN_DLY_SEL_4_TO_8NS 0x0
+#define BV_GPMI_CTRL1_WRN_DLY_SEL_6_TO_10NS 0x1
+#define BV_GPMI_CTRL1_WRN_DLY_SEL_7_TO_12NS 0x2
+#define BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY 0x3
+
#define BM_GPMI_CTRL1_BCH_MODE (1 << 18)
#define BP_GPMI_CTRL1_DLL_ENABLE 17
@@ -154,6 +163,9 @@
#define HW_GPMI_TIMING1 0x00000080
#define BP_GPMI_TIMING1_BUSY_TIMEOUT 16
+#define BM_GPMI_TIMING1_BUSY_TIMEOUT (0xffff << BP_GPMI_TIMING1_BUSY_TIMEOUT)
+#define BF_GPMI_TIMING1_BUSY_TIMEOUT(v) \
+ (((v) << BP_GPMI_TIMING1_BUSY_TIMEOUT) & BM_GPMI_TIMING1_BUSY_TIMEOUT)
#define HW_GPMI_TIMING2 0x00000090
#define HW_GPMI_DATA 0x000000a0
diff --git a/drivers/mtd/nand/lpc32xx_mlc.c b/drivers/mtd/nand/lpc32xx_mlc.c
new file mode 100644
index 0000000..c29b7ac
--- /dev/null
+++ b/drivers/mtd/nand/lpc32xx_mlc.c
@@ -0,0 +1,924 @@
+/*
+ * Driver for NAND MLC Controller in LPC32xx
+ *
+ * Author: Roland Stigge <stigge@antcom.de>
+ *
+ * Copyright © 2011 WORK Microwave GmbH
+ * Copyright © 2011, 2012 Roland Stigge
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ * NAND Flash Controller Operation:
+ * - Read: Auto Decode
+ * - Write: Auto Encode
+ * - Tested Page Sizes: 2048, 4096
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/completion.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/of_mtd.h>
+#include <linux/of_gpio.h>
+#include <linux/mtd/lpc32xx_mlc.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/mtd/nand_ecc.h>
+
+#define DRV_NAME "lpc32xx_mlc"
+
+/**********************************************************************
+* MLC NAND controller register offsets
+**********************************************************************/
+
+#define MLC_BUFF(x) (x + 0x00000)
+#define MLC_DATA(x) (x + 0x08000)
+#define MLC_CMD(x) (x + 0x10000)
+#define MLC_ADDR(x) (x + 0x10004)
+#define MLC_ECC_ENC_REG(x) (x + 0x10008)
+#define MLC_ECC_DEC_REG(x) (x + 0x1000C)
+#define MLC_ECC_AUTO_ENC_REG(x) (x + 0x10010)
+#define MLC_ECC_AUTO_DEC_REG(x) (x + 0x10014)
+#define MLC_RPR(x) (x + 0x10018)
+#define MLC_WPR(x) (x + 0x1001C)
+#define MLC_RUBP(x) (x + 0x10020)
+#define MLC_ROBP(x) (x + 0x10024)
+#define MLC_SW_WP_ADD_LOW(x) (x + 0x10028)
+#define MLC_SW_WP_ADD_HIG(x) (x + 0x1002C)
+#define MLC_ICR(x) (x + 0x10030)
+#define MLC_TIME_REG(x) (x + 0x10034)
+#define MLC_IRQ_MR(x) (x + 0x10038)
+#define MLC_IRQ_SR(x) (x + 0x1003C)
+#define MLC_LOCK_PR(x) (x + 0x10044)
+#define MLC_ISR(x) (x + 0x10048)
+#define MLC_CEH(x) (x + 0x1004C)
+
+/**********************************************************************
+* MLC_CMD bit definitions
+**********************************************************************/
+#define MLCCMD_RESET 0xFF
+
+/**********************************************************************
+* MLC_ICR bit definitions
+**********************************************************************/
+#define MLCICR_WPROT (1 << 3)
+#define MLCICR_LARGEBLOCK (1 << 2)
+#define MLCICR_LONGADDR (1 << 1)
+#define MLCICR_16BIT (1 << 0) /* unsupported by LPC32x0! */
+
+/**********************************************************************
+* MLC_TIME_REG bit definitions
+**********************************************************************/
+#define MLCTIMEREG_TCEA_DELAY(n) (((n) & 0x03) << 24)
+#define MLCTIMEREG_BUSY_DELAY(n) (((n) & 0x1F) << 19)
+#define MLCTIMEREG_NAND_TA(n) (((n) & 0x07) << 16)
+#define MLCTIMEREG_RD_HIGH(n) (((n) & 0x0F) << 12)
+#define MLCTIMEREG_RD_LOW(n) (((n) & 0x0F) << 8)
+#define MLCTIMEREG_WR_HIGH(n) (((n) & 0x0F) << 4)
+#define MLCTIMEREG_WR_LOW(n) (((n) & 0x0F) << 0)
+
+/**********************************************************************
+* MLC_IRQ_MR and MLC_IRQ_SR bit definitions
+**********************************************************************/
+#define MLCIRQ_NAND_READY (1 << 5)
+#define MLCIRQ_CONTROLLER_READY (1 << 4)
+#define MLCIRQ_DECODE_FAILURE (1 << 3)
+#define MLCIRQ_DECODE_ERROR (1 << 2)
+#define MLCIRQ_ECC_READY (1 << 1)
+#define MLCIRQ_WRPROT_FAULT (1 << 0)
+
+/**********************************************************************
+* MLC_LOCK_PR bit definitions
+**********************************************************************/
+#define MLCLOCKPR_MAGIC 0xA25E
+
+/**********************************************************************
+* MLC_ISR bit definitions
+**********************************************************************/
+#define MLCISR_DECODER_FAILURE (1 << 6)
+#define MLCISR_ERRORS ((1 << 4) | (1 << 5))
+#define MLCISR_ERRORS_DETECTED (1 << 3)
+#define MLCISR_ECC_READY (1 << 2)
+#define MLCISR_CONTROLLER_READY (1 << 1)
+#define MLCISR_NAND_READY (1 << 0)
+
+/**********************************************************************
+* MLC_CEH bit definitions
+**********************************************************************/
+#define MLCCEH_NORMAL (1 << 0)
+
+struct lpc32xx_nand_cfg_mlc {
+ uint32_t tcea_delay;
+ uint32_t busy_delay;
+ uint32_t nand_ta;
+ uint32_t rd_high;
+ uint32_t rd_low;
+ uint32_t wr_high;
+ uint32_t wr_low;
+ int wp_gpio;
+ struct mtd_partition *parts;
+ unsigned num_parts;
+};
+
+static struct nand_ecclayout lpc32xx_nand_oob = {
+ .eccbytes = 40,
+ .eccpos = { 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 54, 55, 56, 57, 58, 59, 60, 61, 62, 63 },
+ .oobfree = {
+ { .offset = 0,
+ .length = 6, },
+ { .offset = 16,
+ .length = 6, },
+ { .offset = 32,
+ .length = 6, },
+ { .offset = 48,
+ .length = 6, },
+ },
+};
+
+static struct nand_bbt_descr lpc32xx_nand_bbt = {
+ .options = NAND_BBT_ABSPAGE | NAND_BBT_2BIT | NAND_BBT_NO_OOB |
+ NAND_BBT_WRITE,
+ .pages = { 524224, 0, 0, 0, 0, 0, 0, 0 },
+};
+
+static struct nand_bbt_descr lpc32xx_nand_bbt_mirror = {
+ .options = NAND_BBT_ABSPAGE | NAND_BBT_2BIT | NAND_BBT_NO_OOB |
+ NAND_BBT_WRITE,
+ .pages = { 524160, 0, 0, 0, 0, 0, 0, 0 },
+};
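The absolute pages are consistent with, for instance, a 1GiB chip with 2KiB
pages and 128KiB (64-page) blocks: 524288 pages in total, so page 524224 is
the first page of the last block and page 524160 the first page of the block
before it, keeping the primary table and its mirror in separate erase blocks.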
+
+struct lpc32xx_nand_host {
+ struct nand_chip nand_chip;
+ struct lpc32xx_mlc_platform_data *pdata;
+ struct clk *clk;
+ struct mtd_info mtd;
+ void __iomem *io_base;
+ int irq;
+ struct lpc32xx_nand_cfg_mlc *ncfg;
+ struct completion comp_nand;
+ struct completion comp_controller;
+ uint32_t llptr;
+ /*
+ * Physical addresses of ECC buffer, DMA data buffers, OOB data buffer
+ */
+ dma_addr_t oob_buf_phy;
+ /*
+ * Virtual addresses of ECC buffer, DMA data buffers, OOB data buffer
+ */
+ uint8_t *oob_buf;
+ /* Physical address of DMA base address */
+ dma_addr_t io_base_phy;
+
+ struct completion comp_dma;
+ struct dma_chan *dma_chan;
+ struct dma_slave_config dma_slave_config;
+ struct scatterlist sgl;
+ uint8_t *dma_buf;
+ uint8_t *dummy_buf;
+ int mlcsubpages; /* number of 512-byte subpages */
+};
+
+/*
+ * Activate/Deactivate DMA Operation:
+ *
+ * Using the PL080 DMA Controller for transferring the 512 byte subpages
+ * instead of doing readl() / writel() in a loop slows it down significantly.
+ * Measurements via getnstimeofday() upon 512 byte subpage reads reveal:
+ *
+ * - readl() of 128 x 32 bits in a loop: ~20us
+ * - DMA read of 512 bytes (32 bit, 4...128 words bursts): ~60us
+ * - DMA read of 512 bytes (32 bit, no bursts): ~100us
+ *
+ * This applies to the transfer itself. In the DMA case, only the
+ * wait_for_completion() was measured (DMA setup _not_ included).
+ *
+ * Note that the 512 bytes subpage transfer is done directly from/to a
+ * FIFO/buffer inside the NAND controller. Most of the time (~400-800us for a
+ * 2048 bytes page) is spent waiting for the NAND IRQ, anyway. (The NAND
+ * controller transferring data between its internal buffer to/from the NAND
+ * chip.)
+ *
+ * Therefore, using the PL080 DMA is disabled by default, for now.
+ *
+ */
+static int use_dma;
+
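Since use_dma is a compile-time constant here, one hypothetical way to make it
switchable at load time (not part of this driver as posted) would be a module
parameter; linux/module.h is already included above:

    /* hypothetical: allow overriding the DMA choice at modprobe time */
    module_param(use_dma, int, 0444);
    MODULE_PARM_DESC(use_dma, "use PL080 DMA for subpage transfers (default 0)");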
+static void lpc32xx_nand_setup(struct lpc32xx_nand_host *host)
+{
+ uint32_t clkrate, tmp;
+
+ /* Reset MLC controller */
+ writel(MLCCMD_RESET, MLC_CMD(host->io_base));
+ udelay(1000);
+
+ /* Get base clock for MLC block */
+ clkrate = clk_get_rate(host->clk);
+ if (clkrate == 0)
+ clkrate = 104000000;
+
+ /* Unlock MLC_ICR
+ * (among others, will be locked again automatically) */
+ writew(MLCLOCKPR_MAGIC, MLC_LOCK_PR(host->io_base));
+
+ /* Configure MLC Controller: Large Block, 5 Byte Address */
+ tmp = MLCICR_LARGEBLOCK | MLCICR_LONGADDR;
+ writel(tmp, MLC_ICR(host->io_base));
+
+ /* Unlock MLC_TIME_REG
+ * (among others, will be locked again automatically) */
+ writew(MLCLOCKPR_MAGIC, MLC_LOCK_PR(host->io_base));
+
+ /* Compute clock setup values, see LPC and NAND manual */
+ tmp = 0;
+ tmp |= MLCTIMEREG_TCEA_DELAY(clkrate / host->ncfg->tcea_delay + 1);
+ tmp |= MLCTIMEREG_BUSY_DELAY(clkrate / host->ncfg->busy_delay + 1);
+ tmp |= MLCTIMEREG_NAND_TA(clkrate / host->ncfg->nand_ta + 1);
+ tmp |= MLCTIMEREG_RD_HIGH(clkrate / host->ncfg->rd_high + 1);
+ tmp |= MLCTIMEREG_RD_LOW(clkrate / host->ncfg->rd_low);
+ tmp |= MLCTIMEREG_WR_HIGH(clkrate / host->ncfg->wr_high + 1);
+ tmp |= MLCTIMEREG_WR_LOW(clkrate / host->ncfg->wr_low);
+ writel(tmp, MLC_TIME_REG(host->io_base));
+
+ /* Enable IRQ for CONTROLLER_READY and NAND_READY */
+ writeb(MLCIRQ_CONTROLLER_READY | MLCIRQ_NAND_READY,
+ MLC_IRQ_MR(host->io_base));
+
+ /* Normal nCE operation: nCE controlled by controller */
+ writel(MLCCEH_NORMAL, MLC_CEH(host->io_base));
+}
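The division pattern clkrate / value (+ 1) implies that the nxp,* device-tree
properties are expressed in Hz, i.e. as the reciprocal of the required time.
As a hypothetical example: with clkrate = 104MHz and nxp,nand-ta = <26000000>
(about 38.5ns), MLCTIMEREG_NAND_TA gets 104000000 / 26000000 + 1 = 5 cycles,
roughly 48ns at a 9.6ns clock period.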
+
+/*
+ * Hardware specific access to control lines
+ */
+static void lpc32xx_nand_cmd_ctrl(struct mtd_info *mtd, int cmd,
+ unsigned int ctrl)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct lpc32xx_nand_host *host = nand_chip->priv;
+
+ if (cmd != NAND_CMD_NONE) {
+ if (ctrl & NAND_CLE)
+ writel(cmd, MLC_CMD(host->io_base));
+ else
+ writel(cmd, MLC_ADDR(host->io_base));
+ }
+}
+
+/*
+ * Read Device Ready (NAND device _and_ controller ready)
+ */
+static int lpc32xx_nand_device_ready(struct mtd_info *mtd)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct lpc32xx_nand_host *host = nand_chip->priv;
+
+ if ((readb(MLC_ISR(host->io_base)) &
+ (MLCISR_CONTROLLER_READY | MLCISR_NAND_READY)) ==
+ (MLCISR_CONTROLLER_READY | MLCISR_NAND_READY))
+ return 1;
+
+ return 0;
+}
+
+static irqreturn_t lpc3xxx_nand_irq(int irq, struct lpc32xx_nand_host *host)
+{
+ uint8_t sr;
+
+ /* Clear interrupt flag by reading status */
+ sr = readb(MLC_IRQ_SR(host->io_base));
+ if (sr & MLCIRQ_NAND_READY)
+ complete(&host->comp_nand);
+ if (sr & MLCIRQ_CONTROLLER_READY)
+ complete(&host->comp_controller);
+
+ return IRQ_HANDLED;
+}
+
+static int lpc32xx_waitfunc_nand(struct mtd_info *mtd, struct nand_chip *chip)
+{
+ struct lpc32xx_nand_host *host = chip->priv;
+
+ if (readb(MLC_ISR(host->io_base)) & MLCISR_NAND_READY)
+ goto exit;
+
+ wait_for_completion(&host->comp_nand);
+
+ while (!(readb(MLC_ISR(host->io_base)) & MLCISR_NAND_READY)) {
+ /* Seems to be delayed sometimes by controller */
+ dev_dbg(&mtd->dev, "Warning: NAND not ready.\n");
+ cpu_relax();
+ }
+
+exit:
+ return NAND_STATUS_READY;
+}
+
+static int lpc32xx_waitfunc_controller(struct mtd_info *mtd,
+ struct nand_chip *chip)
+{
+ struct lpc32xx_nand_host *host = chip->priv;
+
+ if (readb(MLC_ISR(host->io_base)) & MLCISR_CONTROLLER_READY)
+ goto exit;
+
+ wait_for_completion(&host->comp_controller);
+
+ while (!(readb(MLC_ISR(host->io_base)) &
+ MLCISR_CONTROLLER_READY)) {
+ dev_dbg(&mtd->dev, "Warning: Controller not ready.\n");
+ cpu_relax();
+ }
+
+exit:
+ return NAND_STATUS_READY;
+}
+
+static int lpc32xx_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
+{
+ lpc32xx_waitfunc_nand(mtd, chip);
+ lpc32xx_waitfunc_controller(mtd, chip);
+
+ return NAND_STATUS_READY;
+}
+
+/*
+ * Enable NAND write protect
+ */
+static void lpc32xx_wp_enable(struct lpc32xx_nand_host *host)
+{
+ if (gpio_is_valid(host->ncfg->wp_gpio))
+ gpio_set_value(host->ncfg->wp_gpio, 0);
+}
+
+/*
+ * Disable NAND write protect
+ */
+static void lpc32xx_wp_disable(struct lpc32xx_nand_host *host)
+{
+ if (gpio_is_valid(host->ncfg->wp_gpio))
+ gpio_set_value(host->ncfg->wp_gpio, 1);
+}
+
+static void lpc32xx_dma_complete_func(void *completion)
+{
+ complete(completion);
+}
+
+static int lpc32xx_xmit_dma(struct mtd_info *mtd, void *mem, int len,
+ enum dma_transfer_direction dir)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct lpc32xx_nand_host *host = chip->priv;
+ struct dma_async_tx_descriptor *desc;
+ int flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+ int res;
+
+ sg_init_one(&host->sgl, mem, len);
+
+ res = dma_map_sg(host->dma_chan->device->dev, &host->sgl, 1,
+ DMA_BIDIRECTIONAL);
+ if (res != 1) {
+ dev_err(mtd->dev.parent, "Failed to map sg list\n");
+ return -ENXIO;
+ }
+ desc = dmaengine_prep_slave_sg(host->dma_chan, &host->sgl, 1, dir,
+ flags);
+ if (!desc) {
+ dev_err(mtd->dev.parent, "Failed to prepare slave sg\n");
+ goto out1;
+ }
+
+ init_completion(&host->comp_dma);
+ desc->callback = lpc32xx_dma_complete_func;
+ desc->callback_param = &host->comp_dma;
+
+ dmaengine_submit(desc);
+ dma_async_issue_pending(host->dma_chan);
+
+ wait_for_completion_timeout(&host->comp_dma, msecs_to_jiffies(1000));
+
+ dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
+ DMA_BIDIRECTIONAL);
+ return 0;
+out1:
+ dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
+ DMA_BIDIRECTIONAL);
+ return -ENXIO;
+}
+
+static int lpc32xx_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required, int page)
+{
+ struct lpc32xx_nand_host *host = chip->priv;
+ int i, j;
+ uint8_t *oobbuf = chip->oob_poi;
+ uint32_t mlc_isr;
+ int res;
+ uint8_t *dma_buf;
+ bool dma_mapped;
+
+ if ((void *)buf <= high_memory) {
+ dma_buf = buf;
+ dma_mapped = true;
+ } else {
+ dma_buf = host->dma_buf;
+ dma_mapped = false;
+ }
+
+ /* Writing Command and Address */
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+
+ /* For all sub-pages */
+ for (i = 0; i < host->mlcsubpages; i++) {
+ /* Start Auto Decode Command */
+ writeb(0x00, MLC_ECC_AUTO_DEC_REG(host->io_base));
+
+ /* Wait for Controller Ready */
+ lpc32xx_waitfunc_controller(mtd, chip);
+
+ /* Check ECC Error status */
+ mlc_isr = readl(MLC_ISR(host->io_base));
+ if (mlc_isr & MLCISR_DECODER_FAILURE) {
+ mtd->ecc_stats.failed++;
+ dev_warn(&mtd->dev, "%s: DECODER_FAILURE\n", __func__);
+ } else if (mlc_isr & MLCISR_ERRORS_DETECTED) {
+ mtd->ecc_stats.corrected += ((mlc_isr >> 4) & 0x3) + 1;
+ }
+
+ /* Read 512 + 16 Bytes */
+ if (use_dma) {
+ res = lpc32xx_xmit_dma(mtd, dma_buf + i * 512, 512,
+ DMA_DEV_TO_MEM);
+ if (res)
+ return res;
+ } else {
+ for (j = 0; j < (512 >> 2); j++) {
+ *((uint32_t *)(buf)) =
+ readl(MLC_BUFF(host->io_base));
+ buf += 4;
+ }
+ }
+ for (j = 0; j < (16 >> 2); j++) {
+ *((uint32_t *)(oobbuf)) =
+ readl(MLC_BUFF(host->io_base));
+ oobbuf += 4;
+ }
+ }
+
+ if (use_dma && !dma_mapped)
+ memcpy(buf, dma_buf, mtd->writesize);
+
+ return 0;
+}
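Two details in the read path above, as the code stands: the error count added
to ecc_stats.corrected comes from the two MLCISR_ERRORS bits (bits 4-5), which
evidently encode the number of corrected symbols minus one, hence the + 1; and
on a decoder failure the subpage data is still copied out, with the failure
reported only through ecc_stats.failed, matching the usual MTD convention.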
+
+static int lpc32xx_write_page_lowlevel(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const uint8_t *buf, int oob_required)
+{
+ struct lpc32xx_nand_host *host = chip->priv;
+ const uint8_t *oobbuf = chip->oob_poi;
+ uint8_t *dma_buf = (uint8_t *)buf;
+ int res;
+ int i, j;
+
+ if (use_dma && (void *)buf >= high_memory) {
+ dma_buf = host->dma_buf;
+ memcpy(dma_buf, buf, mtd->writesize);
+ }
+
+ for (i = 0; i < host->mlcsubpages; i++) {
+ /* Start Encode */
+ writeb(0x00, MLC_ECC_ENC_REG(host->io_base));
+
+ /* Write 512 + 6 Bytes to Buffer */
+ if (use_dma) {
+ res = lpc32xx_xmit_dma(mtd, dma_buf + i * 512, 512,
+ DMA_MEM_TO_DEV);
+ if (res)
+ return res;
+ } else {
+ for (j = 0; j < (512 >> 2); j++) {
+ writel(*((uint32_t *)(buf)),
+ MLC_BUFF(host->io_base));
+ buf += 4;
+ }
+ }
+ writel(*((uint32_t *)(oobbuf)), MLC_BUFF(host->io_base));
+ oobbuf += 4;
+ writew(*((uint16_t *)(oobbuf)), MLC_BUFF(host->io_base));
+ oobbuf += 12;
+
+ /* Auto Encode w/ Bit 8 = 0 (see LPC MLC Controller manual) */
+ writeb(0x00, MLC_ECC_AUTO_ENC_REG(host->io_base));
+
+ /* Wait for Controller Ready */
+ lpc32xx_waitfunc_controller(mtd, chip);
+ }
+ return 0;
+}
+
+static int lpc32xx_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf, int oob_required, int page,
+ int cached, int raw)
+{
+ int res;
+
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
+ res = lpc32xx_write_page_lowlevel(mtd, chip, buf, oob_required);
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+ lpc32xx_waitfunc(mtd, chip);
+
+ return res;
+}
+
+static int lpc32xx_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ struct lpc32xx_nand_host *host = chip->priv;
+
+ /* Read whole page - necessary with MLC controller! */
+ lpc32xx_read_page(mtd, chip, host->dummy_buf, 1, page);
+
+ return 0;
+}
+
+static int lpc32xx_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ /* None, write_oob conflicts with the automatic LPC MLC ECC decoder! */
+ return 0;
+}
+
+/* Prepares MLC for transfers with H/W ECC enabled: always enabled anyway */
+static void lpc32xx_ecc_enable(struct mtd_info *mtd, int mode)
+{
+ /* Always enabled! */
+}
+
+static int lpc32xx_dma_setup(struct lpc32xx_nand_host *host)
+{
+ struct mtd_info *mtd = &host->mtd;
+ dma_cap_mask_t mask;
+
+ if (!host->pdata || !host->pdata->dma_filter) {
+ dev_err(mtd->dev.parent, "no DMA platform data\n");
+ return -ENOENT;
+ }
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ host->dma_chan = dma_request_channel(mask, host->pdata->dma_filter,
+ "nand-mlc");
+ if (!host->dma_chan) {
+ dev_err(mtd->dev.parent, "Failed to request DMA channel\n");
+ return -EBUSY;
+ }
+
+ /*
+ * Set direction to a sensible value even if the dmaengine driver
+ * should ignore it. With the default (DMA_MEM_TO_MEM), the amba-pl08x
+ * driver criticizes it as "alien transfer direction".
+ */
+ host->dma_slave_config.direction = DMA_DEV_TO_MEM;
+ host->dma_slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ host->dma_slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ host->dma_slave_config.src_maxburst = 128;
+ host->dma_slave_config.dst_maxburst = 128;
+ /* DMA controller does flow control: */
+ host->dma_slave_config.device_fc = false;
+ host->dma_slave_config.src_addr = MLC_BUFF(host->io_base_phy);
+ host->dma_slave_config.dst_addr = MLC_BUFF(host->io_base_phy);
+ if (dmaengine_slave_config(host->dma_chan, &host->dma_slave_config)) {
+ dev_err(mtd->dev.parent, "Failed to setup DMA slave\n");
+ goto out1;
+ }
+
+ return 0;
+out1:
+ dma_release_channel(host->dma_chan);
+ return -ENXIO;
+}
+
+static struct lpc32xx_nand_cfg_mlc *lpc32xx_parse_dt(struct device *dev)
+{
+ struct lpc32xx_nand_cfg_mlc *ncfg;
+ struct device_node *np = dev->of_node;
+
+ ncfg = devm_kzalloc(dev, sizeof(*ncfg), GFP_KERNEL);
+ if (!ncfg) {
+ dev_err(dev, "could not allocate memory for platform data\n");
+ return NULL;
+ }
+
+ of_property_read_u32(np, "nxp,tcea-delay", &ncfg->tcea_delay);
+ of_property_read_u32(np, "nxp,busy-delay", &ncfg->busy_delay);
+ of_property_read_u32(np, "nxp,nand-ta", &ncfg->nand_ta);
+ of_property_read_u32(np, "nxp,rd-high", &ncfg->rd_high);
+ of_property_read_u32(np, "nxp,rd-low", &ncfg->rd_low);
+ of_property_read_u32(np, "nxp,wr-high", &ncfg->wr_high);
+ of_property_read_u32(np, "nxp,wr-low", &ncfg->wr_low);
+
+ if (!ncfg->tcea_delay || !ncfg->busy_delay || !ncfg->nand_ta ||
+ !ncfg->rd_high || !ncfg->rd_low || !ncfg->wr_high ||
+ !ncfg->wr_low) {
+ dev_err(dev, "chip parameters not specified correctly\n");
+ return NULL;
+ }
+
+ ncfg->wp_gpio = of_get_named_gpio(np, "gpios", 0);
+
+ return ncfg;
+}
+
+/*
+ * Probe for NAND controller
+ */
+static int __devinit lpc32xx_nand_probe(struct platform_device *pdev)
+{
+ struct lpc32xx_nand_host *host;
+ struct mtd_info *mtd;
+ struct nand_chip *nand_chip;
+ struct resource *rc;
+ int res;
+ struct mtd_part_parser_data ppdata = {};
+
+ /* Allocate memory for the device structure (and zero it) */
+ host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
+ if (!host) {
+ dev_err(&pdev->dev, "failed to allocate device structure.\n");
+ return -ENOMEM;
+ }
+
+ rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (rc == NULL) {
+ dev_err(&pdev->dev, "No memory resource found for device!\r\n");
+ return -ENXIO;
+ }
+
+ host->io_base = devm_request_and_ioremap(&pdev->dev, rc);
+ if (host->io_base == NULL) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ return -EIO;
+ }
+ host->io_base_phy = rc->start;
+
+ mtd = &host->mtd;
+ nand_chip = &host->nand_chip;
+ if (pdev->dev.of_node)
+ host->ncfg = lpc32xx_parse_dt(&pdev->dev);
+ if (!host->ncfg) {
+ dev_err(&pdev->dev,
+ "Missing or bad NAND config from device tree\n");
+ return -ENOENT;
+ }
+ if (host->ncfg->wp_gpio == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ if (gpio_is_valid(host->ncfg->wp_gpio) &&
+ gpio_request(host->ncfg->wp_gpio, "NAND WP")) {
+ dev_err(&pdev->dev, "GPIO not available\n");
+ return -EBUSY;
+ }
+ lpc32xx_wp_disable(host);
+
+ host->pdata = pdev->dev.platform_data;
+
+ nand_chip->priv = host; /* link the private data structures */
+ mtd->priv = nand_chip;
+ mtd->owner = THIS_MODULE;
+ mtd->dev.parent = &pdev->dev;
+
+ /* Get NAND clock */
+ host->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(host->clk)) {
+ dev_err(&pdev->dev, "Clock initialization failure\n");
+ res = -ENOENT;
+ goto err_exit1;
+ }
+ clk_enable(host->clk);
+
+ nand_chip->cmd_ctrl = lpc32xx_nand_cmd_ctrl;
+ nand_chip->dev_ready = lpc32xx_nand_device_ready;
+ nand_chip->chip_delay = 25; /* us */
+ nand_chip->IO_ADDR_R = MLC_DATA(host->io_base);
+ nand_chip->IO_ADDR_W = MLC_DATA(host->io_base);
+
+ /* Init NAND controller */
+ lpc32xx_nand_setup(host);
+
+ platform_set_drvdata(pdev, host);
+
+ /* Initialize function pointers */
+ nand_chip->ecc.hwctl = lpc32xx_ecc_enable;
+ nand_chip->ecc.read_page_raw = lpc32xx_read_page;
+ nand_chip->ecc.read_page = lpc32xx_read_page;
+ nand_chip->ecc.write_page_raw = lpc32xx_write_page_lowlevel;
+ nand_chip->ecc.write_page = lpc32xx_write_page_lowlevel;
+ nand_chip->ecc.write_oob = lpc32xx_write_oob;
+ nand_chip->ecc.read_oob = lpc32xx_read_oob;
+ nand_chip->ecc.strength = 4;
+ nand_chip->write_page = lpc32xx_write_page;
+ nand_chip->waitfunc = lpc32xx_waitfunc;
+
+ nand_chip->bbt_options = NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
+ nand_chip->bbt_td = &lpc32xx_nand_bbt;
+ nand_chip->bbt_md = &lpc32xx_nand_bbt_mirror;
+
+	/*
+	 * bitflip_threshold defaults to ecc_strength anyway, but it is set
+	 * only later, in add_mtd_device(). While it is still 0, it causes
+	 * bad block table scanning errors in nand_scan_tail(), so set it
+	 * up here.
+	 */
+ mtd->bitflip_threshold = nand_chip->ecc.strength;
+
+ if (use_dma) {
+ res = lpc32xx_dma_setup(host);
+ if (res) {
+ res = -EIO;
+ goto err_exit2;
+ }
+ }
+
+	/*
+	 * Scan to find the existence of the device and determine the type
+	 * of NAND device: small block or large block.
+	 */
+ if (nand_scan_ident(mtd, 1, NULL)) {
+ res = -ENXIO;
+ goto err_exit3;
+ }
+
+ host->dma_buf = devm_kzalloc(&pdev->dev, mtd->writesize, GFP_KERNEL);
+ if (!host->dma_buf) {
+ dev_err(&pdev->dev, "Error allocating dma_buf memory\n");
+ res = -ENOMEM;
+ goto err_exit3;
+ }
+
+ host->dummy_buf = devm_kzalloc(&pdev->dev, mtd->writesize, GFP_KERNEL);
+ if (!host->dummy_buf) {
+ dev_err(&pdev->dev, "Error allocating dummy_buf memory\n");
+ res = -ENOMEM;
+ goto err_exit3;
+ }
+
+ nand_chip->ecc.mode = NAND_ECC_HW;
+ nand_chip->ecc.size = mtd->writesize;
+ nand_chip->ecc.layout = &lpc32xx_nand_oob;
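+	/* The hardware ECC engine works per 512-byte subpage, hence this count */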
+ host->mlcsubpages = mtd->writesize / 512;
+
+ /* initially clear interrupt status */
+ readb(MLC_IRQ_SR(host->io_base));
+
+ init_completion(&host->comp_nand);
+ init_completion(&host->comp_controller);
+
+ host->irq = platform_get_irq(pdev, 0);
+ if ((host->irq < 0) || (host->irq >= NR_IRQS)) {
+ dev_err(&pdev->dev, "failed to get platform irq\n");
+ res = -EINVAL;
+ goto err_exit3;
+ }
+
+ if (request_irq(host->irq, (irq_handler_t)&lpc3xxx_nand_irq,
+ IRQF_TRIGGER_HIGH, DRV_NAME, host)) {
+ dev_err(&pdev->dev, "Error requesting NAND IRQ\n");
+ res = -ENXIO;
+ goto err_exit3;
+ }
+
+	/*
+	 * Fill in all the uninitialized function pointers with the defaults
+	 * and scan for a bad block table if appropriate.
+	 */
+ if (nand_scan_tail(mtd)) {
+ res = -ENXIO;
+ goto err_exit4;
+ }
+
+ mtd->name = DRV_NAME;
+
+ ppdata.of_node = pdev->dev.of_node;
+ res = mtd_device_parse_register(mtd, NULL, &ppdata, host->ncfg->parts,
+ host->ncfg->num_parts);
+ if (!res)
+ return res;
+
+ nand_release(mtd);
+
+err_exit4:
+ free_irq(host->irq, host);
+err_exit3:
+ if (use_dma)
+ dma_release_channel(host->dma_chan);
+err_exit2:
+ clk_disable(host->clk);
+ clk_put(host->clk);
+ platform_set_drvdata(pdev, NULL);
+err_exit1:
+ lpc32xx_wp_enable(host);
+ gpio_free(host->ncfg->wp_gpio);
+
+ return res;
+}
+
+/*
+ * Remove NAND device
+ */
+static int __devexit lpc32xx_nand_remove(struct platform_device *pdev)
+{
+ struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
+ struct mtd_info *mtd = &host->mtd;
+
+ nand_release(mtd);
+ free_irq(host->irq, host);
+ if (use_dma)
+ dma_release_channel(host->dma_chan);
+
+ clk_disable(host->clk);
+ clk_put(host->clk);
+ platform_set_drvdata(pdev, NULL);
+
+ lpc32xx_wp_enable(host);
+ gpio_free(host->ncfg->wp_gpio);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int lpc32xx_nand_resume(struct platform_device *pdev)
+{
+ struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
+
+ /* Re-enable NAND clock */
+ clk_enable(host->clk);
+
+ /* Fresh init of NAND controller */
+ lpc32xx_nand_setup(host);
+
+ /* Disable write protect */
+ lpc32xx_wp_disable(host);
+
+ return 0;
+}
+
+static int lpc32xx_nand_suspend(struct platform_device *pdev, pm_message_t pm)
+{
+ struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
+
+ /* Enable write protect for safety */
+ lpc32xx_wp_enable(host);
+
+ /* Disable clock */
+ clk_disable(host->clk);
+ return 0;
+}
+
+#else
+#define lpc32xx_nand_resume NULL
+#define lpc32xx_nand_suspend NULL
+#endif
+
+static const struct of_device_id lpc32xx_nand_match[] = {
+ { .compatible = "nxp,lpc3220-mlc" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, lpc32xx_nand_match);
+
+static struct platform_driver lpc32xx_nand_driver = {
+ .probe = lpc32xx_nand_probe,
+ .remove = __devexit_p(lpc32xx_nand_remove),
+ .resume = lpc32xx_nand_resume,
+ .suspend = lpc32xx_nand_suspend,
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(lpc32xx_nand_match),
+ },
+};
+
+module_platform_driver(lpc32xx_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
+MODULE_DESCRIPTION("NAND driver for the NXP LPC32XX MLC controller");
diff --git a/drivers/mtd/nand/lpc32xx_slc.c b/drivers/mtd/nand/lpc32xx_slc.c
new file mode 100644
index 0000000..32409c4
--- /dev/null
+++ b/drivers/mtd/nand/lpc32xx_slc.c
@@ -0,0 +1,1039 @@
+/*
+ * NXP LPC32XX NAND SLC driver
+ *
+ * Authors:
+ * Kevin Wells <kevin.wells@nxp.com>
+ * Roland Stigge <stigge@antcom.de>
+ *
+ * Copyright © 2011 NXP Semiconductors
+ * Copyright © 2012 Roland Stigge
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/gpio.h>
+#include <linux/of.h>
+#include <linux/of_mtd.h>
+#include <linux/of_gpio.h>
+#include <linux/mtd/lpc32xx_slc.h>
+
+#define LPC32XX_MODNAME "lpc32xx-nand"
+
+/**********************************************************************
+* SLC NAND controller register offsets
+**********************************************************************/
+
+#define SLC_DATA(x) (x + 0x000)
+#define SLC_ADDR(x) (x + 0x004)
+#define SLC_CMD(x) (x + 0x008)
+#define SLC_STOP(x) (x + 0x00C)
+#define SLC_CTRL(x) (x + 0x010)
+#define SLC_CFG(x) (x + 0x014)
+#define SLC_STAT(x) (x + 0x018)
+#define SLC_INT_STAT(x) (x + 0x01C)
+#define SLC_IEN(x) (x + 0x020)
+#define SLC_ISR(x) (x + 0x024)
+#define SLC_ICR(x) (x + 0x028)
+#define SLC_TAC(x) (x + 0x02C)
+#define SLC_TC(x) (x + 0x030)
+#define SLC_ECC(x) (x + 0x034)
+#define SLC_DMA_DATA(x) (x + 0x038)
+
+/**********************************************************************
+* slc_ctrl register definitions
+**********************************************************************/
+#define SLCCTRL_SW_RESET (1 << 2) /* Reset the NAND controller bit */
+#define SLCCTRL_ECC_CLEAR (1 << 1) /* Reset ECC bit */
+#define SLCCTRL_DMA_START (1 << 0) /* Start DMA channel bit */
+
+/**********************************************************************
+* slc_cfg register definitions
+**********************************************************************/
+#define SLCCFG_CE_LOW (1 << 5) /* Force CE low bit */
+#define SLCCFG_DMA_ECC (1 << 4) /* Enable DMA ECC bit */
+#define SLCCFG_ECC_EN (1 << 3) /* ECC enable bit */
+#define SLCCFG_DMA_BURST (1 << 2) /* DMA burst bit */
+#define SLCCFG_DMA_DIR (1 << 1) /* DMA write(0)/read(1) bit */
+#define SLCCFG_WIDTH (1 << 0) /* External device width, 0=8bit */
+
+/**********************************************************************
+* slc_stat register definitions
+**********************************************************************/
+#define SLCSTAT_DMA_FIFO (1 << 2) /* DMA FIFO has data bit */
+#define SLCSTAT_SLC_FIFO (1 << 1) /* SLC FIFO has data bit */
+#define SLCSTAT_NAND_READY (1 << 0) /* NAND device is ready bit */
+
+/**********************************************************************
+* slc_int_stat, slc_ien, slc_isr, and slc_icr register definitions
+**********************************************************************/
+#define SLCSTAT_INT_TC (1 << 1) /* Transfer count bit */
+#define SLCSTAT_INT_RDY_EN (1 << 0) /* Ready interrupt bit */
+
+/**********************************************************************
+* slc_tac register definitions
+**********************************************************************/
+/* Clock setting for RDY write sample wait time in 2*n clocks */
+#define SLCTAC_WDR(n) (((n) & 0xF) << 28)
+/* Write pulse width in clock cycles, 1 to 16 clocks */
+#define SLCTAC_WWIDTH(n) (((n) & 0xF) << 24)
+/* Write hold time of control and data signals, 1 to 16 clocks */
+#define SLCTAC_WHOLD(n) (((n) & 0xF) << 20)
+/* Write setup time of control and data signals, 1 to 16 clocks */
+#define SLCTAC_WSETUP(n) (((n) & 0xF) << 16)
+/* Clock setting for RDY read sample wait time in 2*n clocks */
+#define SLCTAC_RDR(n) (((n) & 0xF) << 12)
+/* Read pulse width in clock cycles, 1 to 16 clocks */
+#define SLCTAC_RWIDTH(n) (((n) & 0xF) << 8)
+/* Read hold time of control and data signals, 1 to 16 clocks */
+#define SLCTAC_RHOLD(n) (((n) & 0xF) << 4)
+/* Read setup time of control and data signals, 1 to 16 clocks */
+#define SLCTAC_RSETUP(n) (((n) & 0xF) << 0)
+
+/**********************************************************************
+* slc_ecc register definitions
+**********************************************************************/
+/* Macros to extract the line and column parity from a hardware ECC word */
+#define SLCECC_TO_LINEPAR(n) (((n) >> 6) & 0x7FFF)
+#define SLCECC_TO_COLPAR(n) ((n) & 0x3F)
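+/*
+ * For illustration: a raw ECC word of 0x00001FC0 yields
+ * SLCECC_TO_LINEPAR(0x00001FC0) = 0x7F and SLCECC_TO_COLPAR(0x00001FC0) = 0.
+ */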
+
+/*
+ * DMA requires storage space for the DMA local buffer and the hardware ECC
+ * storage area. The DMA local buffer is only used if DMA mapping fails
+ * during runtime.
+ */
+#define LPC32XX_DMA_DATA_SIZE 4096
+#define LPC32XX_ECC_SAVE_SIZE ((4096 / 256) * 4)
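+/* i.e. one 32-bit ECC word for each of the 4096 / 256 = 16 subpages */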
+
+/* Number of bytes used for ECC stored in NAND per 256 bytes */
+#define LPC32XX_SLC_DEV_ECC_BYTES 3
+
+/*
+ * If the NAND base clock frequency can't be fetched, this frequency will be
+ * used instead as the base. This rate is used to set up the timing registers
+ * used for NAND accesses.
+ */
+#define LPC32XX_DEF_BUS_RATE 133250000
+
+/* Milliseconds for DMA FIFO timeout (unlikely anyway) */
+#define LPC32XX_DMA_TIMEOUT 100
+
+/*
+ * NAND ECC Layout for small page NAND devices
+ * Note: For large and huge page devices, the default layouts are used
+ */
+static struct nand_ecclayout lpc32xx_nand_oob_16 = {
+ .eccbytes = 6,
+ .eccpos = {10, 11, 12, 13, 14, 15},
+ .oobfree = {
+ { .offset = 0, .length = 4 },
+ { .offset = 6, .length = 4 },
+ },
+};
+
+static u8 bbt_pattern[] = {'B', 'b', 't', '0' };
+static u8 mirror_pattern[] = {'1', 't', 'b', 'B' };
+
+/*
+ * Small page FLASH BBT descriptors, marker at offset 0, version at offset 6
+ * Note: Large page devices use the default layout
+ */
+static struct nand_bbt_descr bbt_smallpage_main_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 0,
+ .len = 4,
+ .veroffs = 6,
+ .maxblocks = 4,
+ .pattern = bbt_pattern
+};
+
+static struct nand_bbt_descr bbt_smallpage_mirror_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 0,
+ .len = 4,
+ .veroffs = 6,
+ .maxblocks = 4,
+ .pattern = mirror_pattern
+};
+
+/*
+ * NAND platform configuration structure
+ */
+struct lpc32xx_nand_cfg_slc {
+ uint32_t wdr_clks;
+ uint32_t wwidth;
+ uint32_t whold;
+ uint32_t wsetup;
+ uint32_t rdr_clks;
+ uint32_t rwidth;
+ uint32_t rhold;
+ uint32_t rsetup;
+ bool use_bbt;
+ int wp_gpio;
+ struct mtd_partition *parts;
+ unsigned num_parts;
+};
+
+struct lpc32xx_nand_host {
+ struct nand_chip nand_chip;
+ struct lpc32xx_slc_platform_data *pdata;
+ struct clk *clk;
+ struct mtd_info mtd;
+ void __iomem *io_base;
+ struct lpc32xx_nand_cfg_slc *ncfg;
+
+ struct completion comp;
+ struct dma_chan *dma_chan;
+ uint32_t dma_buf_len;
+ struct dma_slave_config dma_slave_config;
+ struct scatterlist sgl;
+
+ /*
+ * DMA and CPU addresses of ECC work area and data buffer
+ */
+ uint32_t *ecc_buf;
+ uint8_t *data_buf;
+ dma_addr_t io_base_dma;
+};
+
+static void lpc32xx_nand_setup(struct lpc32xx_nand_host *host)
+{
+ uint32_t clkrate, tmp;
+
+ /* Reset SLC controller */
+ writel(SLCCTRL_SW_RESET, SLC_CTRL(host->io_base));
+ udelay(1000);
+
+ /* Basic setup */
+ writel(0, SLC_CFG(host->io_base));
+ writel(0, SLC_IEN(host->io_base));
+ writel((SLCSTAT_INT_TC | SLCSTAT_INT_RDY_EN),
+ SLC_ICR(host->io_base));
+
+ /* Get base clock for SLC block */
+ clkrate = clk_get_rate(host->clk);
+ if (clkrate == 0)
+ clkrate = LPC32XX_DEF_BUS_RATE;
+
+ /* Compute clock setup values */
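+	/*
+	 * The device tree timing values appear to be frequencies (Hz); each
+	 * is converted into a cycle count of the SLC base clock, with one
+	 * extra cycle as a margin. For example, with clkrate = 104 MHz and
+	 * nxp,rwidth = 16666667, RWIDTH becomes 1 + 104000000/16666667 = 7
+	 * clocks.
+	 */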
+ tmp = SLCTAC_WDR(host->ncfg->wdr_clks) |
+ SLCTAC_WWIDTH(1 + (clkrate / host->ncfg->wwidth)) |
+ SLCTAC_WHOLD(1 + (clkrate / host->ncfg->whold)) |
+ SLCTAC_WSETUP(1 + (clkrate / host->ncfg->wsetup)) |
+ SLCTAC_RDR(host->ncfg->rdr_clks) |
+ SLCTAC_RWIDTH(1 + (clkrate / host->ncfg->rwidth)) |
+ SLCTAC_RHOLD(1 + (clkrate / host->ncfg->rhold)) |
+ SLCTAC_RSETUP(1 + (clkrate / host->ncfg->rsetup));
+ writel(tmp, SLC_TAC(host->io_base));
+}
+
+/*
+ * Hardware specific access to control lines
+ */
+static void lpc32xx_nand_cmd_ctrl(struct mtd_info *mtd, int cmd,
+ unsigned int ctrl)
+{
+ uint32_t tmp;
+ struct nand_chip *chip = mtd->priv;
+ struct lpc32xx_nand_host *host = chip->priv;
+
+ /* Does CE state need to be changed? */
+ tmp = readl(SLC_CFG(host->io_base));
+ if (ctrl & NAND_NCE)
+ tmp |= SLCCFG_CE_LOW;
+ else
+ tmp &= ~SLCCFG_CE_LOW;
+ writel(tmp, SLC_CFG(host->io_base));
+
+ if (cmd != NAND_CMD_NONE) {
+ if (ctrl & NAND_CLE)
+ writel(cmd, SLC_CMD(host->io_base));
+ else
+ writel(cmd, SLC_ADDR(host->io_base));
+ }
+}
+
+/*
+ * Read the Device Ready pin
+ */
+static int lpc32xx_nand_device_ready(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct lpc32xx_nand_host *host = chip->priv;
+ int rdy = 0;
+
+ if ((readl(SLC_STAT(host->io_base)) & SLCSTAT_NAND_READY) != 0)
+ rdy = 1;
+
+ return rdy;
+}
+
+/*
+ * Enable NAND write protect
+ */
+static void lpc32xx_wp_enable(struct lpc32xx_nand_host *host)
+{
+ if (gpio_is_valid(host->ncfg->wp_gpio))
+ gpio_set_value(host->ncfg->wp_gpio, 0);
+}
+
+/*
+ * Disable NAND write protect
+ */
+static void lpc32xx_wp_disable(struct lpc32xx_nand_host *host)
+{
+ if (gpio_is_valid(host->ncfg->wp_gpio))
+ gpio_set_value(host->ncfg->wp_gpio, 1);
+}
+
+/*
+ * Prepares SLC for transfers with H/W ECC enabled
+ */
+static void lpc32xx_nand_ecc_enable(struct mtd_info *mtd, int mode)
+{
+ /* Hardware ECC is enabled automatically in hardware as needed */
+}
+
+/*
+ * Calculates the ECC for the data
+ */
+static int lpc32xx_nand_ecc_calculate(struct mtd_info *mtd,
+ const unsigned char *buf,
+ unsigned char *code)
+{
+ /*
+ * ECC is calculated automatically in hardware during syndrome read
+ * and write operations, so it doesn't need to be calculated here.
+ */
+ return 0;
+}
+
+/*
+ * Read a single byte from NAND device
+ */
+static uint8_t lpc32xx_nand_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct lpc32xx_nand_host *host = chip->priv;
+
+ return (uint8_t)readl(SLC_DATA(host->io_base));
+}
+
+/*
+ * Simple device read without ECC
+ */
+static void lpc32xx_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct lpc32xx_nand_host *host = chip->priv;
+
+ /* Direct device read with no ECC */
+ while (len-- > 0)
+ *buf++ = (uint8_t)readl(SLC_DATA(host->io_base));
+}
+
+/*
+ * Simple device write without ECC
+ */
+static void lpc32xx_nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct lpc32xx_nand_host *host = chip->priv;
+
+ /* Direct device write with no ECC */
+ while (len-- > 0)
+ writel((uint32_t)*buf++, SLC_DATA(host->io_base));
+}
+
+/*
+ * Read the OOB data from the device without ECC using FIFO method
+ */
+static int lpc32xx_nand_read_oob_syndrome(struct mtd_info *mtd,
+ struct nand_chip *chip, int page)
+{
+ chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
+ chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ return 0;
+}
+
+/*
+ * Write the OOB data to the device without ECC using FIFO method
+ */
+static int lpc32xx_nand_write_oob_syndrome(struct mtd_info *mtd,
+ struct nand_chip *chip, int page)
+{
+ int status;
+
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
+ chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ /* Send command to program the OOB data */
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+
+ status = chip->waitfunc(mtd, chip);
+
+ return status & NAND_STATUS_FAIL ? -EIO : 0;
+}
+
+/*
+ * Fills in the ECC fields in the OOB buffer with the hardware generated ECC
+ */
+static void lpc32xx_slc_ecc_copy(uint8_t *spare, const uint32_t *ecc, int count)
+{
+ int i;
+
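+	/*
+	 * Each 32-bit ECC word from the controller is shifted, inverted and
+	 * masked to 24 bits, then stored most-significant byte first as the
+	 * three ECC bytes kept in NAND per 256-byte subpage.
+	 */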
+ for (i = 0; i < (count * 3); i += 3) {
+ uint32_t ce = ecc[i / 3];
+ ce = ~(ce << 2) & 0xFFFFFF;
+ spare[i + 2] = (uint8_t)(ce & 0xFF);
+ ce >>= 8;
+ spare[i + 1] = (uint8_t)(ce & 0xFF);
+ ce >>= 8;
+ spare[i] = (uint8_t)(ce & 0xFF);
+ }
+}
+
+static void lpc32xx_dma_complete_func(void *completion)
+{
+ complete(completion);
+}
+
+static int lpc32xx_xmit_dma(struct mtd_info *mtd, dma_addr_t dma,
+ void *mem, int len, enum dma_transfer_direction dir)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct lpc32xx_nand_host *host = chip->priv;
+ struct dma_async_tx_descriptor *desc;
+ int flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+ int res;
+
+ host->dma_slave_config.direction = dir;
+ host->dma_slave_config.src_addr = dma;
+ host->dma_slave_config.dst_addr = dma;
+ host->dma_slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ host->dma_slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ host->dma_slave_config.src_maxburst = 4;
+ host->dma_slave_config.dst_maxburst = 4;
+ /* DMA controller does flow control: */
+ host->dma_slave_config.device_fc = false;
+ if (dmaengine_slave_config(host->dma_chan, &host->dma_slave_config)) {
+ dev_err(mtd->dev.parent, "Failed to setup DMA slave\n");
+ return -ENXIO;
+ }
+
+ sg_init_one(&host->sgl, mem, len);
+
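+	/*
+	 * The same scatterlist is used for both transfer directions, so
+	 * map it bidirectionally instead of per direction.
+	 */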
+ res = dma_map_sg(host->dma_chan->device->dev, &host->sgl, 1,
+ DMA_BIDIRECTIONAL);
+ if (res != 1) {
+ dev_err(mtd->dev.parent, "Failed to map sg list\n");
+ return -ENXIO;
+ }
+ desc = dmaengine_prep_slave_sg(host->dma_chan, &host->sgl, 1, dir,
+ flags);
+ if (!desc) {
+ dev_err(mtd->dev.parent, "Failed to prepare slave sg\n");
+ goto out1;
+ }
+
+ init_completion(&host->comp);
+ desc->callback = lpc32xx_dma_complete_func;
+ desc->callback_param = &host->comp;
+
+ dmaengine_submit(desc);
+ dma_async_issue_pending(host->dma_chan);
+
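+	/*
+	 * A timed-out completion is not treated as fatal here: the caller
+	 * (lpc32xx_xfer) checks the controller's FIFO and transfer count
+	 * after all transfers and flags errors there.
+	 */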
+ wait_for_completion_timeout(&host->comp, msecs_to_jiffies(1000));
+
+ dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
+ DMA_BIDIRECTIONAL);
+
+ return 0;
+out1:
+ dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
+ DMA_BIDIRECTIONAL);
+ return -ENXIO;
+}
+
+/*
+ * DMA read/write transfers with ECC support
+ */
+static int lpc32xx_xfer(struct mtd_info *mtd, uint8_t *buf, int eccsubpages,
+ int read)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct lpc32xx_nand_host *host = chip->priv;
+ int i, status = 0;
+ unsigned long timeout;
+ int res;
+ enum dma_transfer_direction dir =
+ read ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
+ uint8_t *dma_buf;
+ bool dma_mapped;
+
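+	/*
+	 * Buffers beyond high_memory (e.g. vmalloc'ed ones) cannot be
+	 * DMA-mapped directly, so bounce such transfers through the
+	 * preallocated data buffer.
+	 */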
+ if ((void *)buf <= high_memory) {
+ dma_buf = buf;
+ dma_mapped = true;
+ } else {
+ dma_buf = host->data_buf;
+ dma_mapped = false;
+ if (!read)
+ memcpy(host->data_buf, buf, mtd->writesize);
+ }
+
+ if (read) {
+ writel(readl(SLC_CFG(host->io_base)) |
+ SLCCFG_DMA_DIR | SLCCFG_ECC_EN | SLCCFG_DMA_ECC |
+ SLCCFG_DMA_BURST, SLC_CFG(host->io_base));
+ } else {
+ writel((readl(SLC_CFG(host->io_base)) |
+ SLCCFG_ECC_EN | SLCCFG_DMA_ECC | SLCCFG_DMA_BURST) &
+ ~SLCCFG_DMA_DIR,
+ SLC_CFG(host->io_base));
+ }
+
+ /* Clear initial ECC */
+ writel(SLCCTRL_ECC_CLEAR, SLC_CTRL(host->io_base));
+
+ /* Transfer size is data area only */
+ writel(mtd->writesize, SLC_TC(host->io_base));
+
+ /* Start transfer in the NAND controller */
+ writel(readl(SLC_CTRL(host->io_base)) | SLCCTRL_DMA_START,
+ SLC_CTRL(host->io_base));
+
+ for (i = 0; i < chip->ecc.steps; i++) {
+ /* Data */
+ res = lpc32xx_xmit_dma(mtd, SLC_DMA_DATA(host->io_base_dma),
+ dma_buf + i * chip->ecc.size,
+ mtd->writesize / chip->ecc.steps, dir);
+ if (res)
+ return res;
+
+ /* Always _read_ ECC */
+ if (i == chip->ecc.steps - 1)
+ break;
+ if (!read) /* ECC availability delayed on write */
+ udelay(10);
+ res = lpc32xx_xmit_dma(mtd, SLC_ECC(host->io_base_dma),
+ &host->ecc_buf[i], 4, DMA_DEV_TO_MEM);
+ if (res)
+ return res;
+ }
+
+	/*
+	 * According to NXP, the DMA can be finished here, but the NAND
+	 * controller may still have buffered data. After porting to the
+	 * dmaengine DMA driver (amba-pl080), this condition (DMA FIFO
+	 * empty) has always held in tests; the check is kept for safety.
+	 */
+ if (readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO) {
+ dev_warn(mtd->dev.parent, "FIFO not empty!\n");
+ timeout = jiffies + msecs_to_jiffies(LPC32XX_DMA_TIMEOUT);
+ while ((readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO) &&
+ time_before(jiffies, timeout))
+ cpu_relax();
+ if (!time_before(jiffies, timeout)) {
+ dev_err(mtd->dev.parent, "FIFO held data too long\n");
+ status = -EIO;
+ }
+ }
+
+ /* Read last calculated ECC value */
+ if (!read)
+ udelay(10);
+ host->ecc_buf[chip->ecc.steps - 1] =
+ readl(SLC_ECC(host->io_base));
+
+ /* Flush DMA */
+ dmaengine_terminate_all(host->dma_chan);
+
+ if (readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO ||
+ readl(SLC_TC(host->io_base))) {
+ /* Something is left in the FIFO, something is wrong */
+ dev_err(mtd->dev.parent, "DMA FIFO failure\n");
+ status = -EIO;
+ }
+
+ /* Stop DMA & HW ECC */
+ writel(readl(SLC_CTRL(host->io_base)) & ~SLCCTRL_DMA_START,
+ SLC_CTRL(host->io_base));
+ writel(readl(SLC_CFG(host->io_base)) &
+ ~(SLCCFG_DMA_DIR | SLCCFG_ECC_EN | SLCCFG_DMA_ECC |
+ SLCCFG_DMA_BURST), SLC_CFG(host->io_base));
+
+ if (!dma_mapped && read)
+ memcpy(buf, host->data_buf, mtd->writesize);
+
+ return status;
+}
+
+/*
+ * Read the data and OOB data from the device, use ECC correction with the
+ * data, disable ECC for the OOB data
+ */
+static int lpc32xx_nand_read_page_syndrome(struct mtd_info *mtd,
+ struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ struct lpc32xx_nand_host *host = chip->priv;
+ int stat, i, status;
+ uint8_t *oobecc, tmpecc[LPC32XX_ECC_SAVE_SIZE];
+
+ /* Issue read command */
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+
+ /* Read data and oob, calculate ECC */
+ status = lpc32xx_xfer(mtd, buf, chip->ecc.steps, 1);
+
+ /* Get OOB data */
+ chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ /* Convert to stored ECC format */
+ lpc32xx_slc_ecc_copy(tmpecc, (uint32_t *) host->ecc_buf, chip->ecc.steps);
+
+ /* Pointer to ECC data retrieved from NAND spare area */
+ oobecc = chip->oob_poi + chip->ecc.layout->eccpos[0];
+
+ for (i = 0; i < chip->ecc.steps; i++) {
+ stat = chip->ecc.correct(mtd, buf, oobecc,
+ &tmpecc[i * chip->ecc.bytes]);
+ if (stat < 0)
+ mtd->ecc_stats.failed++;
+ else
+ mtd->ecc_stats.corrected += stat;
+
+ buf += chip->ecc.size;
+ oobecc += chip->ecc.bytes;
+ }
+
+ return status;
+}
+
+/*
+ * Read the data and OOB data from the device, no ECC correction with the
+ * data or OOB data
+ */
+static int lpc32xx_nand_read_page_raw_syndrome(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ uint8_t *buf, int oob_required,
+ int page)
+{
+ /* Issue read command */
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+
+ /* Raw reads can just use the FIFO interface */
+ chip->read_buf(mtd, buf, chip->ecc.size * chip->ecc.steps);
+ chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ return 0;
+}
+
+/*
+ * Write the data and OOB data to the device, use ECC with the data,
+ * disable ECC for the OOB data
+ */
+static int lpc32xx_nand_write_page_syndrome(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const uint8_t *buf, int oob_required)
+{
+ struct lpc32xx_nand_host *host = chip->priv;
+ uint8_t *pb = chip->oob_poi + chip->ecc.layout->eccpos[0];
+ int error;
+
+ /* Write data, calculate ECC on outbound data */
+ error = lpc32xx_xfer(mtd, (uint8_t *)buf, chip->ecc.steps, 0);
+ if (error)
+ return error;
+
+	/*
+	 * The calculated ECC needs some manual work done to it before
+	 * committing it to NAND. Process the calculated ECC and place
+	 * the resultant values directly into the OOB buffer.
+	 */
+ lpc32xx_slc_ecc_copy(pb, (uint32_t *)host->ecc_buf, chip->ecc.steps);
+
+ /* Write ECC data to device */
+ chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+ return 0;
+}
+
+/*
+ * Write the data and OOB data to the device, no ECC correction with the
+ * data or OOB data
+ */
+static int lpc32xx_nand_write_page_raw_syndrome(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const uint8_t *buf,
+ int oob_required)
+{
+ /* Raw writes can just use the FIFO interface */
+ chip->write_buf(mtd, buf, chip->ecc.size * chip->ecc.steps);
+ chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+ return 0;
+}
+
+static int lpc32xx_nand_dma_setup(struct lpc32xx_nand_host *host)
+{
+ struct mtd_info *mtd = &host->mtd;
+ dma_cap_mask_t mask;
+
+ if (!host->pdata || !host->pdata->dma_filter) {
+ dev_err(mtd->dev.parent, "no DMA platform data\n");
+ return -ENOENT;
+ }
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ host->dma_chan = dma_request_channel(mask, host->pdata->dma_filter,
+ "nand-slc");
+ if (!host->dma_chan) {
+ dev_err(mtd->dev.parent, "Failed to request DMA channel\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static struct lpc32xx_nand_cfg_slc *lpc32xx_parse_dt(struct device *dev)
+{
+ struct lpc32xx_nand_cfg_slc *ncfg;
+ struct device_node *np = dev->of_node;
+
+ ncfg = devm_kzalloc(dev, sizeof(*ncfg), GFP_KERNEL);
+ if (!ncfg) {
+ dev_err(dev, "could not allocate memory for NAND config\n");
+ return NULL;
+ }
+
+ of_property_read_u32(np, "nxp,wdr-clks", &ncfg->wdr_clks);
+ of_property_read_u32(np, "nxp,wwidth", &ncfg->wwidth);
+ of_property_read_u32(np, "nxp,whold", &ncfg->whold);
+ of_property_read_u32(np, "nxp,wsetup", &ncfg->wsetup);
+ of_property_read_u32(np, "nxp,rdr-clks", &ncfg->rdr_clks);
+ of_property_read_u32(np, "nxp,rwidth", &ncfg->rwidth);
+ of_property_read_u32(np, "nxp,rhold", &ncfg->rhold);
+ of_property_read_u32(np, "nxp,rsetup", &ncfg->rsetup);
+
+ if (!ncfg->wdr_clks || !ncfg->wwidth || !ncfg->whold ||
+ !ncfg->wsetup || !ncfg->rdr_clks || !ncfg->rwidth ||
+ !ncfg->rhold || !ncfg->rsetup) {
+ dev_err(dev, "chip parameters not specified correctly\n");
+ return NULL;
+ }
+
+ ncfg->use_bbt = of_get_nand_on_flash_bbt(np);
+ ncfg->wp_gpio = of_get_named_gpio(np, "gpios", 0);
+
+ return ncfg;
+}
+
+/*
+ * Probe for NAND controller
+ */
+static int __devinit lpc32xx_nand_probe(struct platform_device *pdev)
+{
+ struct lpc32xx_nand_host *host;
+ struct mtd_info *mtd;
+ struct nand_chip *chip;
+ struct resource *rc;
+ struct mtd_part_parser_data ppdata = {};
+ int res;
+
+ rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (rc == NULL) {
+ dev_err(&pdev->dev, "No memory resource found for device\n");
+ return -EBUSY;
+ }
+
+ /* Allocate memory for the device structure (and zero it) */
+ host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
+ if (!host) {
+ dev_err(&pdev->dev, "failed to allocate device structure\n");
+ return -ENOMEM;
+ }
+ host->io_base_dma = rc->start;
+
+ host->io_base = devm_request_and_ioremap(&pdev->dev, rc);
+ if (host->io_base == NULL) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ return -ENOMEM;
+ }
+
+ if (pdev->dev.of_node)
+ host->ncfg = lpc32xx_parse_dt(&pdev->dev);
+ if (!host->ncfg) {
+ dev_err(&pdev->dev,
+ "Missing or bad NAND config from device tree\n");
+ return -ENOENT;
+ }
+ if (host->ncfg->wp_gpio == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ if (gpio_is_valid(host->ncfg->wp_gpio) &&
+ gpio_request(host->ncfg->wp_gpio, "NAND WP")) {
+ dev_err(&pdev->dev, "GPIO not available\n");
+ return -EBUSY;
+ }
+ lpc32xx_wp_disable(host);
+
+ host->pdata = pdev->dev.platform_data;
+
+ mtd = &host->mtd;
+ chip = &host->nand_chip;
+ chip->priv = host;
+ mtd->priv = chip;
+ mtd->owner = THIS_MODULE;
+ mtd->dev.parent = &pdev->dev;
+
+ /* Get NAND clock */
+ host->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(host->clk)) {
+ dev_err(&pdev->dev, "Clock failure\n");
+ res = -ENOENT;
+ goto err_exit1;
+ }
+ clk_enable(host->clk);
+
+ /* Set NAND IO addresses and command/ready functions */
+ chip->IO_ADDR_R = SLC_DATA(host->io_base);
+ chip->IO_ADDR_W = SLC_DATA(host->io_base);
+ chip->cmd_ctrl = lpc32xx_nand_cmd_ctrl;
+ chip->dev_ready = lpc32xx_nand_device_ready;
+ chip->chip_delay = 20; /* 20us command delay time */
+
+ /* Init NAND controller */
+ lpc32xx_nand_setup(host);
+
+ platform_set_drvdata(pdev, host);
+
+ /* NAND callbacks for LPC32xx SLC hardware */
+ chip->ecc.mode = NAND_ECC_HW_SYNDROME;
+ chip->read_byte = lpc32xx_nand_read_byte;
+ chip->read_buf = lpc32xx_nand_read_buf;
+ chip->write_buf = lpc32xx_nand_write_buf;
+ chip->ecc.read_page_raw = lpc32xx_nand_read_page_raw_syndrome;
+ chip->ecc.read_page = lpc32xx_nand_read_page_syndrome;
+ chip->ecc.write_page_raw = lpc32xx_nand_write_page_raw_syndrome;
+ chip->ecc.write_page = lpc32xx_nand_write_page_syndrome;
+ chip->ecc.write_oob = lpc32xx_nand_write_oob_syndrome;
+ chip->ecc.read_oob = lpc32xx_nand_read_oob_syndrome;
+ chip->ecc.calculate = lpc32xx_nand_ecc_calculate;
+ chip->ecc.correct = nand_correct_data;
+ chip->ecc.strength = 1;
+ chip->ecc.hwctl = lpc32xx_nand_ecc_enable;
+
+	/*
+	 * bitflip_threshold defaults to ecc_strength anyway, but it is set
+	 * only later, in add_mtd_device(). While it is still 0, it causes
+	 * bad block table scanning errors in nand_scan_tail(), so set it
+	 * up here.
+	 */
+ mtd->bitflip_threshold = chip->ecc.strength;
+
+ /*
+ * Allocate a large enough buffer for a single huge page plus
+ * extra space for the spare area and ECC storage area
+ */
+ host->dma_buf_len = LPC32XX_DMA_DATA_SIZE + LPC32XX_ECC_SAVE_SIZE;
+ host->data_buf = devm_kzalloc(&pdev->dev, host->dma_buf_len,
+ GFP_KERNEL);
+ if (host->data_buf == NULL) {
+ dev_err(&pdev->dev, "Error allocating memory\n");
+ res = -ENOMEM;
+ goto err_exit2;
+ }
+
+ res = lpc32xx_nand_dma_setup(host);
+ if (res) {
+ res = -EIO;
+ goto err_exit2;
+ }
+
+ /* Find NAND device */
+ if (nand_scan_ident(mtd, 1, NULL)) {
+ res = -ENXIO;
+ goto err_exit3;
+ }
+
+ /* OOB and ECC CPU and DMA work areas */
+ host->ecc_buf = (uint32_t *)(host->data_buf + LPC32XX_DMA_DATA_SIZE);
+
+ /*
+ * Small page FLASH has a unique OOB layout, but large and huge
+ * page FLASH use the standard layout. Small page FLASH uses a
+ * custom BBT marker layout.
+ */
+ if (mtd->writesize <= 512)
+ chip->ecc.layout = &lpc32xx_nand_oob_16;
+
+ /* These sizes remain the same regardless of page size */
+ chip->ecc.size = 256;
+ chip->ecc.bytes = LPC32XX_SLC_DEV_ECC_BYTES;
+ chip->ecc.prepad = chip->ecc.postpad = 0;
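+	/*
+	 * For example, a 2 KiB page thus has 2048 / 256 = 8 ECC steps and
+	 * 8 * 3 = 24 ECC bytes stored in the spare area.
+	 */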
+
+ /* Avoid extra scan if using BBT, setup BBT support */
+ if (host->ncfg->use_bbt) {
+ chip->options |= NAND_SKIP_BBTSCAN;
+ chip->bbt_options |= NAND_BBT_USE_FLASH;
+
+ /*
+ * Use a custom BBT marker setup for small page FLASH that
+ * won't interfere with the ECC layout. Large and huge page
+ * FLASH use the standard layout.
+ */
+ if (mtd->writesize <= 512) {
+ chip->bbt_td = &bbt_smallpage_main_descr;
+ chip->bbt_md = &bbt_smallpage_mirror_descr;
+ }
+ }
+
+ /*
+ * Fill in all the uninitialized function pointers with the defaults.
+ */
+ if (nand_scan_tail(mtd)) {
+ res = -ENXIO;
+ goto err_exit3;
+ }
+
+ /* Standard layout in FLASH for bad block tables */
+ if (host->ncfg->use_bbt) {
+ if (nand_default_bbt(mtd) < 0)
+ dev_err(&pdev->dev,
+ "Error initializing default bad block tables\n");
+ }
+
+ mtd->name = "nxp_lpc3220_slc";
+ ppdata.of_node = pdev->dev.of_node;
+ res = mtd_device_parse_register(mtd, NULL, &ppdata, host->ncfg->parts,
+ host->ncfg->num_parts);
+ if (!res)
+ return res;
+
+ nand_release(mtd);
+
+err_exit3:
+ dma_release_channel(host->dma_chan);
+err_exit2:
+ clk_disable(host->clk);
+ clk_put(host->clk);
+ platform_set_drvdata(pdev, NULL);
+err_exit1:
+ lpc32xx_wp_enable(host);
+ gpio_free(host->ncfg->wp_gpio);
+
+ return res;
+}
+
+/*
+ * Remove NAND device.
+ */
+static int __devexit lpc32xx_nand_remove(struct platform_device *pdev)
+{
+ uint32_t tmp;
+ struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
+ struct mtd_info *mtd = &host->mtd;
+
+ nand_release(mtd);
+ dma_release_channel(host->dma_chan);
+
+	/* Force CE high (the CE_LOW bit lives in SLC_CFG, not SLC_CTRL) */
+	tmp = readl(SLC_CFG(host->io_base));
+	tmp &= ~SLCCFG_CE_LOW;
+	writel(tmp, SLC_CFG(host->io_base));
+
+ clk_disable(host->clk);
+ clk_put(host->clk);
+ platform_set_drvdata(pdev, NULL);
+ lpc32xx_wp_enable(host);
+ gpio_free(host->ncfg->wp_gpio);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int lpc32xx_nand_resume(struct platform_device *pdev)
+{
+ struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
+
+ /* Re-enable NAND clock */
+ clk_enable(host->clk);
+
+ /* Fresh init of NAND controller */
+ lpc32xx_nand_setup(host);
+
+ /* Disable write protect */
+ lpc32xx_wp_disable(host);
+
+ return 0;
+}
+
+static int lpc32xx_nand_suspend(struct platform_device *pdev, pm_message_t pm)
+{
+ uint32_t tmp;
+ struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
+
+	/* Force CE high (the CE_LOW bit lives in SLC_CFG, not SLC_CTRL) */
+	tmp = readl(SLC_CFG(host->io_base));
+	tmp &= ~SLCCFG_CE_LOW;
+	writel(tmp, SLC_CFG(host->io_base));
+
+ /* Enable write protect for safety */
+ lpc32xx_wp_enable(host);
+
+ /* Disable clock */
+ clk_disable(host->clk);
+
+ return 0;
+}
+
+#else
+#define lpc32xx_nand_resume NULL
+#define lpc32xx_nand_suspend NULL
+#endif
+
+static const struct of_device_id lpc32xx_nand_match[] = {
+ { .compatible = "nxp,lpc3220-slc" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, lpc32xx_nand_match);
+
+static struct platform_driver lpc32xx_nand_driver = {
+ .probe = lpc32xx_nand_probe,
+ .remove = __devexit_p(lpc32xx_nand_remove),
+ .resume = lpc32xx_nand_resume,
+ .suspend = lpc32xx_nand_suspend,
+ .driver = {
+ .name = LPC32XX_MODNAME,
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(lpc32xx_nand_match),
+ },
+};
+
+module_platform_driver(lpc32xx_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Kevin Wells <kevin.wells@nxp.com>");
+MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
+MODULE_DESCRIPTION("NAND driver for the NXP LPC32XX SLC controller");
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c
index c259c24..f776c85 100644
--- a/drivers/mtd/nand/mpc5121_nfc.c
+++ b/drivers/mtd/nand/mpc5121_nfc.c
@@ -506,27 +506,6 @@ static void mpc5121_nfc_write_buf(struct mtd_info *mtd,
mpc5121_nfc_buf_copy(mtd, (u_char *)buf, len, 1);
}
-/* Compare buffer with NAND flash */
-static int mpc5121_nfc_verify_buf(struct mtd_info *mtd,
- const u_char *buf, int len)
-{
- u_char tmp[256];
- uint bsize;
-
- while (len) {
- bsize = min(len, 256);
- mpc5121_nfc_read_buf(mtd, tmp, bsize);
-
- if (memcmp(buf, tmp, bsize))
- return 1;
-
- buf += bsize;
- len -= bsize;
- }
-
- return 0;
-}
-
/* Read byte from NFC buffers */
static u8 mpc5121_nfc_read_byte(struct mtd_info *mtd)
{
@@ -732,7 +711,6 @@ static int __devinit mpc5121_nfc_probe(struct platform_device *op)
chip->read_word = mpc5121_nfc_read_word;
chip->read_buf = mpc5121_nfc_read_buf;
chip->write_buf = mpc5121_nfc_write_buf;
- chip->verify_buf = mpc5121_nfc_verify_buf;
chip->select_chip = mpc5121_nfc_select_chip;
chip->bbt_options = NAND_BBT_USE_FLASH;
chip->ecc.mode = NAND_ECC_SOFT;
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 5683604..72e31d8 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -43,8 +43,8 @@
#define nfc_is_v21() (cpu_is_mx25() || cpu_is_mx35())
#define nfc_is_v1() (cpu_is_mx31() || cpu_is_mx27() || cpu_is_mx21())
-#define nfc_is_v3_2() (cpu_is_mx51() || cpu_is_mx53())
-#define nfc_is_v3() nfc_is_v3_2()
+#define nfc_is_v3_2a() cpu_is_mx51()
+#define nfc_is_v3_2b() cpu_is_mx53()
/* Addresses for NFC registers */
#define NFC_V1_V2_BUF_SIZE (host->regs + 0x00)
@@ -122,7 +122,7 @@
#define NFC_V3_CONFIG2_2CMD_PHASES (1 << 4)
#define NFC_V3_CONFIG2_NUM_ADDR_PHASE0 (1 << 5)
#define NFC_V3_CONFIG2_ECC_MODE_8 (1 << 6)
-#define NFC_V3_CONFIG2_PPB(x) (((x) & 0x3) << 7)
+#define NFC_V3_CONFIG2_PPB(x, shift) (((x) & 0x3) << shift)
#define NFC_V3_CONFIG2_NUM_ADDR_PHASE1(x) (((x) & 0x3) << 12)
#define NFC_V3_CONFIG2_INT_MSK (1 << 15)
#define NFC_V3_CONFIG2_ST_CMD(x) (((x) & 0xff) << 24)
@@ -174,6 +174,7 @@ struct mxc_nand_devtype_data {
int spare_len;
int eccbytes;
int eccsize;
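+	/* bit position of the pages-per-block field in NFC_V3_CONFIG2 */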
+ int ppb_shift;
};
struct mxc_nand_host {
@@ -745,14 +746,6 @@ static void mxc_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
host->buf_start += n;
}
-/* Used by the upper layer to verify the data in NAND Flash
- * with the data in the buf. */
-static int mxc_nand_verify_buf(struct mtd_info *mtd,
- const u_char *buf, int len)
-{
- return -EFAULT;
-}
-
/* This function is used by upper layer for select and
* deselect of the NAND chip */
static void mxc_nand_select_chip_v1_v3(struct mtd_info *mtd, int chip)
@@ -784,7 +777,7 @@ static void mxc_nand_select_chip_v2(struct mtd_info *mtd, int chip)
if (chip == -1) {
/* Disable the NFC clock */
if (host->clk_act) {
- clk_disable(host->clk);
+ clk_disable_unprepare(host->clk);
host->clk_act = 0;
}
return;
@@ -792,7 +785,7 @@ static void mxc_nand_select_chip_v2(struct mtd_info *mtd, int chip)
if (!host->clk_act) {
/* Enable the NFC clock */
- clk_enable(host->clk);
+ clk_prepare_enable(host->clk);
host->clk_act = 1;
}
@@ -1021,7 +1014,9 @@ static void preset_v3(struct mtd_info *mtd)
}
if (mtd->writesize) {
- config2 |= NFC_V3_CONFIG2_PPB(ffs(mtd->erasesize / mtd->writesize) - 6);
+ config2 |= NFC_V3_CONFIG2_PPB(
+ ffs(mtd->erasesize / mtd->writesize) - 6,
+ host->devtype_data->ppb_shift);
host->eccsize = get_eccsize(mtd);
if (host->eccsize == 8)
config2 |= NFC_V3_CONFIG2_ECC_MODE_8;
@@ -1234,7 +1229,7 @@ static const struct mxc_nand_devtype_data imx25_nand_devtype_data = {
.eccsize = 0,
};
-/* v3: i.MX51, i.MX53 */
+/* v3.2a: i.MX51 */
static const struct mxc_nand_devtype_data imx51_nand_devtype_data = {
.preset = preset_v3,
.send_cmd = send_cmd_v3,
@@ -1258,6 +1253,34 @@ static const struct mxc_nand_devtype_data imx51_nand_devtype_data = {
.spare_len = 64,
.eccbytes = 0,
.eccsize = 0,
+ .ppb_shift = 7,
+};
+
+/* v3.2b: i.MX53 */
+static const struct mxc_nand_devtype_data imx53_nand_devtype_data = {
+ .preset = preset_v3,
+ .send_cmd = send_cmd_v3,
+ .send_addr = send_addr_v3,
+ .send_page = send_page_v3,
+ .send_read_id = send_read_id_v3,
+ .get_dev_status = get_dev_status_v3,
+ .check_int = check_int_v3,
+ .irq_control = irq_control_v3,
+ .get_ecc_status = get_ecc_status_v3,
+ .ecclayout_512 = &nandv2_hw_eccoob_smallpage,
+ .ecclayout_2k = &nandv2_hw_eccoob_largepage,
+ .ecclayout_4k = &nandv2_hw_eccoob_smallpage, /* XXX: needs fix */
+ .select_chip = mxc_nand_select_chip_v1_v3,
+ .correct_data = mxc_nand_correct_data_v2_v3,
+ .irqpending_quirk = 0,
+ .needs_ip = 1,
+ .regs_offset = 0,
+ .spare0_offset = 0x1000,
+ .axi_offset = 0x1e00,
+ .spare_len = 64,
+ .eccbytes = 0,
+ .eccsize = 0,
+ .ppb_shift = 8,
};
#ifdef CONFIG_OF_MTD
@@ -1274,6 +1297,9 @@ static const struct of_device_id mxcnd_dt_ids[] = {
}, {
.compatible = "fsl,imx51-nand",
.data = &imx51_nand_devtype_data,
+ }, {
+ .compatible = "fsl,imx53-nand",
+ .data = &imx53_nand_devtype_data,
},
{ /* sentinel */ }
};
@@ -1327,15 +1353,17 @@ static int __init mxcnd_probe_pdata(struct mxc_nand_host *host)
host->devtype_data = &imx27_nand_devtype_data;
} else if (nfc_is_v21()) {
host->devtype_data = &imx25_nand_devtype_data;
- } else if (nfc_is_v3_2()) {
+ } else if (nfc_is_v3_2a()) {
host->devtype_data = &imx51_nand_devtype_data;
+ } else if (nfc_is_v3_2b()) {
+ host->devtype_data = &imx53_nand_devtype_data;
} else
BUG();
return 0;
}
-static int __init mxcnd_probe(struct platform_device *pdev)
+static int __devinit mxcnd_probe(struct platform_device *pdev)
{
struct nand_chip *this;
struct mtd_info *mtd;
@@ -1344,8 +1372,8 @@ static int __init mxcnd_probe(struct platform_device *pdev)
int err = 0;
/* Allocate memory for MTD device structure and private data */
- host = kzalloc(sizeof(struct mxc_nand_host) + NAND_MAX_PAGESIZE +
- NAND_MAX_OOBSIZE, GFP_KERNEL);
+ host = devm_kzalloc(&pdev->dev, sizeof(struct mxc_nand_host) +
+ NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE, GFP_KERNEL);
if (!host)
return -ENOMEM;
@@ -1370,36 +1398,38 @@ static int __init mxcnd_probe(struct platform_device *pdev)
this->read_word = mxc_nand_read_word;
this->write_buf = mxc_nand_write_buf;
this->read_buf = mxc_nand_read_buf;
- this->verify_buf = mxc_nand_verify_buf;
- host->clk = clk_get(&pdev->dev, "nfc");
- if (IS_ERR(host->clk)) {
- err = PTR_ERR(host->clk);
- goto eclk;
- }
+ host->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(host->clk))
+ return PTR_ERR(host->clk);
- clk_prepare_enable(host->clk);
- host->clk_act = 1;
+ err = mxcnd_probe_dt(host);
+ if (err > 0)
+ err = mxcnd_probe_pdata(host);
+ if (err < 0)
+ return err;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- err = -ENODEV;
- goto eres;
- }
+ if (host->devtype_data->needs_ip) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+ host->regs_ip = devm_request_and_ioremap(&pdev->dev, res);
+ if (!host->regs_ip)
+ return -ENOMEM;
- host->base = ioremap(res->start, resource_size(res));
- if (!host->base) {
- err = -ENOMEM;
- goto eres;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ } else {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
}
- host->main_area0 = host->base;
+ if (!res)
+ return -ENODEV;
- err = mxcnd_probe_dt(host);
- if (err > 0)
- err = mxcnd_probe_pdata(host);
- if (err < 0)
- goto eirq;
+ host->base = devm_request_and_ioremap(&pdev->dev, res);
+ if (!host->base)
+ return -ENOMEM;
+
+ host->main_area0 = host->base;
if (host->devtype_data->regs_offset)
host->regs = host->base + host->devtype_data->regs_offset;
@@ -1414,19 +1444,6 @@ static int __init mxcnd_probe(struct platform_device *pdev)
this->ecc.size = 512;
this->ecc.layout = host->devtype_data->ecclayout_512;
- if (host->devtype_data->needs_ip) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!res) {
- err = -ENODEV;
- goto eirq;
- }
- host->regs_ip = ioremap(res->start, resource_size(res));
- if (!host->regs_ip) {
- err = -ENOMEM;
- goto eirq;
- }
- }
-
if (host->pdata.hw_ecc) {
this->ecc.calculate = mxc_nand_calculate_ecc;
this->ecc.hwctl = mxc_nand_enable_hwecc;
@@ -1458,9 +1475,13 @@ static int __init mxcnd_probe(struct platform_device *pdev)
*/
host->devtype_data->irq_control(host, 0);
- err = request_irq(host->irq, mxc_nfc_irq, IRQF_DISABLED, DRIVER_NAME, host);
+ err = devm_request_irq(&pdev->dev, host->irq, mxc_nfc_irq,
+ IRQF_DISABLED, DRIVER_NAME, host);
if (err)
- goto eirq;
+ return err;
+
+ clk_prepare_enable(host->clk);
+ host->clk_act = 1;
/*
* Now that we "own" the interrupt make sure the interrupt mask bit is
@@ -1512,15 +1533,7 @@ static int __init mxcnd_probe(struct platform_device *pdev)
return 0;
escan:
- free_irq(host->irq, host);
-eirq:
- if (host->regs_ip)
- iounmap(host->regs_ip);
- iounmap(host->base);
-eres:
- clk_put(host->clk);
-eclk:
- kfree(host);
+ clk_disable_unprepare(host->clk);
return err;
}
@@ -1529,16 +1542,9 @@ static int __devexit mxcnd_remove(struct platform_device *pdev)
{
struct mxc_nand_host *host = platform_get_drvdata(pdev);
- clk_put(host->clk);
-
platform_set_drvdata(pdev, NULL);
nand_release(&host->mtd);
- free_irq(host->irq, host);
- if (host->regs_ip)
- iounmap(host->regs_ip);
- iounmap(host->base);
- kfree(host);
return 0;
}
@@ -1549,22 +1555,10 @@ static struct platform_driver mxcnd_driver = {
.owner = THIS_MODULE,
.of_match_table = of_match_ptr(mxcnd_dt_ids),
},
+ .probe = mxcnd_probe,
.remove = __devexit_p(mxcnd_remove),
};
-
-static int __init mxc_nd_init(void)
-{
- return platform_driver_probe(&mxcnd_driver, mxcnd_probe);
-}
-
-static void __exit mxc_nd_cleanup(void)
-{
- /* Unregister the device structure */
- platform_driver_unregister(&mxcnd_driver);
-}
-
-module_init(mxc_nd_init);
-module_exit(mxc_nd_cleanup);
+module_platform_driver(mxcnd_driver);
MODULE_AUTHOR("Freescale Semiconductor, Inc.");
MODULE_DESCRIPTION("MXC NAND MTD driver");
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index a11253a0..ec6841d 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -243,25 +243,6 @@ static void nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
}
/**
- * nand_verify_buf - [DEFAULT] Verify chip data against buffer
- * @mtd: MTD device structure
- * @buf: buffer containing the data to compare
- * @len: number of bytes to compare
- *
- * Default verify function for 8bit buswidth.
- */
-static int nand_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
-{
- int i;
- struct nand_chip *chip = mtd->priv;
-
- for (i = 0; i < len; i++)
- if (buf[i] != readb(chip->IO_ADDR_R))
- return -EFAULT;
- return 0;
-}
-
-/**
* nand_write_buf16 - [DEFAULT] write buffer to chip
* @mtd: MTD device structure
* @buf: data buffer
@@ -301,28 +282,6 @@ static void nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
}
/**
- * nand_verify_buf16 - [DEFAULT] Verify chip data against buffer
- * @mtd: MTD device structure
- * @buf: buffer containing the data to compare
- * @len: number of bytes to compare
- *
- * Default verify function for 16bit buswidth.
- */
-static int nand_verify_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
-{
- int i;
- struct nand_chip *chip = mtd->priv;
- u16 *p = (u16 *) buf;
- len >>= 1;
-
- for (i = 0; i < len; i++)
- if (p[i] != readw(chip->IO_ADDR_R))
- return -EFAULT;
-
- return 0;
-}
-
-/**
* nand_block_bad - [DEFAULT] Read bad block marker from the chip
* @mtd: MTD device structure
* @ofs: offset from device start
@@ -1525,7 +1484,8 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
ret = chip->ecc.read_page_raw(mtd, chip, bufpoi,
oob_required,
page);
- else if (!aligned && NAND_SUBPAGE_READ(chip) && !oob)
+ else if (!aligned && NAND_HAS_SUBPAGE_READ(chip) &&
+ !oob)
ret = chip->ecc.read_subpage(mtd, chip,
col, bytes, bufpoi);
else
@@ -1542,7 +1502,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
/* Transfer not aligned data */
if (!aligned) {
- if (!NAND_SUBPAGE_READ(chip) && !oob &&
+ if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
!(mtd->ecc_stats.failed - stats.failed) &&
(ops->mode != MTD_OPS_RAW)) {
chip->pagebuf = realpage;
@@ -1565,14 +1525,6 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
oobreadlen -= toread;
}
}
-
- if (!(chip->options & NAND_NO_READRDY)) {
- /* Apply delay or wait for ready/busy pin */
- if (!chip->dev_ready)
- udelay(chip->chip_delay);
- else
- nand_wait_ready(mtd);
- }
} else {
memcpy(buf, chip->buffers->databuf + col, bytes);
buf += bytes;
@@ -1633,7 +1585,7 @@ static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
ops.len = len;
ops.datbuf = buf;
ops.oobbuf = NULL;
- ops.mode = 0;
+ ops.mode = MTD_OPS_PLACE_OOB;
ret = nand_do_read_ops(mtd, from, &ops);
*retlen = ops.retlen;
nand_release_device(mtd);
@@ -1837,14 +1789,6 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
len = min(len, readlen);
buf = nand_transfer_oob(chip, buf, ops, len);
- if (!(chip->options & NAND_NO_READRDY)) {
- /* Apply delay or wait for ready/busy pin */
- if (!chip->dev_ready)
- udelay(chip->chip_delay);
- else
- nand_wait_ready(mtd);
- }
-
readlen -= len;
if (!readlen)
break;
@@ -1927,12 +1871,14 @@ out:
*
* Not for syndrome calculating ECC controllers, which use a special oob layout.
*/
-static void nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+static int nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *buf, int oob_required)
{
chip->write_buf(mtd, buf, mtd->writesize);
if (oob_required)
chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ return 0;
}
/**
@@ -1944,7 +1890,7 @@ static void nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
*
* We need a special oob layout and handling even when ECC isn't checked.
*/
-static void nand_write_page_raw_syndrome(struct mtd_info *mtd,
+static int nand_write_page_raw_syndrome(struct mtd_info *mtd,
struct nand_chip *chip,
const uint8_t *buf, int oob_required)
{
@@ -1974,6 +1920,8 @@ static void nand_write_page_raw_syndrome(struct mtd_info *mtd,
size = mtd->oobsize - (oob - chip->oob_poi);
if (size)
chip->write_buf(mtd, oob, size);
+
+ return 0;
}
/**
* nand_write_page_swecc - [REPLACEABLE] software ECC based page write function
@@ -1982,7 +1930,7 @@ static void nand_write_page_raw_syndrome(struct mtd_info *mtd,
* @buf: data buffer
* @oob_required: must write chip->oob_poi to OOB
*/
-static void nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
+static int nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *buf, int oob_required)
{
int i, eccsize = chip->ecc.size;
@@ -1999,7 +1947,7 @@ static void nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
for (i = 0; i < chip->ecc.total; i++)
chip->oob_poi[eccpos[i]] = ecc_calc[i];
- chip->ecc.write_page_raw(mtd, chip, buf, 1);
+ return chip->ecc.write_page_raw(mtd, chip, buf, 1);
}
/**
@@ -2009,7 +1957,7 @@ static void nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
* @buf: data buffer
* @oob_required: must write chip->oob_poi to OOB
*/
-static void nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
+static int nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *buf, int oob_required)
{
int i, eccsize = chip->ecc.size;
@@ -2029,6 +1977,8 @@ static void nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
chip->oob_poi[eccpos[i]] = ecc_calc[i];
chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ return 0;
}
/**
@@ -2041,7 +1991,7 @@ static void nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
* The hw generator calculates the error syndrome automatically. Therefore we
* need a special oob layout and handling.
*/
-static void nand_write_page_syndrome(struct mtd_info *mtd,
+static int nand_write_page_syndrome(struct mtd_info *mtd,
struct nand_chip *chip,
const uint8_t *buf, int oob_required)
{
@@ -2075,6 +2025,8 @@ static void nand_write_page_syndrome(struct mtd_info *mtd,
i = mtd->oobsize - (oob - chip->oob_poi);
if (i)
chip->write_buf(mtd, oob, i);
+
+ return 0;
}
/**
@@ -2096,9 +2048,12 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
if (unlikely(raw))
- chip->ecc.write_page_raw(mtd, chip, buf, oob_required);
+ status = chip->ecc.write_page_raw(mtd, chip, buf, oob_required);
else
- chip->ecc.write_page(mtd, chip, buf, oob_required);
+ status = chip->ecc.write_page(mtd, chip, buf, oob_required);
+
+ if (status < 0)
+ return status;
/*
* Cached programming disabled for now. Not sure if it's worth the
@@ -2125,16 +2080,6 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
status = chip->waitfunc(mtd, chip);
}
-#ifdef CONFIG_MTD_NAND_VERIFY_WRITE
- /* Send command to read back the data */
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
-
- if (chip->verify_buf(mtd, buf, mtd->writesize))
- return -EIO;
-
- /* Make sure the next page prog is preceded by a status read */
- chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
-#endif
return 0;
}
@@ -2336,7 +2281,7 @@ static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
ops.len = len;
ops.datbuf = (uint8_t *)buf;
ops.oobbuf = NULL;
- ops.mode = 0;
+ ops.mode = MTD_OPS_PLACE_OOB;
ret = nand_do_write_ops(mtd, to, &ops);
@@ -2365,7 +2310,7 @@ static int nand_write(struct mtd_info *mtd, loff_t to, size_t len,
ops.len = len;
ops.datbuf = (uint8_t *)buf;
ops.oobbuf = NULL;
- ops.mode = 0;
+ ops.mode = MTD_OPS_PLACE_OOB;
ret = nand_do_write_ops(mtd, to, &ops);
*retlen = ops.retlen;
nand_release_device(mtd);
@@ -2755,6 +2700,50 @@ static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
}
/**
+ * nand_onfi_set_features - [REPLACEABLE] set features for ONFI nand
+ * @mtd: MTD device structure
+ * @chip: nand chip info structure
+ * @addr: feature address.
+ * @subfeature_param: the subfeature parameters, a four-byte array.
+ */
+static int nand_onfi_set_features(struct mtd_info *mtd, struct nand_chip *chip,
+ int addr, uint8_t *subfeature_param)
+{
+ int status;
+
+ if (!chip->onfi_version)
+ return -EINVAL;
+
+ chip->cmdfunc(mtd, NAND_CMD_SET_FEATURES, addr, -1);
+ chip->write_buf(mtd, subfeature_param, ONFI_SUBFEATURE_PARAM_LEN);
+ status = chip->waitfunc(mtd, chip);
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+ return 0;
+}
+
+/**
+ * nand_onfi_get_features - [REPLACEABLE] get features for ONFI nand
+ * @mtd: MTD device structure
+ * @chip: nand chip info structure
+ * @addr: feature address.
+ * @subfeature_param: the subfeature parameters, a four-byte array.
+ */
+static int nand_onfi_get_features(struct mtd_info *mtd, struct nand_chip *chip,
+ int addr, uint8_t *subfeature_param)
+{
+ if (!chip->onfi_version)
+ return -EINVAL;
+
+ /* clear the sub feature parameters */
+ memset(subfeature_param, 0, ONFI_SUBFEATURE_PARAM_LEN);
+
+ chip->cmdfunc(mtd, NAND_CMD_GET_FEATURES, addr, -1);
+ chip->read_buf(mtd, subfeature_param, ONFI_SUBFEATURE_PARAM_LEN);
+ return 0;
+}
+
+/**
* nand_suspend - [MTD Interface] Suspend the NAND flash
* @mtd: MTD device structure
*/
@@ -2809,8 +2798,6 @@ static void nand_set_defaults(struct nand_chip *chip, int busw)
chip->write_buf = busw ? nand_write_buf16 : nand_write_buf;
if (!chip->read_buf)
chip->read_buf = busw ? nand_read_buf16 : nand_read_buf;
- if (!chip->verify_buf)
- chip->verify_buf = busw ? nand_verify_buf16 : nand_verify_buf;
if (!chip->scan_bbt)
chip->scan_bbt = nand_default_bbt;
@@ -2914,14 +2901,250 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
if (le16_to_cpu(p->features) & 1)
*busw = NAND_BUSWIDTH_16;
- chip->options &= ~NAND_CHIPOPTIONS_MSK;
- chip->options |= NAND_NO_READRDY & NAND_CHIPOPTIONS_MSK;
-
pr_info("ONFI flash detected\n");
return 1;
}
/*
+ * nand_id_has_period - Check if an ID string has a given wraparound period
+ * @id_data: the ID string
+ * @arrlen: the length of the @id_data array
+ * @period: the period of repetition
+ *
+ * Check if an ID string is repeated within a given sequence of bytes at
+ * specific repetition interval period (e.g., {0x20,0x01,0x7F,0x20} has a
+ * period of 3). This is a helper function for nand_id_len(). Returns non-zero
+ * if the repetition has a period of @period; otherwise, returns zero.
+ */
+static int nand_id_has_period(u8 *id_data, int arrlen, int period)
+{
+ int i, j;
+ for (i = 0; i < period; i++)
+ for (j = i + period; j < arrlen; j += period)
+ if (id_data[i] != id_data[j])
+ return 0;
+ return 1;
+}
+
+/*
+ * nand_id_len - Get the length of an ID string returned by CMD_READID
+ * @id_data: the ID string
+ * @arrlen: the length of the @id_data array
+ *
+ * Returns the length of the ID string, according to known wraparound/trailing
+ * zero patterns. If no pattern exists, returns the length of the array.
+ */
+static int nand_id_len(u8 *id_data, int arrlen)
+{
+ int last_nonzero, period;
+
+ /* Find last non-zero byte */
+ for (last_nonzero = arrlen - 1; last_nonzero >= 0; last_nonzero--)
+ if (id_data[last_nonzero])
+ break;
+
+ /* All zeros */
+ if (last_nonzero < 0)
+ return 0;
+
+ /* Calculate wraparound period */
+ for (period = 1; period < arrlen; period++)
+ if (nand_id_has_period(id_data, arrlen, period))
+ break;
+
+ /* There's a repeated pattern */
+ if (period < arrlen)
+ return period;
+
+ /* There are trailing zeros */
+ if (last_nonzero < arrlen - 1)
+ return last_nonzero + 1;
+
+ /* No pattern detected */
+ return arrlen;
+}
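
Both helpers are pure byte arithmetic, so the heuristic can be sanity-checked in userspace. A minimal harness, assuming u8 is mapped to uint8_t and the two functions above are pasted in verbatim:

#include <stdint.h>
#include <stdio.h>

typedef uint8_t u8;

/* paste nand_id_has_period() and nand_id_len() here unchanged */

int main(void)
{
	/* ID wraps around after 4 bytes: reported length (period) is 4 */
	u8 wrap[8]  = { 0xec, 0xd3, 0x51, 0x95, 0xec, 0xd3, 0x51, 0x95 };
	/* 5 meaningful bytes, then trailing zeros: reported length is 5 */
	u8 zeros[8] = { 0xec, 0xd3, 0x51, 0x95, 0x58, 0x00, 0x00, 0x00 };

	printf("%d %d\n", nand_id_len(wrap, 8), nand_id_len(zeros, 8)); /* 4 5 */
	return 0;
}

The ID bytes are made up for illustration; only the wraparound and trailing-zero shapes matter.
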
+
+/*
+ * Many newer NAND chips share similar device ID codes, which represent the size of the
+ * chip. The rest of the parameters must be decoded according to generic or
+ * manufacturer-specific "extended ID" decoding patterns.
+ */
+static void nand_decode_ext_id(struct mtd_info *mtd, struct nand_chip *chip,
+ u8 id_data[8], int *busw)
+{
+ int extid, id_len;
+ /* The 3rd id byte holds MLC / multichip data */
+ chip->cellinfo = id_data[2];
+ /* The 4th id byte is the important one */
+ extid = id_data[3];
+
+ id_len = nand_id_len(id_data, 8);
+
+ /*
+ * Field definitions are in the following datasheets:
+ * Old style (4,5 byte ID): Samsung K9GAG08U0M (p.32)
+ * New style (6 byte ID): Samsung K9GAG08U0F (p.44)
+ * Hynix MLC (6 byte ID): Hynix H27UBG8T2B (p.22)
+ *
+ * Check for ID length, cell type, and Hynix/Samsung ID to decide what
+ * to do.
+ */
+ if (id_len == 6 && id_data[0] == NAND_MFR_SAMSUNG) {
+ /* Calc pagesize */
+ mtd->writesize = 2048 << (extid & 0x03);
+ extid >>= 2;
+ /* Calc oobsize */
+ switch (((extid >> 2) & 0x04) | (extid & 0x03)) {
+ case 1:
+ mtd->oobsize = 128;
+ break;
+ case 2:
+ mtd->oobsize = 218;
+ break;
+ case 3:
+ mtd->oobsize = 400;
+ break;
+ case 4:
+ mtd->oobsize = 436;
+ break;
+ case 5:
+ mtd->oobsize = 512;
+ break;
+ case 6:
+ default: /* Other cases are "reserved" (unknown) */
+ mtd->oobsize = 640;
+ break;
+ }
+ extid >>= 2;
+ /* Calc blocksize */
+ mtd->erasesize = (128 * 1024) <<
+ (((extid >> 1) & 0x04) | (extid & 0x03));
+ *busw = 0;
+ } else if (id_len == 6 && id_data[0] == NAND_MFR_HYNIX &&
+ (chip->cellinfo & NAND_CI_CELLTYPE_MSK)) {
+ unsigned int tmp;
+
+ /* Calc pagesize */
+ mtd->writesize = 2048 << (extid & 0x03);
+ extid >>= 2;
+ /* Calc oobsize */
+ switch (((extid >> 2) & 0x04) | (extid & 0x03)) {
+ case 0:
+ mtd->oobsize = 128;
+ break;
+ case 1:
+ mtd->oobsize = 224;
+ break;
+ case 2:
+ mtd->oobsize = 448;
+ break;
+ case 3:
+ mtd->oobsize = 64;
+ break;
+ case 4:
+ mtd->oobsize = 32;
+ break;
+ case 5:
+ mtd->oobsize = 16;
+ break;
+ default:
+ mtd->oobsize = 640;
+ break;
+ }
+ extid >>= 2;
+ /* Calc blocksize */
+ tmp = ((extid >> 1) & 0x04) | (extid & 0x03);
+ if (tmp < 0x03)
+ mtd->erasesize = (128 * 1024) << tmp;
+ else if (tmp == 0x03)
+ mtd->erasesize = 768 * 1024;
+ else
+ mtd->erasesize = (64 * 1024) << tmp;
+ *busw = 0;
+ } else {
+ /* Calc pagesize */
+ mtd->writesize = 1024 << (extid & 0x03);
+ extid >>= 2;
+ /* Calc oobsize */
+ mtd->oobsize = (8 << (extid & 0x01)) *
+ (mtd->writesize >> 9);
+ extid >>= 2;
+ /* Calc blocksize. Blocksize is multiples of 64KiB */
+ mtd->erasesize = (64 * 1024) << (extid & 0x03);
+ extid >>= 2;
+ /* Get buswidth information */
+ *busw = (extid & 0x01) ? NAND_BUSWIDTH_16 : 0;
+ }
+}
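
The final else branch is the classic decoding that most chips hit. Working it through for a hypothetical 4th ID byte of 0x15 shows how the two-bit fields unpack; the snippet below restates that branch standalone rather than quoting driver code:

#include <stdio.h>

int main(void)
{
	int extid = 0x15;	/* hypothetical 4th ID byte */
	int writesize, oobsize, erasesize, busw;

	writesize = 1024 << (extid & 0x03);			/* 2048 */
	extid >>= 2;
	oobsize = (8 << (extid & 0x01)) * (writesize >> 9);	/* 16 * 4 = 64 */
	extid >>= 2;
	erasesize = (64 * 1024) << (extid & 0x03);		/* 128 KiB */
	extid >>= 2;
	busw = (extid & 0x01) ? 16 : 8;				/* 8-bit bus */

	printf("%d %d %d %d\n", writesize, oobsize, erasesize, busw);
	return 0;
}
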
+
+/*
+ * Old devices have chip data hardcoded in the device ID table. nand_decode_id
+ * decodes a matching ID table entry and assigns the MTD size parameters for
+ * the chip.
+ */
+static void nand_decode_id(struct mtd_info *mtd, struct nand_chip *chip,
+ struct nand_flash_dev *type, u8 id_data[8],
+ int *busw)
+{
+ int maf_id = id_data[0];
+
+ mtd->erasesize = type->erasesize;
+ mtd->writesize = type->pagesize;
+ mtd->oobsize = mtd->writesize / 32;
+ *busw = type->options & NAND_BUSWIDTH_16;
+
+ /*
+ * Check for Spansion/AMD ID + repeating 5th, 6th byte since
+ * some Spansion chips have erasesize that conflicts with size
+ * listed in nand_ids table.
+ * Data sheet (5 byte ID): Spansion S30ML-P ORNAND (p.39)
+ */
+ if (maf_id == NAND_MFR_AMD && id_data[4] != 0x00 && id_data[5] == 0x00
+ && id_data[6] == 0x00 && id_data[7] == 0x00
+ && mtd->writesize == 512) {
+ mtd->erasesize = 128 * 1024;
+ mtd->erasesize <<= ((id_data[3] & 0x03) << 1);
+ }
+}
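
Because the low two bits of the 4th ID byte are shifted left by one before being used as the block-size exponent, the Spansion override steps through sizes in factors of four. A quick standalone check of the arithmetic:

#include <stdio.h>

int main(void)
{
	int id3;

	/* erasesize = (128 * 1024) << ((id_data[3] & 0x03) << 1) */
	for (id3 = 0; id3 < 4; id3++)
		printf("id_data[3] & 3 == %d -> %u KiB\n",
		       id3, 128u << ((id3 & 0x03) << 1));
	return 0;
}

This prints 128, 512, 2048 and 8192 KiB for the four possible field values.
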
+
+/*
+ * Set the bad block marker/indicator (BBM/BBI) patterns according to some
+ * heuristic patterns using various detected parameters (e.g., manufacturer,
+ * page size, cell-type information).
+ */
+static void nand_decode_bbm_options(struct mtd_info *mtd,
+ struct nand_chip *chip, u8 id_data[8])
+{
+ int maf_id = id_data[0];
+
+ /* Set the bad block position */
+ if (mtd->writesize > 512 || (chip->options & NAND_BUSWIDTH_16))
+ chip->badblockpos = NAND_LARGE_BADBLOCK_POS;
+ else
+ chip->badblockpos = NAND_SMALL_BADBLOCK_POS;
+
+ /*
+ * Bad block marker is stored in the last page of each block on Samsung
+ * and Hynix MLC devices; stored in first two pages of each block on
+ * Micron devices with 2KiB pages and on SLC Samsung, Hynix, Toshiba,
+ * AMD/Spansion, and Macronix. All others scan only the first page.
+ */
+ if ((chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
+ (maf_id == NAND_MFR_SAMSUNG ||
+ maf_id == NAND_MFR_HYNIX))
+ chip->bbt_options |= NAND_BBT_SCANLASTPAGE;
+ else if ((!(chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
+ (maf_id == NAND_MFR_SAMSUNG ||
+ maf_id == NAND_MFR_HYNIX ||
+ maf_id == NAND_MFR_TOSHIBA ||
+ maf_id == NAND_MFR_AMD ||
+ maf_id == NAND_MFR_MACRONIX)) ||
+ (mtd->writesize == 2048 &&
+ maf_id == NAND_MFR_MICRON))
+ chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
+}
+
+/*
* Get the flash and manufacturer id and lookup if the type is supported.
*/
static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
@@ -2932,7 +3155,6 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
{
int i, maf_idx;
u8 id_data[8];
- int ret;
/* Select the device */
chip->select_chip(mtd, 0);
@@ -2959,7 +3181,8 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
- for (i = 0; i < 2; i++)
+ /* Read entire ID string */
+ for (i = 0; i < 8; i++)
id_data[i] = chip->read_byte(mtd);
if (id_data[0] != *maf_id || id_data[1] != *dev_id) {
@@ -2979,18 +3202,10 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
chip->onfi_version = 0;
if (!type->name || !type->pagesize) {
/* Check if chip is ONFI compliant */
- ret = nand_flash_detect_onfi(mtd, chip, &busw);
- if (ret)
+ if (nand_flash_detect_onfi(mtd, chip, &busw))
goto ident_done;
}
- chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
-
- /* Read entire ID string */
-
- for (i = 0; i < 8; i++)
- id_data[i] = chip->read_byte(mtd);
-
if (!type->name)
return ERR_PTR(-ENODEV);
@@ -3003,86 +3218,13 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
/* Set the pagesize, oobsize, erasesize by the driver */
busw = chip->init_size(mtd, chip, id_data);
} else if (!type->pagesize) {
- int extid;
- /* The 3rd id byte holds MLC / multichip data */
- chip->cellinfo = id_data[2];
- /* The 4th id byte is the important one */
- extid = id_data[3];
-
- /*
- * Field definitions are in the following datasheets:
- * Old style (4,5 byte ID): Samsung K9GAG08U0M (p.32)
- * New style (6 byte ID): Samsung K9GBG08U0M (p.40)
- *
- * Check for wraparound + Samsung ID + nonzero 6th byte
- * to decide what to do.
- */
- if (id_data[0] == id_data[6] && id_data[1] == id_data[7] &&
- id_data[0] == NAND_MFR_SAMSUNG &&
- (chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
- id_data[5] != 0x00) {
- /* Calc pagesize */
- mtd->writesize = 2048 << (extid & 0x03);
- extid >>= 2;
- /* Calc oobsize */
- switch (extid & 0x03) {
- case 1:
- mtd->oobsize = 128;
- break;
- case 2:
- mtd->oobsize = 218;
- break;
- case 3:
- mtd->oobsize = 400;
- break;
- default:
- mtd->oobsize = 436;
- break;
- }
- extid >>= 2;
- /* Calc blocksize */
- mtd->erasesize = (128 * 1024) <<
- (((extid >> 1) & 0x04) | (extid & 0x03));
- busw = 0;
- } else {
- /* Calc pagesize */
- mtd->writesize = 1024 << (extid & 0x03);
- extid >>= 2;
- /* Calc oobsize */
- mtd->oobsize = (8 << (extid & 0x01)) *
- (mtd->writesize >> 9);
- extid >>= 2;
- /* Calc blocksize. Blocksize is multiples of 64KiB */
- mtd->erasesize = (64 * 1024) << (extid & 0x03);
- extid >>= 2;
- /* Get buswidth information */
- busw = (extid & 0x01) ? NAND_BUSWIDTH_16 : 0;
- }
+ /* Decode parameters from extended ID */
+ nand_decode_ext_id(mtd, chip, id_data, &busw);
} else {
- /*
- * Old devices have chip data hardcoded in the device id table.
- */
- mtd->erasesize = type->erasesize;
- mtd->writesize = type->pagesize;
- mtd->oobsize = mtd->writesize / 32;
- busw = type->options & NAND_BUSWIDTH_16;
-
- /*
- * Check for Spansion/AMD ID + repeating 5th, 6th byte since
- * some Spansion chips have erasesize that conflicts with size
- * listed in nand_ids table.
- * Data sheet (5 byte ID): Spansion S30ML-P ORNAND (p.39)
- */
- if (*maf_id == NAND_MFR_AMD && id_data[4] != 0x00 &&
- id_data[5] == 0x00 && id_data[6] == 0x00 &&
- id_data[7] == 0x00 && mtd->writesize == 512) {
- mtd->erasesize = 128 * 1024;
- mtd->erasesize <<= ((id_data[3] & 0x03) << 1);
- }
+ nand_decode_id(mtd, chip, type, id_data, &busw);
}
- /* Get chip options, preserve non chip based options */
- chip->options &= ~NAND_CHIPOPTIONS_MSK;
- chip->options |= type->options & NAND_CHIPOPTIONS_MSK;
+ /* Get chip options */
+ chip->options |= type->options;
/*
* Check if chip is not a Samsung device. Do not clear the
@@ -3112,6 +3254,8 @@ ident_done:
return ERR_PTR(-EINVAL);
}
+ nand_decode_bbm_options(mtd, chip, id_data);
+
/* Calculate the address shift from the page size */
chip->page_shift = ffs(mtd->writesize) - 1;
/* Convert chipsize to number of pages per chip -1 */
@@ -3128,33 +3272,6 @@ ident_done:
chip->badblockbits = 8;
- /* Set the bad block position */
- if (mtd->writesize > 512 || (busw & NAND_BUSWIDTH_16))
- chip->badblockpos = NAND_LARGE_BADBLOCK_POS;
- else
- chip->badblockpos = NAND_SMALL_BADBLOCK_POS;
-
- /*
- * Bad block marker is stored in the last page of each block
- * on Samsung and Hynix MLC devices; stored in first two pages
- * of each block on Micron devices with 2KiB pages and on
- * SLC Samsung, Hynix, Toshiba, AMD/Spansion, and Macronix.
- * All others scan only the first page.
- */
- if ((chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
- (*maf_id == NAND_MFR_SAMSUNG ||
- *maf_id == NAND_MFR_HYNIX))
- chip->bbt_options |= NAND_BBT_SCANLASTPAGE;
- else if ((!(chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
- (*maf_id == NAND_MFR_SAMSUNG ||
- *maf_id == NAND_MFR_HYNIX ||
- *maf_id == NAND_MFR_TOSHIBA ||
- *maf_id == NAND_MFR_AMD ||
- *maf_id == NAND_MFR_MACRONIX)) ||
- (mtd->writesize == 2048 &&
- *maf_id == NAND_MFR_MICRON))
- chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
-
/* Check for AND chips with 4 page planes */
if (chip->options & NAND_4PAGE_ARRAY)
chip->erase_cmd = multi_erase_cmd;
@@ -3284,6 +3401,12 @@ int nand_scan_tail(struct mtd_info *mtd)
if (!chip->write_page)
chip->write_page = nand_write_page;
+ /* Set the default hooks for ONFI feature access */
+ if (!chip->onfi_set_features)
+ chip->onfi_set_features = nand_onfi_set_features;
+ if (!chip->onfi_get_features)
+ chip->onfi_get_features = nand_onfi_get_features;
+
/*
* Check ECC mode, default to software if 3byte/512byte hardware ECC is
* selected and we have 256 byte pagesize fallback to software ECC
@@ -3477,6 +3600,10 @@ int nand_scan_tail(struct mtd_info *mtd)
/* Invalidate the pagebuffer reference */
chip->pagebuf = -1;
+ /* Large page NAND with SOFT_ECC should support subpage reads */
+ if ((chip->ecc.mode == NAND_ECC_SOFT) && (chip->page_shift > 9))
+ chip->options |= NAND_SUBPAGE_READ;
+
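page_shift is computed earlier as ffs(mtd->writesize) - 1, so the page_shift > 9 test reads as "page larger than 512 bytes". POSIX ffs() from <strings.h> behaves the same way, which makes the cutoff easy to demonstrate:

#include <stdio.h>
#include <strings.h>

int main(void)
{
	int ws;

	for (ws = 512; ws <= 8192; ws <<= 1)
		printf("writesize %4d -> page_shift %2d -> subpage reads: %s\n",
		       ws, ffs(ws) - 1, ffs(ws) - 1 > 9 ? "yes" : "no");
	return 0;
}
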
/* Fill in remaining MTD driver data */
mtd->type = MTD_NANDFLASH;
mtd->flags = (chip->options & NAND_ROM) ? MTD_CAP_ROM :
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index 30d1319..916d6e9 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -4,7 +4,7 @@
* Overview:
* Bad block table support for the NAND driver
*
- * Copyright (C) 2004 Thomas Gleixner (tglx@linutronix.de)
+ * Copyright © 2004 Thomas Gleixner (tglx@linutronix.de)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -22,7 +22,7 @@
* BBT on flash. If a BBT is found then the contents are read and the memory
* based BBT is created. If a mirrored BBT is selected then the mirror is
* searched too and the versions are compared. If the mirror has a greater
- * version number than the mirror BBT is used to build the memory based BBT.
+ * version number, then the mirror BBT is used to build the memory based BBT.
* If the tables are not versioned, then we "or" the bad block information.
* If one of the BBTs is out of date or does not exist it is (re)created.
* If no BBT exists at all then the device is scanned for factory marked
@@ -62,21 +62,20 @@
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/mtd/mtd.h>
+#include <linux/mtd/bbm.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/nand_ecc.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
+#include <linux/string.h>
static int check_pattern_no_oob(uint8_t *buf, struct nand_bbt_descr *td)
{
- int ret;
-
- ret = memcmp(buf, td->pattern, td->len);
- if (!ret)
- return ret;
- return -1;
+ if (memcmp(buf, td->pattern, td->len))
+ return -1;
+ return 0;
}
/**
@@ -92,19 +91,16 @@ static int check_pattern_no_oob(uint8_t *buf, struct nand_bbt_descr *td)
*/
static int check_pattern(uint8_t *buf, int len, int paglen, struct nand_bbt_descr *td)
{
- int i, end = 0;
+ int end = 0;
uint8_t *p = buf;
if (td->options & NAND_BBT_NO_OOB)
return check_pattern_no_oob(buf, td);
end = paglen + td->offs;
- if (td->options & NAND_BBT_SCANEMPTY) {
- for (i = 0; i < end; i++) {
- if (p[i] != 0xff)
- return -1;
- }
- }
+ if (td->options & NAND_BBT_SCANEMPTY)
+ if (memchr_inv(p, 0xff, end))
+ return -1;
p += end;
/* Compare the pattern */
@@ -114,10 +110,8 @@ static int check_pattern(uint8_t *buf, int len, int paglen, struct nand_bbt_desc
if (td->options & NAND_BBT_SCANEMPTY) {
p += td->len;
end += td->len;
- for (i = end; i < len; i++) {
- if (*p++ != 0xff)
- return -1;
- }
+ if (memchr_inv(p, 0xff, len - end))
+ return -1;
}
return 0;
}
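
memchr_inv() returns a pointer to the first byte that differs from the given value, or NULL when the whole range matches, so the "is anything not 0xff?" loops collapse to single calls. A userspace stand-in with the same contract (memchr_inv itself lives in the kernel's lib/string.c):

#include <stdio.h>
#include <string.h>

/* Userspace stand-in for the kernel's memchr_inv() */
static const void *memchr_inv(const void *start, int c, size_t len)
{
	const unsigned char *p = start;
	size_t i;

	for (i = 0; i < len; i++)
		if (p[i] != (unsigned char)c)
			return p + i;
	return NULL;
}

int main(void)
{
	unsigned char erased[4] = { 0xff, 0xff, 0xff, 0xff };
	unsigned char marked[4] = { 0xff, 0x00, 0xff, 0xff };

	printf("%d %d\n", memchr_inv(erased, 0xff, 4) == NULL,
	       memchr_inv(marked, 0xff, 4) == NULL);	/* prints: 1 0 */
	return 0;
}
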
@@ -133,14 +127,9 @@ static int check_pattern(uint8_t *buf, int len, int paglen, struct nand_bbt_desc
*/
static int check_short_pattern(uint8_t *buf, struct nand_bbt_descr *td)
{
- int i;
- uint8_t *p = buf;
-
/* Compare the pattern */
- for (i = 0; i < td->len; i++) {
- if (p[td->offs + i] != td->pattern[i])
- return -1;
- }
+ if (memcmp(buf + td->offs, td->pattern, td->len))
+ return -1;
return 0;
}
@@ -288,7 +277,7 @@ static int read_abs_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_desc
}
/* BBT marker is in the first page, no OOB */
-static int scan_read_raw_data(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
+static int scan_read_data(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
struct nand_bbt_descr *td)
{
size_t retlen;
@@ -301,14 +290,24 @@ static int scan_read_raw_data(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
return mtd_read(mtd, offs, len, &retlen, buf);
}
-/* Scan read raw data from flash */
-static int scan_read_raw_oob(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
+/**
+ * scan_read_oob - [GENERIC] Scan data+OOB region to buffer
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @offs: offset at which to scan
+ * @len: length of data region to read
+ *
+ * Scan read data from data+OOB. May traverse multiple pages, interleaving
+ * page,OOB,page,OOB,... in buf. Completes transfer and returns the "strongest"
+ * ECC condition (error or bitflip). May quit on the first (non-ECC) error.
+ */
+static int scan_read_oob(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
size_t len)
{
struct mtd_oob_ops ops;
- int res;
+ int res, ret = 0;
- ops.mode = MTD_OPS_RAW;
+ ops.mode = MTD_OPS_PLACE_OOB;
ops.ooboffs = 0;
ops.ooblen = mtd->oobsize;
@@ -318,24 +317,27 @@ static int scan_read_raw_oob(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
ops.oobbuf = buf + ops.len;
res = mtd_read_oob(mtd, offs, &ops);
-
- if (res)
- return res;
+ if (res) {
+ if (!mtd_is_bitflip_or_eccerr(res))
+ return res;
+ else if (mtd_is_eccerr(res) || !ret)
+ ret = res;
+ }
buf += mtd->oobsize + mtd->writesize;
len -= mtd->writesize;
offs += mtd->writesize;
}
- return 0;
+ return ret;
}
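
The accumulation keeps the "strongest" result across pages: a hard error returns immediately, an ECC error (-EBADMSG) outranks a bitflip (-EUCLEAN), and a bitflip outranks success. Restating the precedence as a standalone merge helper, using the kernel's predicates from <linux/mtd/mtd.h> (the helper name is illustrative):

/* Merge the result of one page read into the running return code */
static int merge_ecc_result(int ret, int res)
{
	if (!res)
		return ret;			/* page read clean */
	if (!mtd_is_bitflip_or_eccerr(res))
		return res;			/* hard error wins outright */
	if (mtd_is_eccerr(res) || !ret)
		return res;			/* upgrade to the stronger condition */
	return ret;				/* keep the earlier bitflip */
}
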
-static int scan_read_raw(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
+static int scan_read(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
size_t len, struct nand_bbt_descr *td)
{
if (td->options & NAND_BBT_NO_OOB)
- return scan_read_raw_data(mtd, buf, offs, td);
+ return scan_read_data(mtd, buf, offs, td);
else
- return scan_read_raw_oob(mtd, buf, offs, len);
+ return scan_read_oob(mtd, buf, offs, len);
}
/* Scan write data with oob to flash */
@@ -373,14 +375,14 @@ static u32 bbt_get_ver_offs(struct mtd_info *mtd, struct nand_bbt_descr *td)
* Read the bad block table(s) for all chips starting at a given page. We
* assume that the bbt bits are in consecutive order.
*/
-static int read_abs_bbts(struct mtd_info *mtd, uint8_t *buf,
- struct nand_bbt_descr *td, struct nand_bbt_descr *md)
+static void read_abs_bbts(struct mtd_info *mtd, uint8_t *buf,
+ struct nand_bbt_descr *td, struct nand_bbt_descr *md)
{
struct nand_chip *this = mtd->priv;
/* Read the primary version, if available */
if (td->options & NAND_BBT_VERSION) {
- scan_read_raw(mtd, buf, (loff_t)td->pages[0] << this->page_shift,
+ scan_read(mtd, buf, (loff_t)td->pages[0] << this->page_shift,
mtd->writesize, td);
td->version[0] = buf[bbt_get_ver_offs(mtd, td)];
pr_info("Bad block table at page %d, version 0x%02X\n",
@@ -389,28 +391,27 @@ static int read_abs_bbts(struct mtd_info *mtd, uint8_t *buf,
/* Read the mirror version, if available */
if (md && (md->options & NAND_BBT_VERSION)) {
- scan_read_raw(mtd, buf, (loff_t)md->pages[0] << this->page_shift,
- mtd->writesize, td);
+ scan_read(mtd, buf, (loff_t)md->pages[0] << this->page_shift,
+ mtd->writesize, md);
md->version[0] = buf[bbt_get_ver_offs(mtd, md)];
pr_info("Bad block table at page %d, version 0x%02X\n",
md->pages[0], md->version[0]);
}
- return 1;
}
/* Scan a given block full */
static int scan_block_full(struct mtd_info *mtd, struct nand_bbt_descr *bd,
loff_t offs, uint8_t *buf, size_t readlen,
- int scanlen, int len)
+ int scanlen, int numpages)
{
int ret, j;
- ret = scan_read_raw_oob(mtd, buf, offs, readlen);
+ ret = scan_read_oob(mtd, buf, offs, readlen);
/* Ignore ECC errors when checking for BBM */
if (ret && !mtd_is_bitflip_or_eccerr(ret))
return ret;
- for (j = 0; j < len; j++, buf += scanlen) {
+ for (j = 0; j < numpages; j++, buf += scanlen) {
if (check_pattern(buf, scanlen, mtd->writesize, bd))
return 1;
}
@@ -419,7 +420,7 @@ static int scan_block_full(struct mtd_info *mtd, struct nand_bbt_descr *bd,
/* Scan a given block partially */
static int scan_block_fast(struct mtd_info *mtd, struct nand_bbt_descr *bd,
- loff_t offs, uint8_t *buf, int len)
+ loff_t offs, uint8_t *buf, int numpages)
{
struct mtd_oob_ops ops;
int j, ret;
@@ -430,7 +431,7 @@ static int scan_block_fast(struct mtd_info *mtd, struct nand_bbt_descr *bd,
ops.datbuf = NULL;
ops.mode = MTD_OPS_PLACE_OOB;
- for (j = 0; j < len; j++) {
+ for (j = 0; j < numpages; j++) {
/*
* Read the full oob until read_oob is fixed to handle single
* byte reads for 16 bit buswidth.
@@ -463,7 +464,7 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
struct nand_bbt_descr *bd, int chip)
{
struct nand_chip *this = mtd->priv;
- int i, numblocks, len, scanlen;
+ int i, numblocks, numpages, scanlen;
int startblock;
loff_t from;
size_t readlen;
@@ -471,11 +472,11 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
pr_info("Scanning device for bad blocks\n");
if (bd->options & NAND_BBT_SCANALLPAGES)
- len = 1 << (this->bbt_erase_shift - this->page_shift);
+ numpages = 1 << (this->bbt_erase_shift - this->page_shift);
else if (bd->options & NAND_BBT_SCAN2NDPAGE)
- len = 2;
+ numpages = 2;
else
- len = 1;
+ numpages = 1;
if (!(bd->options & NAND_BBT_SCANEMPTY)) {
/* We need only read few bytes from the OOB area */
@@ -484,7 +485,7 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
} else {
/* Full page content should be read */
scanlen = mtd->writesize + mtd->oobsize;
- readlen = len * mtd->writesize;
+ readlen = numpages * mtd->writesize;
}
if (chip == -1) {
@@ -508,7 +509,7 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
}
if (this->bbt_options & NAND_BBT_SCANLASTPAGE)
- from += mtd->erasesize - (mtd->writesize * len);
+ from += mtd->erasesize - (mtd->writesize * numpages);
for (i = startblock; i < numblocks;) {
int ret;
@@ -517,9 +518,9 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
if (bd->options & NAND_BBT_SCANALLPAGES)
ret = scan_block_full(mtd, bd, from, buf, readlen,
- scanlen, len);
+ scanlen, numpages);
else
- ret = scan_block_fast(mtd, bd, from, buf, len);
+ ret = scan_block_fast(mtd, bd, from, buf, numpages);
if (ret < 0)
return ret;
@@ -594,7 +595,7 @@ static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
loff_t offs = (loff_t)actblock << this->bbt_erase_shift;
/* Read first page */
- scan_read_raw(mtd, buf, offs, mtd->writesize, td);
+ scan_read(mtd, buf, offs, mtd->writesize, td);
if (!check_pattern(buf, scanlen, mtd->writesize, td)) {
td->pages[i] = actblock << blocktopage;
if (td->options & NAND_BBT_VERSION) {
@@ -626,7 +627,9 @@ static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
*
* Search and read the bad block table(s).
*/
-static int search_read_bbts(struct mtd_info *mtd, uint8_t * buf, struct nand_bbt_descr *td, struct nand_bbt_descr *md)
+static void search_read_bbts(struct mtd_info *mtd, uint8_t *buf,
+ struct nand_bbt_descr *td,
+ struct nand_bbt_descr *md)
{
/* Search the primary table */
search_bbt(mtd, buf, td);
@@ -634,9 +637,6 @@ static int search_read_bbts(struct mtd_info *mtd, uint8_t * buf, struct nand_bbt
/* Search the mirror table */
if (md)
search_bbt(mtd, buf, md);
-
- /* Force result check */
- return 1;
}
/**
@@ -1162,14 +1162,13 @@ int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
/* Is the bbt at a given page? */
if (td->options & NAND_BBT_ABSPAGE) {
- res = read_abs_bbts(mtd, buf, td, md);
+ read_abs_bbts(mtd, buf, td, md);
} else {
/* Search the bad block table using a pattern in oob */
- res = search_read_bbts(mtd, buf, td, md);
+ search_read_bbts(mtd, buf, td, md);
}
- if (res)
- res = check_create(mtd, buf, bd);
+ res = check_create(mtd, buf, bd);
/* Prevent the bbt regions from erasing / writing */
mark_bbt_region(mtd, td);
@@ -1260,7 +1259,7 @@ static struct nand_bbt_descr bbt_main_descr = {
.offs = 8,
.len = 4,
.veroffs = 12,
- .maxblocks = 4,
+ .maxblocks = NAND_BBT_SCAN_MAXBLOCKS,
.pattern = bbt_pattern
};
@@ -1270,27 +1269,27 @@ static struct nand_bbt_descr bbt_mirror_descr = {
.offs = 8,
.len = 4,
.veroffs = 12,
- .maxblocks = 4,
+ .maxblocks = NAND_BBT_SCAN_MAXBLOCKS,
.pattern = mirror_pattern
};
-static struct nand_bbt_descr bbt_main_no_bbt_descr = {
+static struct nand_bbt_descr bbt_main_no_oob_descr = {
.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP
| NAND_BBT_NO_OOB,
.len = 4,
.veroffs = 4,
- .maxblocks = 4,
+ .maxblocks = NAND_BBT_SCAN_MAXBLOCKS,
.pattern = bbt_pattern
};
-static struct nand_bbt_descr bbt_mirror_no_bbt_descr = {
+static struct nand_bbt_descr bbt_mirror_no_oob_descr = {
.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP
| NAND_BBT_NO_OOB,
.len = 4,
.veroffs = 4,
- .maxblocks = 4,
+ .maxblocks = NAND_BBT_SCAN_MAXBLOCKS,
.pattern = mirror_pattern
};
@@ -1355,8 +1354,8 @@ int nand_default_bbt(struct mtd_info *mtd)
/* Use the default pattern descriptors */
if (!this->bbt_td) {
if (this->bbt_options & NAND_BBT_NO_OOB) {
- this->bbt_td = &bbt_main_no_bbt_descr;
- this->bbt_md = &bbt_mirror_no_bbt_descr;
+ this->bbt_td = &bbt_main_no_oob_descr;
+ this->bbt_md = &bbt_mirror_no_oob_descr;
} else {
this->bbt_td = &bbt_main_descr;
this->bbt_md = &bbt_mirror_descr;
@@ -1406,3 +1405,4 @@ int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt)
EXPORT_SYMBOL(nand_scan_bbt);
EXPORT_SYMBOL(nand_default_bbt);
+EXPORT_SYMBOL_GPL(nand_update_bbt);
diff --git a/drivers/mtd/nand/nand_bcm_umi.c b/drivers/mtd/nand/nand_bcm_umi.c
deleted file mode 100644
index 46a6bc9..0000000
--- a/drivers/mtd/nand/nand_bcm_umi.c
+++ /dev/null
@@ -1,149 +0,0 @@
-/*****************************************************************************
-* Copyright 2004 - 2009 Broadcom Corporation. All rights reserved.
-*
-* Unless you and Broadcom execute a separate written software license
-* agreement governing use of this software, this software is licensed to you
-* under the terms of the GNU General Public License version 2, available at
-* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
-*
-* Notwithstanding the above, under no circumstances may you combine this
-* software in any way with any other Broadcom software provided under a
-* license other than the GPL, without Broadcom's express prior written
-* consent.
-*****************************************************************************/
-
-/* ---- Include Files ---------------------------------------------------- */
-#include <mach/reg_umi.h>
-#include "nand_bcm_umi.h"
-#ifdef BOOT0_BUILD
-#include <uart.h>
-#endif
-
-/* ---- External Variable Declarations ----------------------------------- */
-/* ---- External Function Prototypes ------------------------------------- */
-/* ---- Public Variables ------------------------------------------------- */
-/* ---- Private Constants and Types -------------------------------------- */
-/* ---- Private Function Prototypes -------------------------------------- */
-/* ---- Private Variables ------------------------------------------------ */
-/* ---- Private Functions ------------------------------------------------ */
-
-#if NAND_ECC_BCH
-/****************************************************************************
-* nand_bch_ecc_flip_bit - Routine to flip an errored bit
-*
-* PURPOSE:
-* This is a helper routine that flips the bit (0 -> 1 or 1 -> 0) of the
-* errored bit specified
-*
-* PARAMETERS:
-* datap - Container that holds the 512 byte data
-* errorLocation - Location of the bit that needs to be flipped
-*
-* RETURNS:
-* None
-****************************************************************************/
-static void nand_bcm_umi_bch_ecc_flip_bit(uint8_t *datap, int errorLocation)
-{
- int locWithinAByte = (errorLocation & REG_UMI_BCH_ERR_LOC_BYTE) >> 0;
- int locWithinAWord = (errorLocation & REG_UMI_BCH_ERR_LOC_WORD) >> 3;
- int locWithinAPage = (errorLocation & REG_UMI_BCH_ERR_LOC_PAGE) >> 5;
-
- uint8_t errorByte = 0;
- uint8_t byteMask = 1 << locWithinAByte;
-
- /* BCH uses big endian, need to change the location
- * bits to little endian */
- locWithinAWord = 3 - locWithinAWord;
-
- errorByte = datap[locWithinAPage * sizeof(uint32_t) + locWithinAWord];
-
-#ifdef BOOT0_BUILD
- puthexs("\nECC Correct Offset: ",
- locWithinAPage * sizeof(uint32_t) + locWithinAWord);
- puthexs(" errorByte:", errorByte);
- puthex8(" Bit: ", locWithinAByte);
-#endif
-
- if (errorByte & byteMask) {
- /* bit needs to be cleared */
- errorByte &= ~byteMask;
- } else {
- /* bit needs to be set */
- errorByte |= byteMask;
- }
-
- /* write back the value with the fixed bit */
- datap[locWithinAPage * sizeof(uint32_t) + locWithinAWord] = errorByte;
-}
-
-/****************************************************************************
-* nand_correct_page_bch - Routine to correct bit errors when reading NAND
-*
-* PURPOSE:
-* This routine reads the BCH registers to determine if there are any bit
-* errors during the read of the last 512 bytes of data + ECC bytes. If
-* errors exists, the routine fixes it.
-*
-* PARAMETERS:
-* datap - Container that holds the 512 byte data
-*
-* RETURNS:
-* 0 or greater = Number of errors corrected
-* (No errors are found or errors have been fixed)
-* -1 = Error(s) cannot be fixed
-****************************************************************************/
-int nand_bcm_umi_bch_correct_page(uint8_t *datap, uint8_t *readEccData,
- int numEccBytes)
-{
- int numErrors;
- int errorLocation;
- int idx;
- uint32_t regValue;
-
- /* wait for read ECC to be valid */
- regValue = nand_bcm_umi_bch_poll_read_ecc_calc();
-
- /*
- * read the control status register to determine if there
- * are error'ed bits
- * see if errors are correctible
- */
- if ((regValue & REG_UMI_BCH_CTRL_STATUS_UNCORR_ERR) > 0) {
- int i;
-
- for (i = 0; i < numEccBytes; i++) {
- if (readEccData[i] != 0xff) {
- /* errors cannot be fixed, return -1 */
- return -1;
- }
- }
- /* If ECC is unprogrammed then we can't correct,
- * assume everything OK */
- return 0;
- }
-
- if ((regValue & REG_UMI_BCH_CTRL_STATUS_CORR_ERR) == 0) {
- /* no errors */
- return 0;
- }
-
- /*
- * Fix errored bits by doing the following:
- * 1. Read the number of errors in the control and status register
- * 2. Read the error location registers that corresponds to the number
- * of errors reported
- * 3. Invert the bit in the data
- */
- numErrors = (regValue & REG_UMI_BCH_CTRL_STATUS_NB_CORR_ERROR) >> 20;
-
- for (idx = 0; idx < numErrors; idx++) {
- errorLocation =
- REG_UMI_BCH_ERR_LOC_ADDR(idx) & REG_UMI_BCH_ERR_LOC_MASK;
-
- /* Flip bit */
- nand_bcm_umi_bch_ecc_flip_bit(datap, errorLocation);
- }
- /* Errors corrected */
- return numErrors;
-}
-#endif
diff --git a/drivers/mtd/nand/nand_bcm_umi.h b/drivers/mtd/nand/nand_bcm_umi.h
deleted file mode 100644
index d901866..0000000
--- a/drivers/mtd/nand/nand_bcm_umi.h
+++ /dev/null
@@ -1,336 +0,0 @@
-/*****************************************************************************
-* Copyright 2003 - 2009 Broadcom Corporation. All rights reserved.
-*
-* Unless you and Broadcom execute a separate written software license
-* agreement governing use of this software, this software is licensed to you
-* under the terms of the GNU General Public License version 2, available at
-* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
-*
-* Notwithstanding the above, under no circumstances may you combine this
-* software in any way with any other Broadcom software provided under a
-* license other than the GPL, without Broadcom's express prior written
-* consent.
-*****************************************************************************/
-#ifndef NAND_BCM_UMI_H
-#define NAND_BCM_UMI_H
-
-/* ---- Include Files ---------------------------------------------------- */
-#include <mach/reg_umi.h>
-#include <mach/reg_nand.h>
-#include <mach/cfg_global.h>
-
-/* ---- Constants and Types ---------------------------------------------- */
-#if (CFG_GLOBAL_CHIP_FAMILY == CFG_GLOBAL_CHIP_FAMILY_BCMRING)
-#define NAND_ECC_BCH (CFG_GLOBAL_CHIP_REV > 0xA0)
-#else
-#define NAND_ECC_BCH 0
-#endif
-
-#define CFG_GLOBAL_NAND_ECC_BCH_NUM_BYTES 13
-
-#if NAND_ECC_BCH
-#ifdef BOOT0_BUILD
-#define NAND_ECC_NUM_BYTES 13
-#else
-#define NAND_ECC_NUM_BYTES CFG_GLOBAL_NAND_ECC_BCH_NUM_BYTES
-#endif
-#else
-#define NAND_ECC_NUM_BYTES 3
-#endif
-
-#define NAND_DATA_ACCESS_SIZE 512
-
-/* ---- Variable Externs ------------------------------------------ */
-/* ---- Function Prototypes --------------------------------------- */
-int nand_bcm_umi_bch_correct_page(uint8_t *datap, uint8_t *readEccData,
- int numEccBytes);
-
-/* Check in device is ready */
-static inline int nand_bcm_umi_dev_ready(void)
-{
- return readl(&REG_UMI_NAND_RCSR) & REG_UMI_NAND_RCSR_RDY;
-}
-
-/* Wait until device is ready */
-static inline void nand_bcm_umi_wait_till_ready(void)
-{
- while (nand_bcm_umi_dev_ready() == 0)
- ;
-}
-
-/* Enable Hamming ECC */
-static inline void nand_bcm_umi_hamming_enable_hwecc(void)
-{
- /* disable and reset ECC, 512 byte page */
- writel(readl(&REG_UMI_NAND_ECC_CSR) & ~(REG_UMI_NAND_ECC_CSR_ECC_ENABLE |
- REG_UMI_NAND_ECC_CSR_256BYTE), &REG_UMI_NAND_ECC_CSR);
- /* enable ECC */
- writel(readl(&REG_UMI_NAND_ECC_CSR) | REG_UMI_NAND_ECC_CSR_ECC_ENABLE,
- &REG_UMI_NAND_ECC_CSR);
-}
-
-#if NAND_ECC_BCH
-/* BCH ECC specifics */
-#define ECC_BITS_PER_CORRECTABLE_BIT 13
-
-/* Enable BCH Read ECC */
-static inline void nand_bcm_umi_bch_enable_read_hwecc(void)
-{
- /* disable and reset ECC */
- writel(REG_UMI_BCH_CTRL_STATUS_RD_ECC_VALID, &REG_UMI_BCH_CTRL_STATUS);
- /* Turn on ECC */
- writel(REG_UMI_BCH_CTRL_STATUS_ECC_RD_EN, &REG_UMI_BCH_CTRL_STATUS);
-}
-
-/* Enable BCH Write ECC */
-static inline void nand_bcm_umi_bch_enable_write_hwecc(void)
-{
- /* disable and reset ECC */
- writel(REG_UMI_BCH_CTRL_STATUS_WR_ECC_VALID, &REG_UMI_BCH_CTRL_STATUS);
- /* Turn on ECC */
- writel(REG_UMI_BCH_CTRL_STATUS_ECC_WR_EN, &REG_UMI_BCH_CTRL_STATUS);
-}
-
-/* Config number of BCH ECC bytes */
-static inline void nand_bcm_umi_bch_config_ecc(uint8_t numEccBytes)
-{
- uint32_t nValue;
- uint32_t tValue;
- uint32_t kValue;
- uint32_t numBits = numEccBytes * 8;
-
- /* disable and reset ECC */
- writel(REG_UMI_BCH_CTRL_STATUS_WR_ECC_VALID |
- REG_UMI_BCH_CTRL_STATUS_RD_ECC_VALID,
- &REG_UMI_BCH_CTRL_STATUS);
-
- /* Every correctible bit requires 13 ECC bits */
- tValue = (uint32_t) (numBits / ECC_BITS_PER_CORRECTABLE_BIT);
-
- /* Total data in number of bits for generating and computing BCH ECC */
- nValue = (NAND_DATA_ACCESS_SIZE + numEccBytes) * 8;
-
- /* K parameter is used internally. K = N - (T * 13) */
- kValue = nValue - (tValue * ECC_BITS_PER_CORRECTABLE_BIT);
-
- /* Write the settings */
- writel(nValue, &REG_UMI_BCH_N);
- writel(tValue, &REG_UMI_BCH_T);
- writel(kValue, &REG_UMI_BCH_K);
-}
-
-/* Pause during ECC read calculation to skip bytes in OOB */
-static inline void nand_bcm_umi_bch_pause_read_ecc_calc(void)
-{
- writel(REG_UMI_BCH_CTRL_STATUS_ECC_RD_EN | REG_UMI_BCH_CTRL_STATUS_PAUSE_ECC_DEC, &REG_UMI_BCH_CTRL_STATUS);
-}
-
-/* Resume during ECC read calculation after skipping bytes in OOB */
-static inline void nand_bcm_umi_bch_resume_read_ecc_calc(void)
-{
- writel(REG_UMI_BCH_CTRL_STATUS_ECC_RD_EN, &REG_UMI_BCH_CTRL_STATUS);
-}
-
-/* Poll read ECC calc to check when hardware completes */
-static inline uint32_t nand_bcm_umi_bch_poll_read_ecc_calc(void)
-{
- uint32_t regVal;
-
- do {
- /* wait for ECC to be valid */
- regVal = readl(&REG_UMI_BCH_CTRL_STATUS);
- } while ((regVal & REG_UMI_BCH_CTRL_STATUS_RD_ECC_VALID) == 0);
-
- return regVal;
-}
-
-/* Poll write ECC calc to check when hardware completes */
-static inline void nand_bcm_umi_bch_poll_write_ecc_calc(void)
-{
- /* wait for ECC to be valid */
- while ((readl(&REG_UMI_BCH_CTRL_STATUS) & REG_UMI_BCH_CTRL_STATUS_WR_ECC_VALID)
- == 0)
- ;
-}
-
-/* Read the OOB and ECC, for kernel write OOB to a buffer */
-#if defined(__KERNEL__) && !defined(STANDALONE)
-static inline void nand_bcm_umi_bch_read_oobEcc(uint32_t pageSize,
- uint8_t *eccCalc, int numEccBytes, uint8_t *oobp)
-#else
-static inline void nand_bcm_umi_bch_read_oobEcc(uint32_t pageSize,
- uint8_t *eccCalc, int numEccBytes)
-#endif
-{
- int eccPos = 0;
- int numToRead = 16; /* There are 16 bytes per sector in the OOB */
-
- /* ECC is already paused when this function is called */
- if (pageSize != NAND_DATA_ACCESS_SIZE) {
- /* skip BI */
-#if defined(__KERNEL__) && !defined(STANDALONE)
- *oobp++ = readb(&REG_NAND_DATA8);
-#else
- readb(&REG_NAND_DATA8);
-#endif
- numToRead--;
- }
-
- while (numToRead > numEccBytes) {
- /* skip free oob region */
-#if defined(__KERNEL__) && !defined(STANDALONE)
- *oobp++ = readb(&REG_NAND_DATA8);
-#else
- readb(&REG_NAND_DATA8);
-#endif
- numToRead--;
- }
-
- if (pageSize == NAND_DATA_ACCESS_SIZE) {
- /* read ECC bytes before BI */
- nand_bcm_umi_bch_resume_read_ecc_calc();
-
- while (numToRead > 11) {
-#if defined(__KERNEL__) && !defined(STANDALONE)
- *oobp = readb(&REG_NAND_DATA8);
- eccCalc[eccPos++] = *oobp;
- oobp++;
-#else
- eccCalc[eccPos++] = readb(&REG_NAND_DATA8);
-#endif
- numToRead--;
- }
-
- nand_bcm_umi_bch_pause_read_ecc_calc();
-
- if (numToRead == 11) {
- /* read BI */
-#if defined(__KERNEL__) && !defined(STANDALONE)
- *oobp++ = readb(&REG_NAND_DATA8);
-#else
- readb(&REG_NAND_DATA8);
-#endif
- numToRead--;
- }
-
- }
- /* read ECC bytes */
- nand_bcm_umi_bch_resume_read_ecc_calc();
- while (numToRead) {
-#if defined(__KERNEL__) && !defined(STANDALONE)
- *oobp = readb(&REG_NAND_DATA8);
- eccCalc[eccPos++] = *oobp;
- oobp++;
-#else
- eccCalc[eccPos++] = readb(&REG_NAND_DATA8);
-#endif
- numToRead--;
- }
-}
-
-/* Helper function to write ECC */
-static inline void NAND_BCM_UMI_ECC_WRITE(int numEccBytes, int eccBytePos,
- uint8_t *oobp, uint8_t eccVal)
-{
- if (eccBytePos <= numEccBytes)
- *oobp = eccVal;
-}
-
-/* Write OOB with ECC */
-static inline void nand_bcm_umi_bch_write_oobEcc(uint32_t pageSize,
- uint8_t *oobp, int numEccBytes)
-{
- uint32_t eccVal = 0xffffffff;
-
- /* wait for write ECC to be valid */
- nand_bcm_umi_bch_poll_write_ecc_calc();
-
- /*
- ** Get the hardware ecc from the 32-bit result registers.
- ** Read after 512 byte accesses. Format B3B2B1B0
- ** where B3 = ecc3, etc.
- */
-
- if (pageSize == NAND_DATA_ACCESS_SIZE) {
- /* Now fill in the ECC bytes */
- if (numEccBytes >= 13)
- eccVal = readl(&REG_UMI_BCH_WR_ECC_3);
-
- /* Usually we skip CM in oob[0,1] */
- NAND_BCM_UMI_ECC_WRITE(numEccBytes, 15, &oobp[0],
- (eccVal >> 16) & 0xff);
- NAND_BCM_UMI_ECC_WRITE(numEccBytes, 14, &oobp[1],
- (eccVal >> 8) & 0xff);
-
- /* Write ECC in oob[2,3,4] */
- NAND_BCM_UMI_ECC_WRITE(numEccBytes, 13, &oobp[2],
- eccVal & 0xff); /* ECC 12 */
-
- if (numEccBytes >= 9)
- eccVal = readl(&REG_UMI_BCH_WR_ECC_2);
-
- NAND_BCM_UMI_ECC_WRITE(numEccBytes, 12, &oobp[3],
- (eccVal >> 24) & 0xff); /* ECC11 */
- NAND_BCM_UMI_ECC_WRITE(numEccBytes, 11, &oobp[4],
- (eccVal >> 16) & 0xff); /* ECC10 */
-
- /* Always Skip BI in oob[5] */
- } else {
- /* Always Skip BI in oob[0] */
-
- /* Now fill in the ECC bytes */
- if (numEccBytes >= 13)
- eccVal = readl(&REG_UMI_BCH_WR_ECC_3);
-
- /* Usually skip CM in oob[1,2] */
- NAND_BCM_UMI_ECC_WRITE(numEccBytes, 15, &oobp[1],
- (eccVal >> 16) & 0xff);
- NAND_BCM_UMI_ECC_WRITE(numEccBytes, 14, &oobp[2],
- (eccVal >> 8) & 0xff);
-
- /* Write ECC in oob[3-15] */
- NAND_BCM_UMI_ECC_WRITE(numEccBytes, 13, &oobp[3],
- eccVal & 0xff); /* ECC12 */
-
- if (numEccBytes >= 9)
- eccVal = readl(&REG_UMI_BCH_WR_ECC_2);
-
- NAND_BCM_UMI_ECC_WRITE(numEccBytes, 12, &oobp[4],
- (eccVal >> 24) & 0xff); /* ECC11 */
- NAND_BCM_UMI_ECC_WRITE(numEccBytes, 11, &oobp[5],
- (eccVal >> 16) & 0xff); /* ECC10 */
- }
-
- /* Fill in the remainder of ECC locations */
- NAND_BCM_UMI_ECC_WRITE(numEccBytes, 10, &oobp[6],
- (eccVal >> 8) & 0xff); /* ECC9 */
- NAND_BCM_UMI_ECC_WRITE(numEccBytes, 9, &oobp[7],
- eccVal & 0xff); /* ECC8 */
-
- if (numEccBytes >= 5)
- eccVal = readl(&REG_UMI_BCH_WR_ECC_1);
-
- NAND_BCM_UMI_ECC_WRITE(numEccBytes, 8, &oobp[8],
- (eccVal >> 24) & 0xff); /* ECC7 */
- NAND_BCM_UMI_ECC_WRITE(numEccBytes, 7, &oobp[9],
- (eccVal >> 16) & 0xff); /* ECC6 */
- NAND_BCM_UMI_ECC_WRITE(numEccBytes, 6, &oobp[10],
- (eccVal >> 8) & 0xff); /* ECC5 */
- NAND_BCM_UMI_ECC_WRITE(numEccBytes, 5, &oobp[11],
- eccVal & 0xff); /* ECC4 */
-
- if (numEccBytes >= 1)
- eccVal = readl(&REG_UMI_BCH_WR_ECC_0);
-
- NAND_BCM_UMI_ECC_WRITE(numEccBytes, 4, &oobp[12],
- (eccVal >> 24) & 0xff); /* ECC3 */
- NAND_BCM_UMI_ECC_WRITE(numEccBytes, 3, &oobp[13],
- (eccVal >> 16) & 0xff); /* ECC2 */
- NAND_BCM_UMI_ECC_WRITE(numEccBytes, 2, &oobp[14],
- (eccVal >> 8) & 0xff); /* ECC1 */
- NAND_BCM_UMI_ECC_WRITE(numEccBytes, 1, &oobp[15],
- eccVal & 0xff); /* ECC0 */
-}
-#endif
-
-#endif /* NAND_BCM_UMI_H */
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c
index 621b70b..e3aa274 100644
--- a/drivers/mtd/nand/nand_ids.c
+++ b/drivers/mtd/nand/nand_ids.c
@@ -70,7 +70,7 @@ struct nand_flash_dev nand_flash_ids[] = {
* These are the new chips with large page size. The pagesize and the
* erasesize is determined from the extended id bytes
*/
-#define LP_OPTIONS (NAND_SAMSUNG_LP_OPTIONS | NAND_NO_READRDY)
+#define LP_OPTIONS NAND_SAMSUNG_LP_OPTIONS
#define LP_OPTIONS16 (LP_OPTIONS | NAND_BUSWIDTH_16)
/* 512 Megabit */
@@ -157,7 +157,7 @@ struct nand_flash_dev nand_flash_ids[] = {
* writes possible, but not implemented now
*/
{"AND 128MiB 3,3V 8-bit", 0x01, 2048, 128, 0x4000,
- NAND_IS_AND | NAND_NO_READRDY | NAND_4PAGE_ARRAY | BBT_AUTO_REFRESH},
+ NAND_IS_AND | NAND_4PAGE_ARRAY | BBT_AUTO_REFRESH},
{NULL,}
};
@@ -174,8 +174,9 @@ struct nand_manufacturers nand_manuf_ids[] = {
{NAND_MFR_STMICRO, "ST Micro"},
{NAND_MFR_HYNIX, "Hynix"},
{NAND_MFR_MICRON, "Micron"},
- {NAND_MFR_AMD, "AMD"},
+ {NAND_MFR_AMD, "AMD/Spansion"},
{NAND_MFR_MACRONIX, "Macronix"},
+ {NAND_MFR_EON, "Eon"},
{0x0, "Unknown"}
};
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index cf0cd31..a932c48 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -447,8 +447,6 @@ static unsigned int rptwear_cnt = 0;
/* MTD structure for NAND controller */
static struct mtd_info *nsmtd;
-static u_char ns_verify_buf[NS_LARGEST_PAGE_SIZE];
-
/*
* Allocate array of page pointers, create slab allocation for an array
* and initialize the array by NULL pointers.
@@ -2189,19 +2187,6 @@ static void ns_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
return;
}
-static int ns_nand_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
-{
- ns_nand_read_buf(mtd, (u_char *)&ns_verify_buf[0], len);
-
- if (!memcmp(buf, &ns_verify_buf[0], len)) {
- NS_DBG("verify_buf: the buffer is OK\n");
- return 0;
- } else {
- NS_DBG("verify_buf: the buffer is wrong\n");
- return -EFAULT;
- }
-}
-
/*
* Module initialization function
*/
@@ -2236,7 +2221,6 @@ static int __init ns_init_module(void)
chip->dev_ready = ns_device_ready;
chip->write_buf = ns_nand_write_buf;
chip->read_buf = ns_nand_read_buf;
- chip->verify_buf = ns_nand_verify_buf;
chip->read_word = ns_nand_read_word;
chip->ecc.mode = NAND_ECC_SOFT;
/* The NAND_SKIP_BBTSCAN option is necessary for 'overridesize' */
@@ -2333,6 +2317,7 @@ static int __init ns_init_module(void)
uint64_t new_size = (uint64_t)nsmtd->erasesize << overridesize;
if (new_size >> overridesize != nsmtd->erasesize) {
NS_ERR("overridesize is too big\n");
+ retval = -EINVAL;
goto err_exit;
}
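
The added retval assignment matters because the shift-back test only detects the overflow; previously the function jumped to err_exit with retval still 0, so an oversized overridesize was logged but reported as success. The detection idiom itself, in isolation:

#include <stdint.h>
#include <stdio.h>

/* A left shift that lost bits cannot be undone by the matching right shift */
static int shl_overflows(uint64_t val, unsigned int shift)
{
	return ((val << shift) >> shift) != val;
}

int main(void)
{
	printf("%d %d\n", shl_overflows(1ULL << 20, 10),	/* 0: fits */
	       shl_overflows(1ULL << 60, 10));			/* 1: bits lost */
	return 0;
}
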
/* N.B. This relies on nand_scan not doing anything with the size before we change it */
diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c
index 2b6f632..5fd3f01 100644
--- a/drivers/mtd/nand/ndfc.c
+++ b/drivers/mtd/nand/ndfc.c
@@ -140,18 +140,6 @@ static void ndfc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
out_be32(ndfc->ndfcbase + NDFC_DATA, *p++);
}
-static int ndfc_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
-{
- struct nand_chip *chip = mtd->priv;
- struct ndfc_controller *ndfc = chip->priv;
- uint32_t *p = (uint32_t *) buf;
-
- for(;len > 0; len -= 4)
- if (*p++ != in_be32(ndfc->ndfcbase + NDFC_DATA))
- return -EFAULT;
- return 0;
-}
-
/*
* Initialize chip structure
*/
@@ -172,7 +160,6 @@ static int ndfc_chip_init(struct ndfc_controller *ndfc,
chip->controller = &ndfc->ndfc_control;
chip->read_buf = ndfc_read_buf;
chip->write_buf = ndfc_write_buf;
- chip->verify_buf = ndfc_verify_buf;
chip->ecc.correct = nand_correct_data;
chip->ecc.hwctl = ndfc_enable_hwecc;
chip->ecc.calculate = ndfc_calculate_ecc;
diff --git a/drivers/mtd/nand/nuc900_nand.c b/drivers/mtd/nand/nuc900_nand.c
index 8febe46..94dc46b 100644
--- a/drivers/mtd/nand/nuc900_nand.c
+++ b/drivers/mtd/nand/nuc900_nand.c
@@ -112,22 +112,6 @@ static void nuc900_nand_write_buf(struct mtd_info *mtd,
write_data_reg(nand, buf[i]);
}
-static int nuc900_verify_buf(struct mtd_info *mtd,
- const unsigned char *buf, int len)
-{
- int i;
- struct nuc900_nand *nand;
-
- nand = container_of(mtd, struct nuc900_nand, mtd);
-
- for (i = 0; i < len; i++) {
- if (buf[i] != (unsigned char)read_data_reg(nand))
- return -EFAULT;
- }
-
- return 0;
-}
-
static int nuc900_check_rb(struct nuc900_nand *nand)
{
unsigned int val;
@@ -292,7 +276,6 @@ static int __devinit nuc900_nand_probe(struct platform_device *pdev)
chip->read_byte = nuc900_nand_read_byte;
chip->write_buf = nuc900_nand_write_buf;
chip->read_buf = nuc900_nand_read_buf;
- chip->verify_buf = nuc900_verify_buf;
chip->chip_delay = 50;
chip->options = 0;
chip->ecc.mode = NAND_ECC_SOFT;
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index fc81112..5b31386 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -425,7 +425,7 @@ static void omap_nand_dma_callback(void *data)
}
/*
- * omap_nand_dma_transfer: configer and start dma transfer
+ * omap_nand_dma_transfer: configure and start dma transfer
* @mtd: MTD device structure
* @addr: virtual address in RAM of source/destination
* @len: number of data bytes to be transferred
@@ -546,7 +546,7 @@ static void omap_write_buf_dma_pref(struct mtd_info *mtd,
}
/*
- * omap_nand_irq - GMPC irq handler
+ * omap_nand_irq - GPMC irq handler
* @this_irq: gpmc irq number
* @dev: omap_nand_info structure pointer is passed here
*/
@@ -698,27 +698,6 @@ out_copy:
}
/**
- * omap_verify_buf - Verify chip data against buffer
- * @mtd: MTD device structure
- * @buf: buffer containing the data to compare
- * @len: number of bytes to compare
- */
-static int omap_verify_buf(struct mtd_info *mtd, const u_char * buf, int len)
-{
- struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
- mtd);
- u16 *p = (u16 *) buf;
-
- len >>= 1;
- while (len--) {
- if (*p++ != cpu_to_le16(readw(info->nand.IO_ADDR_R)))
- return -EFAULT;
- }
-
- return 0;
-}
-
-/**
* gen_true_ecc - This function will generate true ECC value
* @ecc_buf: buffer to store ecc code
*
@@ -1326,8 +1305,8 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
/*
* If RDY/BSY line is connected to OMAP then use the omap ready
- * funcrtion and the generic nand_wait function which reads the status
- * register after monitoring the RDY/BSY line.Otherwise use a standard
+ * function and the generic nand_wait function which reads the status
+ * register after monitoring the RDY/BSY line. Otherwise use a standard
* chip delay which is slightly more than tR (AC Timing) of the NAND
* device and read status register until you get a failure or success
*/
@@ -1428,9 +1407,7 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
goto out_release_mem_region;
}
- info->nand.verify_buf = omap_verify_buf;
-
- /* selsect the ecc type */
+ /* select the ecc type */
if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_DEFAULT)
info->nand.ecc.mode = NAND_ECC_SOFT;
else if ((pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW) ||
@@ -1536,7 +1513,8 @@ static int omap_nand_remove(struct platform_device *pdev)
/* Release NAND device, its internal structures and partitions */
nand_release(&info->mtd);
iounmap(info->nand.IO_ADDR_R);
- kfree(&info->mtd);
+ release_mem_region(info->phys_base, NAND_IO_SIZE);
+ kfree(info);
return 0;
}
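
Two bugs are fixed here: the probe path's request_mem_region() now has a matching release_mem_region(), and kfree() finally receives the pointer that kmalloc() returned. Passing &info->mtd, the address of a member embedded inside the allocation, hands the allocator a pointer into the middle of the block. A minimal illustration of the container pattern (struct names are made up):

#include <stdlib.h>

struct mtd_like { int dummy; };

struct info_like {
	unsigned long phys_base;
	struct mtd_like mtd;	/* embedded member, not a separate allocation */
};

int main(void)
{
	struct info_like *info = malloc(sizeof(*info));

	/* free(&info->mtd) would pass a pointer inside the block, which is
	 * undefined behaviour; free the container that was allocated: */
	free(info);
	return 0;
}
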
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index 131b58a..aefaf8c 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -21,7 +21,6 @@
#include <linux/err.h>
#include <asm/io.h>
#include <asm/sizes.h>
-#include <mach/hardware.h>
#include <linux/platform_data/mtd-orion_nand.h>
static void orion_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c
index 1bcb520..a47ee68 100644
--- a/drivers/mtd/nand/plat_nand.c
+++ b/drivers/mtd/nand/plat_nand.c
@@ -37,6 +37,11 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
const char **part_types;
int err = 0;
+ if (!pdata) {
+ dev_err(&pdev->dev, "platform_nand_data is missing\n");
+ return -EINVAL;
+ }
+
if (pdata->chip.nr_chips < 1) {
dev_err(&pdev->dev, "invalid number of chips specified\n");
return -EINVAL;
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index c452271..37ee75c 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -683,11 +683,13 @@ static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
info->state = STATE_IDLE;
}
-static void pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
+static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
struct nand_chip *chip, const uint8_t *buf, int oob_required)
{
chip->write_buf(mtd, buf, mtd->writesize);
chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ return 0;
}
static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
@@ -771,12 +773,6 @@ static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
info->buf_start += real_len;
}
-static int pxa3xx_nand_verify_buf(struct mtd_info *mtd,
- const uint8_t *buf, int len)
-{
- return 0;
-}
-
static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
{
return;
@@ -1007,7 +1003,6 @@ KEEP_CONFIG:
chip->ecc.size = host->page_size;
chip->ecc.strength = 1;
- chip->options |= NAND_NO_READRDY;
if (host->reg_ndcr & NDCR_DWIDTH_M)
chip->options |= NAND_BUSWIDTH_16;
@@ -1070,7 +1065,6 @@ static int alloc_nand_resource(struct platform_device *pdev)
chip->read_byte = pxa3xx_nand_read_byte;
chip->read_buf = pxa3xx_nand_read_buf;
chip->write_buf = pxa3xx_nand_write_buf;
- chip->verify_buf = pxa3xx_nand_verify_buf;
}
spin_lock_init(&chip->controller->lock);
diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c
index 8cb6277..4495f85 100644
--- a/drivers/mtd/nand/r852.c
+++ b/drivers/mtd/nand/r852.c
@@ -309,27 +309,6 @@ static uint8_t r852_read_byte(struct mtd_info *mtd)
return r852_read_reg(dev, R852_DATALINE);
}
-
-/*
- * Readback the buffer to verify it
- */
-int r852_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
-{
- struct r852_device *dev = r852_get_dev(mtd);
-
- /* We can't be sure about anything here... */
- if (dev->card_unstable)
- return -1;
-
- /* This will never happen, unless you wired up a nand chip
- with > 512 bytes page size to the reader */
- if (len > SM_SECTOR_SIZE)
- return 0;
-
- r852_read_buf(mtd, dev->tmp_buffer, len);
- return memcmp(buf, dev->tmp_buffer, len);
-}
-
/*
* Control several chip lines & send commands
*/
@@ -882,7 +861,6 @@ int r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
chip->read_byte = r852_read_byte;
chip->read_buf = r852_read_buf;
chip->write_buf = r852_write_buf;
- chip->verify_buf = r852_verify_buf;
/* ecc */
chip->ecc.mode = NAND_ECC_HW_SYNDROME;
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index d804061..295e4be 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -21,6 +21,8 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#define pr_fmt(fmt) "nand-s3c2410: " fmt
+
#ifdef CONFIG_MTD_NAND_S3C2410_DEBUG
#define DEBUG
#endif
@@ -30,6 +32,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/string.h>
+#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
@@ -43,24 +46,9 @@
#include <linux/mtd/nand_ecc.h>
#include <linux/mtd/partitions.h>
-#include <asm/io.h>
-
#include <plat/regs-nand.h>
#include <linux/platform_data/mtd-nand-s3c2410.h>
-#ifdef CONFIG_MTD_NAND_S3C2410_HWECC
-static int hardware_ecc = 1;
-#else
-static int hardware_ecc = 0;
-#endif
-
-#ifdef CONFIG_MTD_NAND_S3C2410_CLKSTOP
-static const int clock_stop = 1;
-#else
-static const int clock_stop = 0;
-#endif
-
-
/* new oob placement block for use with hardware ecc generation
*/
@@ -109,9 +97,8 @@ enum s3c_nand_clk_state {
* @mtds: An array of MTD instances on this controller.
* @platform: The platform data for this board.
* @device: The platform device we bound to.
- * @area: The IO area resource that came from request_mem_region().
* @clk: The clock resource for this controller.
- * @regs: The area mapped for the hardware registers described by @area.
+ * @regs: The area mapped for the hardware registers.
* @sel_reg: Pointer to the register controlling the NAND selection.
* @sel_bit: The bit in @sel_reg to select the NAND chip.
* @mtd_count: The number of MTDs created from this controller.
@@ -128,7 +115,6 @@ struct s3c2410_nand_info {
/* device info */
struct device *device;
- struct resource *area;
struct clk *clk;
void __iomem *regs;
void __iomem *sel_reg;
@@ -169,7 +155,11 @@ static struct s3c2410_platform_nand *to_nand_plat(struct platform_device *dev)
static inline int allow_clk_suspend(struct s3c2410_nand_info *info)
{
- return clock_stop;
+#ifdef CONFIG_MTD_NAND_S3C2410_CLKSTOP
+ return 1;
+#else
+ return 0;
+#endif
}
/**
@@ -215,7 +205,8 @@ static int s3c_nand_calc_rate(int wanted, unsigned long clk, int max)
pr_debug("result %d from %ld, %d\n", result, clk, wanted);
if (result > max) {
- printk("%d ns is too big for current clock rate %ld\n", wanted, clk);
+ pr_err("%d ns is too big for current clock rate %ld\n",
+ wanted, clk);
return -1;
}
@@ -225,7 +216,7 @@ static int s3c_nand_calc_rate(int wanted, unsigned long clk, int max)
return result;
}
-#define to_ns(ticks,clk) (((ticks) * NS_IN_KHZ) / (unsigned int)(clk))
+#define to_ns(ticks, clk) (((ticks) * NS_IN_KHZ) / (unsigned int)(clk))
/* controller setup */
@@ -268,7 +259,8 @@ static int s3c2410_nand_setrate(struct s3c2410_nand_info *info)
}
dev_info(info->device, "Tacls=%d, %dns Twrph0=%d %dns, Twrph1=%d %dns\n",
- tacls, to_ns(tacls, clkrate), twrph0, to_ns(twrph0, clkrate), twrph1, to_ns(twrph1, clkrate));
+ tacls, to_ns(tacls, clkrate), twrph0, to_ns(twrph0, clkrate),
+ twrph1, to_ns(twrph1, clkrate));
switch (info->cpu_type) {
case TYPE_S3C2410:
@@ -325,13 +317,13 @@ static int s3c2410_nand_inithw(struct s3c2410_nand_info *info)
if (ret < 0)
return ret;
- switch (info->cpu_type) {
- case TYPE_S3C2410:
+ switch (info->cpu_type) {
+ case TYPE_S3C2410:
default:
break;
- case TYPE_S3C2440:
- case TYPE_S3C2412:
+ case TYPE_S3C2440:
+ case TYPE_S3C2412:
/* enable the controller and de-assert nFCE */
writel(S3C2440_NFCONT_ENABLE, info->regs + S3C2440_NFCONT);
@@ -450,6 +442,7 @@ static int s3c2412_nand_devready(struct mtd_info *mtd)
/* ECC handling functions */
+#ifdef CONFIG_MTD_NAND_S3C2410_HWECC
static int s3c2410_nand_correct_data(struct mtd_info *mtd, u_char *dat,
u_char *read_ecc, u_char *calc_ecc)
{
@@ -463,10 +456,8 @@ static int s3c2410_nand_correct_data(struct mtd_info *mtd, u_char *dat,
diff1 = read_ecc[1] ^ calc_ecc[1];
diff2 = read_ecc[2] ^ calc_ecc[2];
- pr_debug("%s: rd %02x%02x%02x calc %02x%02x%02x diff %02x%02x%02x\n",
- __func__,
- read_ecc[0], read_ecc[1], read_ecc[2],
- calc_ecc[0], calc_ecc[1], calc_ecc[2],
+ pr_debug("%s: rd %*phN calc %*phN diff %02x%02x%02x\n",
+ __func__, 3, read_ecc, 3, calc_ecc,
diff0, diff1, diff2);
if (diff0 == 0 && diff1 == 0 && diff2 == 0)
@@ -546,7 +537,8 @@ static void s3c2412_nand_enable_hwecc(struct mtd_info *mtd, int mode)
unsigned long ctrl;
ctrl = readl(info->regs + S3C2440_NFCONT);
- writel(ctrl | S3C2412_NFCONT_INIT_MAIN_ECC, info->regs + S3C2440_NFCONT);
+ writel(ctrl | S3C2412_NFCONT_INIT_MAIN_ECC,
+ info->regs + S3C2440_NFCONT);
}
static void s3c2440_nand_enable_hwecc(struct mtd_info *mtd, int mode)
@@ -558,7 +550,8 @@ static void s3c2440_nand_enable_hwecc(struct mtd_info *mtd, int mode)
writel(ctrl | S3C2440_NFCONT_INITECC, info->regs + S3C2440_NFCONT);
}
-static int s3c2410_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code)
+static int s3c2410_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
+ u_char *ecc_code)
{
struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
@@ -566,13 +559,13 @@ static int s3c2410_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u
ecc_code[1] = readb(info->regs + S3C2410_NFECC + 1);
ecc_code[2] = readb(info->regs + S3C2410_NFECC + 2);
- pr_debug("%s: returning ecc %02x%02x%02x\n", __func__,
- ecc_code[0], ecc_code[1], ecc_code[2]);
+ pr_debug("%s: returning ecc %*phN\n", __func__, 3, ecc_code);
return 0;
}
-static int s3c2412_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code)
+static int s3c2412_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
+ u_char *ecc_code)
{
struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
unsigned long ecc = readl(info->regs + S3C2412_NFMECC0);
@@ -581,12 +574,13 @@ static int s3c2412_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u
ecc_code[1] = ecc >> 8;
ecc_code[2] = ecc >> 16;
- pr_debug("calculate_ecc: returning ecc %02x,%02x,%02x\n", ecc_code[0], ecc_code[1], ecc_code[2]);
+ pr_debug("%s: returning ecc %*phN\n", __func__, 3, ecc_code);
return 0;
}
-static int s3c2440_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code)
+static int s3c2440_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
+ u_char *ecc_code)
{
struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
unsigned long ecc = readl(info->regs + S3C2440_NFMECC0);
@@ -599,6 +593,7 @@ static int s3c2440_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u
return 0;
}
+#endif
/* over-ride the standard functions for a little more speed. We can
* use read/write block to move the data buffers to/from the controller
@@ -625,13 +620,15 @@ static void s3c2440_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
}
}
-static void s3c2410_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
+static void s3c2410_nand_write_buf(struct mtd_info *mtd, const u_char *buf,
+ int len)
{
struct nand_chip *this = mtd->priv;
writesb(this->IO_ADDR_W, buf, len);
}
-static void s3c2440_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
+static void s3c2440_nand_write_buf(struct mtd_info *mtd, const u_char *buf,
+ int len)
{
struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
@@ -675,7 +672,8 @@ static inline int s3c2410_nand_cpufreq_register(struct s3c2410_nand_info *info)
CPUFREQ_TRANSITION_NOTIFIER);
}
-static inline void s3c2410_nand_cpufreq_deregister(struct s3c2410_nand_info *info)
+static inline void
+s3c2410_nand_cpufreq_deregister(struct s3c2410_nand_info *info)
{
cpufreq_unregister_notifier(&info->freq_transition,
CPUFREQ_TRANSITION_NOTIFIER);
@@ -687,7 +685,8 @@ static inline int s3c2410_nand_cpufreq_register(struct s3c2410_nand_info *info)
return 0;
}
-static inline void s3c2410_nand_cpufreq_deregister(struct s3c2410_nand_info *info)
+static inline void
+s3c2410_nand_cpufreq_deregister(struct s3c2410_nand_info *info)
{
}
#endif
@@ -717,29 +716,12 @@ static int s3c24xx_nand_remove(struct platform_device *pdev)
pr_debug("releasing mtd %d (%p)\n", mtdno, ptr);
nand_release(&ptr->mtd);
}
-
- kfree(info->mtds);
}
/* free the common resources */
- if (!IS_ERR(info->clk)) {
+ if (!IS_ERR(info->clk))
s3c2410_nand_clk_set_state(info, CLOCK_DISABLE);
- clk_put(info->clk);
- }
-
- if (info->regs != NULL) {
- iounmap(info->regs);
- info->regs = NULL;
- }
-
- if (info->area != NULL) {
- release_resource(info->area);
- kfree(info->area);
- info->area = NULL;
- }
-
- kfree(info);
return 0;
}
@@ -810,7 +792,7 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
dev_info(info->device, "System booted from NAND\n");
break;
- }
+ }
chip->IO_ADDR_R = chip->IO_ADDR_W;
@@ -819,32 +801,31 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
nmtd->mtd.owner = THIS_MODULE;
nmtd->set = set;
- if (hardware_ecc) {
+#ifdef CONFIG_MTD_NAND_S3C2410_HWECC
+ chip->ecc.calculate = s3c2410_nand_calculate_ecc;
+ chip->ecc.correct = s3c2410_nand_correct_data;
+ chip->ecc.mode = NAND_ECC_HW;
+ chip->ecc.strength = 1;
+
+ switch (info->cpu_type) {
+ case TYPE_S3C2410:
+ chip->ecc.hwctl = s3c2410_nand_enable_hwecc;
chip->ecc.calculate = s3c2410_nand_calculate_ecc;
- chip->ecc.correct = s3c2410_nand_correct_data;
- chip->ecc.mode = NAND_ECC_HW;
- chip->ecc.strength = 1;
-
- switch (info->cpu_type) {
- case TYPE_S3C2410:
- chip->ecc.hwctl = s3c2410_nand_enable_hwecc;
- chip->ecc.calculate = s3c2410_nand_calculate_ecc;
- break;
-
- case TYPE_S3C2412:
- chip->ecc.hwctl = s3c2412_nand_enable_hwecc;
- chip->ecc.calculate = s3c2412_nand_calculate_ecc;
- break;
-
- case TYPE_S3C2440:
- chip->ecc.hwctl = s3c2440_nand_enable_hwecc;
- chip->ecc.calculate = s3c2440_nand_calculate_ecc;
- break;
+ break;
- }
- } else {
- chip->ecc.mode = NAND_ECC_SOFT;
+ case TYPE_S3C2412:
+ chip->ecc.hwctl = s3c2412_nand_enable_hwecc;
+ chip->ecc.calculate = s3c2412_nand_calculate_ecc;
+ break;
+
+ case TYPE_S3C2440:
+ chip->ecc.hwctl = s3c2440_nand_enable_hwecc;
+ chip->ecc.calculate = s3c2440_nand_calculate_ecc;
+ break;
}
+#else
+ chip->ecc.mode = NAND_ECC_SOFT;
+#endif
if (set->ecc_layout != NULL)
chip->ecc.layout = set->ecc_layout;
@@ -921,7 +902,7 @@ static void s3c2410_nand_update_chip(struct s3c2410_nand_info *info,
static int s3c24xx_nand_probe(struct platform_device *pdev)
{
struct s3c2410_platform_nand *plat = to_nand_plat(pdev);
- enum s3c_cpu_type cpu_type;
+ enum s3c_cpu_type cpu_type;
struct s3c2410_nand_info *info;
struct s3c2410_nand_mtd *nmtd;
struct s3c2410_nand_set *sets;
@@ -935,7 +916,7 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
pr_debug("s3c2410_nand_probe(%p)\n", pdev);
- info = kzalloc(sizeof(*info), GFP_KERNEL);
+ info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (info == NULL) {
dev_err(&pdev->dev, "no memory for flash info\n");
err = -ENOMEM;
@@ -949,7 +930,7 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
/* get the clock source and enable it */
- info->clk = clk_get(&pdev->dev, "nand");
+ info->clk = devm_clk_get(&pdev->dev, "nand");
if (IS_ERR(info->clk)) {
dev_err(&pdev->dev, "failed to get clock\n");
err = -ENOENT;
@@ -961,22 +942,14 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
/* allocate and map the resource */
/* currently we assume we have the one resource */
- res = pdev->resource;
+ res = pdev->resource;
size = resource_size(res);
- info->area = request_mem_region(res->start, size, pdev->name);
-
- if (info->area == NULL) {
- dev_err(&pdev->dev, "cannot reserve register region\n");
- err = -ENOENT;
- goto exit_error;
- }
-
- info->device = &pdev->dev;
- info->platform = plat;
- info->regs = ioremap(res->start, size);
- info->cpu_type = cpu_type;
+ info->device = &pdev->dev;
+ info->platform = plat;
+ info->cpu_type = cpu_type;
+ info->regs = devm_request_and_ioremap(&pdev->dev, res);
if (info->regs == NULL) {
dev_err(&pdev->dev, "cannot reserve register region\n");
err = -EIO;
@@ -999,7 +972,7 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
/* allocate our information */
size = nr_sets * sizeof(*info->mtds);
- info->mtds = kzalloc(size, GFP_KERNEL);
+ info->mtds = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
if (info->mtds == NULL) {
dev_err(&pdev->dev, "failed to allocate mtd storage\n");
err = -ENOMEM;
@@ -1011,7 +984,8 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
nmtd = info->mtds;
for (setno = 0; setno < nr_sets; setno++, nmtd++) {
- pr_debug("initialising set %d (%p, info %p)\n", setno, nmtd, info);
+ pr_debug("initialising set %d (%p, info %p)\n",
+ setno, nmtd, info);
s3c2410_nand_init_chip(info, nmtd, sets);
@@ -1134,20 +1108,7 @@ static struct platform_driver s3c24xx_nand_driver = {
},
};
-static int __init s3c2410_nand_init(void)
-{
- printk("S3C24XX NAND Driver, (c) 2004 Simtec Electronics\n");
-
- return platform_driver_register(&s3c24xx_nand_driver);
-}
-
-static void __exit s3c2410_nand_exit(void)
-{
- platform_driver_unregister(&s3c24xx_nand_driver);
-}
-
-module_init(s3c2410_nand_init);
-module_exit(s3c2410_nand_exit);
+module_platform_driver(s3c24xx_nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
index aa9b8a5..4fbfe96 100644
--- a/drivers/mtd/nand/sh_flctl.c
+++ b/drivers/mtd/nand/sh_flctl.c
@@ -24,10 +24,12 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
+#include <linux/string.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
@@ -43,11 +45,17 @@ static struct nand_ecclayout flctl_4secc_oob_16 = {
};
static struct nand_ecclayout flctl_4secc_oob_64 = {
- .eccbytes = 10,
- .eccpos = {48, 49, 50, 51, 52, 53, 54, 55, 56, 57},
+ .eccbytes = 4 * 10,
+ .eccpos = {
+ 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 54, 55, 56, 57, 58, 59, 60, 61, 62, 63 },
.oobfree = {
- {.offset = 60,
- . length = 4} },
+ {.offset = 2, .length = 4},
+ {.offset = 16, .length = 6},
+ {.offset = 32, .length = 6},
+ {.offset = 48, .length = 6} },
};
static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
@@ -61,15 +69,15 @@ static struct nand_bbt_descr flctl_4secc_smallpage = {
static struct nand_bbt_descr flctl_4secc_largepage = {
.options = NAND_BBT_SCAN2NDPAGE,
- .offs = 58,
+ .offs = 0,
.len = 2,
.pattern = scan_ff_pattern,
};
static void empty_fifo(struct sh_flctl *flctl)
{
- writel(0x000c0000, FLINTDMACR(flctl)); /* FIFO Clear */
- writel(0x00000000, FLINTDMACR(flctl)); /* Clear Error flags */
+ writel(flctl->flintdmacr_base | AC1CLR | AC0CLR, FLINTDMACR(flctl));
+ writel(flctl->flintdmacr_base, FLINTDMACR(flctl));
}
static void start_translation(struct sh_flctl *flctl)
@@ -158,27 +166,56 @@ static void wait_wfifo_ready(struct sh_flctl *flctl)
timeout_error(flctl, __func__);
}
-static int wait_recfifo_ready(struct sh_flctl *flctl, int sector_number)
+static enum flctl_ecc_res_t wait_recfifo_ready
+ (struct sh_flctl *flctl, int sector_number)
{
uint32_t timeout = LOOP_TIMEOUT_MAX;
- int checked[4];
void __iomem *ecc_reg[4];
int i;
+ int state = FL_SUCCESS;
uint32_t data, size;
- memset(checked, 0, sizeof(checked));
-
+ /*
+ * First this loop checks FLDTCNTR to see whether we are ready to read
+ * out the OOB data. This is the case if either all went fine without
+ * errors, or the bottom part of the loop corrected the errors or marked
+ * them as uncorrectable and the controller has been given time to push
+ * the data into the FIFO.
+ */
while (timeout--) {
+ /* check if all is ok and we can read out the OOB */
size = readl(FLDTCNTR(flctl)) >> 24;
- if (size & 0xFF)
- return 0; /* success */
+ if ((size & 0xFF) == 4)
+ return state;
+
+ /* check if a correction code has been calculated */
+ if (!(readl(FL4ECCCR(flctl)) & _4ECCEND)) {
+ /*
+ * either we wait for the fifo to be filled or a
+ * correction pattern is being generated
+ */
+ udelay(1);
+ continue;
+ }
- if (readl(FL4ECCCR(flctl)) & _4ECCFA)
- return 1; /* can't correct */
+ /* check for an uncorrectable error */
+ if (readl(FL4ECCCR(flctl)) & _4ECCFA) {
+ /* check if we face a non-empty page */
+ for (i = 0; i < 512; i++) {
+ if (flctl->done_buff[i] != 0xff) {
+ state = FL_ERROR; /* can't correct */
+ break;
+ }
+ }
- udelay(1);
- if (!(readl(FL4ECCCR(flctl)) & _4ECCEND))
+ if (state == FL_SUCCESS)
+ dev_dbg(&flctl->pdev->dev,
+ "reading empty sector %d, ecc error ignored\n",
+ sector_number);
+
+ writel(0, FL4ECCCR(flctl));
continue;
+ }
/* start error correction */
ecc_reg[0] = FL4ECCRESULT0(flctl);
@@ -187,28 +224,26 @@ static int wait_recfifo_ready(struct sh_flctl *flctl, int sector_number)
ecc_reg[3] = FL4ECCRESULT3(flctl);
for (i = 0; i < 3; i++) {
+ uint8_t org;
+ int index;
+
data = readl(ecc_reg[i]);
- if (data != INIT_FL4ECCRESULT_VAL && !checked[i]) {
- uint8_t org;
- int index;
-
- if (flctl->page_size)
- index = (512 * sector_number) +
- (data >> 16);
- else
- index = data >> 16;
-
- org = flctl->done_buff[index];
- flctl->done_buff[index] = org ^ (data & 0xFF);
- checked[i] = 1;
- }
- }
+ if (flctl->page_size)
+ index = (512 * sector_number) +
+ (data >> 16);
+ else
+ index = data >> 16;
+
+ org = flctl->done_buff[index];
+ flctl->done_buff[index] = org ^ (data & 0xFF);
+ }
+ state = FL_REPAIRABLE;
writel(0, FL4ECCCR(flctl));
}
timeout_error(flctl, __func__);
- return 1; /* timeout */
+ return FL_TIMEOUT; /* timeout */
}
static void wait_wecfifo_ready(struct sh_flctl *flctl)
@@ -241,31 +276,33 @@ static void read_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
{
int i, len_4align;
unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
- void *fifo_addr = (void *)FLDTFIFO(flctl);
len_4align = (rlen + 3) / 4;
for (i = 0; i < len_4align; i++) {
wait_rfifo_ready(flctl);
- buf[i] = readl(fifo_addr);
+ buf[i] = readl(FLDTFIFO(flctl));
buf[i] = be32_to_cpu(buf[i]);
}
}
-static int read_ecfiforeg(struct sh_flctl *flctl, uint8_t *buff, int sector)
+static enum flctl_ecc_res_t read_ecfiforeg
+ (struct sh_flctl *flctl, uint8_t *buff, int sector)
{
int i;
+ enum flctl_ecc_res_t res;
unsigned long *ecc_buf = (unsigned long *)buff;
- void *fifo_addr = (void *)FLECFIFO(flctl);
- for (i = 0; i < 4; i++) {
- if (wait_recfifo_ready(flctl , sector))
- return 1;
- ecc_buf[i] = readl(fifo_addr);
- ecc_buf[i] = be32_to_cpu(ecc_buf[i]);
+ res = wait_recfifo_ready(flctl, sector);
+
+ if (res != FL_ERROR) {
+ for (i = 0; i < 4; i++) {
+ ecc_buf[i] = readl(FLECFIFO(flctl));
+ ecc_buf[i] = be32_to_cpu(ecc_buf[i]);
+ }
}
- return 0;
+ return res;
}
static void write_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
@@ -281,6 +318,18 @@ static void write_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
}
}
+static void write_ec_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
+{
+ int i, len_4align;
+ unsigned long *data = (unsigned long *)&flctl->done_buff[offset];
+
+ len_4align = (rlen + 3) / 4;
+ for (i = 0; i < len_4align; i++) {
+ wait_wecfifo_ready(flctl);
+ writel(cpu_to_be32(data[i]), FLECFIFO(flctl));
+ }
+}
+
static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_val)
{
struct sh_flctl *flctl = mtd_to_flctl(mtd);
@@ -346,73 +395,65 @@ static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_va
static int flctl_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *buf, int oob_required, int page)
{
- int i, eccsize = chip->ecc.size;
- int eccbytes = chip->ecc.bytes;
- int eccsteps = chip->ecc.steps;
- uint8_t *p = buf;
- struct sh_flctl *flctl = mtd_to_flctl(mtd);
-
- for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
- chip->read_buf(mtd, p, eccsize);
-
- for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
- if (flctl->hwecc_cant_correct[i])
- mtd->ecc_stats.failed++;
- else
- mtd->ecc_stats.corrected += 0; /* FIXME */
- }
-
+ chip->read_buf(mtd, buf, mtd->writesize);
+ if (oob_required)
+ chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
return 0;
}
-static void flctl_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
+static int flctl_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *buf, int oob_required)
{
- int i, eccsize = chip->ecc.size;
- int eccbytes = chip->ecc.bytes;
- int eccsteps = chip->ecc.steps;
- const uint8_t *p = buf;
-
- for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
- chip->write_buf(mtd, p, eccsize);
+ chip->write_buf(mtd, buf, mtd->writesize);
+ chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+ return 0;
}
static void execmd_read_page_sector(struct mtd_info *mtd, int page_addr)
{
struct sh_flctl *flctl = mtd_to_flctl(mtd);
int sector, page_sectors;
+ enum flctl_ecc_res_t ecc_result;
- if (flctl->page_size)
- page_sectors = 4;
- else
- page_sectors = 1;
-
- writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE | _4ECCCORRECT,
- FLCMNCR(flctl));
+ page_sectors = flctl->page_size ? 4 : 1;
set_cmd_regs(mtd, NAND_CMD_READ0,
(NAND_CMD_READSTART << 8) | NAND_CMD_READ0);
- for (sector = 0; sector < page_sectors; sector++) {
- int ret;
+ writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE | _4ECCCORRECT,
+ FLCMNCR(flctl));
+ writel(readl(FLCMDCR(flctl)) | page_sectors, FLCMDCR(flctl));
+ writel(page_addr << 2, FLADR(flctl));
- empty_fifo(flctl);
- writel(readl(FLCMDCR(flctl)) | 1, FLCMDCR(flctl));
- writel(page_addr << 2 | sector, FLADR(flctl));
+ empty_fifo(flctl);
+ start_translation(flctl);
- start_translation(flctl);
+ for (sector = 0; sector < page_sectors; sector++) {
read_fiforeg(flctl, 512, 512 * sector);
- ret = read_ecfiforeg(flctl,
+ ecc_result = read_ecfiforeg(flctl,
&flctl->done_buff[mtd->writesize + 16 * sector],
sector);
- if (ret)
- flctl->hwecc_cant_correct[sector] = 1;
-
- writel(0x0, FL4ECCCR(flctl));
- wait_completion(flctl);
+ switch (ecc_result) {
+ case FL_REPAIRABLE:
+ dev_info(&flctl->pdev->dev,
+ "applied ecc on page 0x%x", page_addr);
+ flctl->mtd.ecc_stats.corrected++;
+ break;
+ case FL_ERROR:
+ dev_warn(&flctl->pdev->dev,
+ "page 0x%x contains corrupted data\n",
+ page_addr);
+ flctl->mtd.ecc_stats.failed++;
+ break;
+ default:
+ ;
+ }
}
+
+ wait_completion(flctl);
+
writel(readl(FLCMNCR(flctl)) & ~(ACM_SACCES_MODE | _4ECCCORRECT),
FLCMNCR(flctl));
}
@@ -420,30 +461,20 @@ static void execmd_read_page_sector(struct mtd_info *mtd, int page_addr)
static void execmd_read_oob(struct mtd_info *mtd, int page_addr)
{
struct sh_flctl *flctl = mtd_to_flctl(mtd);
+ int page_sectors = flctl->page_size ? 4 : 1;
+ int i;
set_cmd_regs(mtd, NAND_CMD_READ0,
(NAND_CMD_READSTART << 8) | NAND_CMD_READ0);
empty_fifo(flctl);
- if (flctl->page_size) {
- int i;
- /* In case that the page size is 2k */
- for (i = 0; i < 16 * 3; i++)
- flctl->done_buff[i] = 0xFF;
-
- set_addr(mtd, 3 * 528 + 512, page_addr);
- writel(16, FLDTCNTR(flctl));
- start_translation(flctl);
- read_fiforeg(flctl, 16, 16 * 3);
- wait_completion(flctl);
- } else {
- /* In case that the page size is 512b */
- set_addr(mtd, 512, page_addr);
+ for (i = 0; i < page_sectors; i++) {
+ set_addr(mtd, (512 + 16) * i + 512 , page_addr);
writel(16, FLDTCNTR(flctl));
start_translation(flctl);
- read_fiforeg(flctl, 16, 0);
+ read_fiforeg(flctl, 16, 16 * i);
wait_completion(flctl);
}
}
@@ -451,34 +482,26 @@ static void execmd_read_oob(struct mtd_info *mtd, int page_addr)
static void execmd_write_page_sector(struct mtd_info *mtd)
{
struct sh_flctl *flctl = mtd_to_flctl(mtd);
- int i, page_addr = flctl->seqin_page_addr;
+ int page_addr = flctl->seqin_page_addr;
int sector, page_sectors;
- if (flctl->page_size)
- page_sectors = 4;
- else
- page_sectors = 1;
-
- writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE, FLCMNCR(flctl));
+ page_sectors = flctl->page_size ? 4 : 1;
set_cmd_regs(mtd, NAND_CMD_PAGEPROG,
(NAND_CMD_PAGEPROG << 8) | NAND_CMD_SEQIN);
- for (sector = 0; sector < page_sectors; sector++) {
- empty_fifo(flctl);
- writel(readl(FLCMDCR(flctl)) | 1, FLCMDCR(flctl));
- writel(page_addr << 2 | sector, FLADR(flctl));
+ empty_fifo(flctl);
+ writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE, FLCMNCR(flctl));
+ writel(readl(FLCMDCR(flctl)) | page_sectors, FLCMDCR(flctl));
+ writel(page_addr << 2, FLADR(flctl));
+ start_translation(flctl);
- start_translation(flctl);
+ for (sector = 0; sector < page_sectors; sector++) {
write_fiforeg(flctl, 512, 512 * sector);
-
- for (i = 0; i < 4; i++) {
- wait_wecfifo_ready(flctl); /* wait for write ready */
- writel(0xFFFFFFFF, FLECFIFO(flctl));
- }
- wait_completion(flctl);
+ write_ec_fiforeg(flctl, 16, mtd->writesize + 16 * sector);
}
+ wait_completion(flctl);
writel(readl(FLCMNCR(flctl)) & ~ACM_SACCES_MODE, FLCMNCR(flctl));
}
@@ -488,18 +511,12 @@ static void execmd_write_oob(struct mtd_info *mtd)
int page_addr = flctl->seqin_page_addr;
int sector, page_sectors;
- if (flctl->page_size) {
- sector = 3;
- page_sectors = 4;
- } else {
- sector = 0;
- page_sectors = 1;
- }
+ page_sectors = flctl->page_size ? 4 : 1;
set_cmd_regs(mtd, NAND_CMD_PAGEPROG,
(NAND_CMD_PAGEPROG << 8) | NAND_CMD_SEQIN);
- for (; sector < page_sectors; sector++) {
+ for (sector = 0; sector < page_sectors; sector++) {
empty_fifo(flctl);
set_addr(mtd, sector * 528 + 512, page_addr);
writel(16, FLDTCNTR(flctl)); /* set read size */
@@ -731,10 +748,9 @@ static void flctl_select_chip(struct mtd_info *mtd, int chipnr)
static void flctl_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
{
struct sh_flctl *flctl = mtd_to_flctl(mtd);
- int i, index = flctl->index;
+ int index = flctl->index;
- for (i = 0; i < len; i++)
- flctl->done_buff[index + i] = buf[i];
+ memcpy(&flctl->done_buff[index], buf, len);
flctl->index += len;
}
@@ -763,20 +779,11 @@ static uint16_t flctl_read_word(struct mtd_info *mtd)
static void flctl_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
- int i;
-
- for (i = 0; i < len; i++)
- buf[i] = flctl_read_byte(mtd);
-}
-
-static int flctl_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
-{
- int i;
+ struct sh_flctl *flctl = mtd_to_flctl(mtd);
+ int index = flctl->index;
- for (i = 0; i < len; i++)
- if (buf[i] != flctl_read_byte(mtd))
- return -EFAULT;
- return 0;
+ memcpy(buf, &flctl->done_buff[index], len);
+ flctl->index += len;
}
static int flctl_chip_init_tail(struct mtd_info *mtd)
@@ -831,7 +838,7 @@ static int flctl_chip_init_tail(struct mtd_info *mtd)
chip->ecc.mode = NAND_ECC_HW;
/* 4 symbols ECC enabled */
- flctl->flcmncr_base |= _4ECCEN | ECCPOS2 | ECCPOS_02;
+ flctl->flcmncr_base |= _4ECCEN;
} else {
chip->ecc.mode = NAND_ECC_SOFT;
}
@@ -839,6 +846,16 @@ static int flctl_chip_init_tail(struct mtd_info *mtd)
return 0;
}
+static irqreturn_t flctl_handle_flste(int irq, void *dev_id)
+{
+ struct sh_flctl *flctl = dev_id;
+
+ dev_err(&flctl->pdev->dev, "flste irq: %x\n", readl(FLINTDMACR(flctl)));
+ writel(flctl->flintdmacr_base, FLINTDMACR(flctl));
+
+ return IRQ_HANDLED;
+}
+
static int __devinit flctl_probe(struct platform_device *pdev)
{
struct resource *res;
@@ -847,6 +864,7 @@ static int __devinit flctl_probe(struct platform_device *pdev)
struct nand_chip *nand;
struct sh_flctl_platform_data *pdata;
int ret = -ENXIO;
+ int irq;
pdata = pdev->dev.platform_data;
if (pdata == NULL) {
@@ -872,14 +890,27 @@ static int __devinit flctl_probe(struct platform_device *pdev)
goto err_iomap;
}
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "failed to get flste irq data\n");
+ goto err_flste;
+ }
+
+ ret = request_irq(irq, flctl_handle_flste, IRQF_SHARED, "flste", flctl);
+ if (ret) {
+ dev_err(&pdev->dev, "request interrupt failed.\n");
+ goto err_flste;
+ }
+
platform_set_drvdata(pdev, flctl);
flctl_mtd = &flctl->mtd;
nand = &flctl->chip;
flctl_mtd->priv = nand;
flctl->pdev = pdev;
- flctl->flcmncr_base = pdata->flcmncr_val;
flctl->hwecc = pdata->has_hwecc;
flctl->holden = pdata->use_holden;
+ flctl->flcmncr_base = pdata->flcmncr_val;
+ flctl->flintdmacr_base = flctl->hwecc ? (STERINTE | ECERB) : STERINTE;
/* Set address of hardware control function */
/* 20 us command delay time */
@@ -888,7 +919,6 @@ static int __devinit flctl_probe(struct platform_device *pdev)
nand->read_byte = flctl_read_byte;
nand->write_buf = flctl_write_buf;
nand->read_buf = flctl_read_buf;
- nand->verify_buf = flctl_verify_buf;
nand->select_chip = flctl_select_chip;
nand->cmdfunc = flctl_cmdfunc;
@@ -918,6 +948,9 @@ static int __devinit flctl_probe(struct platform_device *pdev)
err_chip:
pm_runtime_disable(&pdev->dev);
+ free_irq(irq, flctl);
+err_flste:
+ iounmap(flctl->reg);
err_iomap:
kfree(flctl);
return ret;
@@ -929,6 +962,8 @@ static int __devexit flctl_remove(struct platform_device *pdev)
nand_release(&flctl->mtd);
pm_runtime_disable(&pdev->dev);
+ free_irq(platform_get_irq(pdev, 0), flctl);
+ iounmap(flctl->reg);
kfree(flctl);
return 0;
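
The sh_flctl rework above replaces the old hwecc_cant_correct[] bookkeeping with an explicit result type: wait_recfifo_ready() now classifies each sector as clean, repaired, uncorrectable, or timed out, and execmd_read_page_sector() turns that into MTD's ECC statistics. A rough sketch of the mapping, with the enum assumed to be declared in the sh_flctl header (which is not part of this excerpt):

    #include <linux/mtd/mtd.h>

    /* Result codes as used by the hunks above; assumed to live in
     * include/linux/mtd/sh_flctl.h, which this excerpt does not show. */
    enum flctl_ecc_res_t { FL_SUCCESS, FL_REPAIRABLE, FL_ERROR, FL_TIMEOUT };

    /* Sketch of the per-sector accounting in execmd_read_page_sector(). */
    static void account_ecc_result(struct mtd_info *mtd,
                                   enum flctl_ecc_res_t res)
    {
        switch (res) {
        case FL_REPAIRABLE:
            mtd->ecc_stats.corrected++;    /* bitflips were fixed up */
            break;
        case FL_ERROR:
            mtd->ecc_stats.failed++;       /* uncorrectable sector */
            break;
        default:
            /* FL_SUCCESS needs no accounting; FL_TIMEOUT is reported
             * separately through timeout_error(). */
            break;
        }
    }
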
diff --git a/drivers/mtd/nand/socrates_nand.c b/drivers/mtd/nand/socrates_nand.c
index e02b08b..f3f28fa 100644
--- a/drivers/mtd/nand/socrates_nand.c
+++ b/drivers/mtd/nand/socrates_nand.c
@@ -98,24 +98,6 @@ static uint16_t socrates_nand_read_word(struct mtd_info *mtd)
return word;
}
-/**
- * socrates_nand_verify_buf - Verify chip data against buffer
- * @mtd: MTD device structure
- * @buf: buffer containing the data to compare
- * @len: number of bytes to compare
- */
-static int socrates_nand_verify_buf(struct mtd_info *mtd, const u8 *buf,
- int len)
-{
- int i;
-
- for (i = 0; i < len; i++) {
- if (buf[i] != socrates_nand_read_byte(mtd))
- return -EFAULT;
- }
- return 0;
-}
-
/*
* Hardware specific access to control-lines
*/
@@ -201,7 +183,6 @@ static int __devinit socrates_nand_probe(struct platform_device *ofdev)
nand_chip->read_word = socrates_nand_read_word;
nand_chip->write_buf = socrates_nand_write_buf;
nand_chip->read_buf = socrates_nand_read_buf;
- nand_chip->verify_buf = socrates_nand_verify_buf;
nand_chip->dev_ready = socrates_nand_device_ready;
nand_chip->ecc.mode = NAND_ECC_SOFT; /* enable ECC */
diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c
index 5aa5180..508e9e0 100644
--- a/drivers/mtd/nand/tmio_nand.c
+++ b/drivers/mtd/nand/tmio_nand.c
@@ -256,18 +256,6 @@ static void tmio_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
tmio_ioread16_rep(tmio->fcr + FCR_DATA, buf, len >> 1);
}
-static int
-tmio_nand_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
-{
- struct tmio_nand *tmio = mtd_to_tmio(mtd);
- u16 *p = (u16 *) buf;
-
- for (len >>= 1; len; len--)
- if (*(p++) != tmio_ioread16(tmio->fcr + FCR_DATA))
- return -EFAULT;
- return 0;
-}
-
static void tmio_nand_enable_hwecc(struct mtd_info *mtd, int mode)
{
struct tmio_nand *tmio = mtd_to_tmio(mtd);
@@ -424,7 +412,6 @@ static int tmio_probe(struct platform_device *dev)
nand_chip->read_byte = tmio_nand_read_byte;
nand_chip->write_buf = tmio_nand_write_buf;
nand_chip->read_buf = tmio_nand_read_buf;
- nand_chip->verify_buf = tmio_nand_verify_buf;
/* set eccmode using hardware ECC */
nand_chip->ecc.mode = NAND_ECC_HW;
diff --git a/drivers/mtd/nand/txx9ndfmc.c b/drivers/mtd/nand/txx9ndfmc.c
index 26398dcf..e3d7266 100644
--- a/drivers/mtd/nand/txx9ndfmc.c
+++ b/drivers/mtd/nand/txx9ndfmc.c
@@ -131,18 +131,6 @@ static void txx9ndfmc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
*buf++ = __raw_readl(ndfdtr);
}
-static int txx9ndfmc_verify_buf(struct mtd_info *mtd, const uint8_t *buf,
- int len)
-{
- struct platform_device *dev = mtd_to_platdev(mtd);
- void __iomem *ndfdtr = ndregaddr(dev, TXX9_NDFDTR);
-
- while (len--)
- if (*buf++ != (uint8_t)__raw_readl(ndfdtr))
- return -EFAULT;
- return 0;
-}
-
static void txx9ndfmc_cmd_ctrl(struct mtd_info *mtd, int cmd,
unsigned int ctrl)
{
@@ -346,7 +334,6 @@ static int __init txx9ndfmc_probe(struct platform_device *dev)
chip->read_byte = txx9ndfmc_read_byte;
chip->read_buf = txx9ndfmc_read_buf;
chip->write_buf = txx9ndfmc_write_buf;
- chip->verify_buf = txx9ndfmc_verify_buf;
chip->cmd_ctrl = txx9ndfmc_cmd_ctrl;
chip->dev_ready = txx9ndfmc_dev_ready;
chip->ecc.calculate = txx9ndfmc_calculate_ecc;
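
socrates_nand, tmio_nand and txx9ndfmc (like sh_flctl above) all lose their ->verify_buf callbacks in this series; with write verification removed from the NAND core, the hook no longer has a caller (the sm_ftl hunk further down drops the last comment pointing at CONFIG_MTD_NAND_VERIFY_WRITE). Every removed implementation had the same read-back-and-compare shape; a sketch of that common pattern, with example_read_byte() standing in for each driver's byte-read helper:

    #include <linux/mtd/mtd.h>

    static u_char example_read_byte(struct mtd_info *mtd);    /* hypothetical */

    static int example_verify_buf(struct mtd_info *mtd, const u_char *buf,
                                  int len)
    {
        int i;

        /* Read the freshly programmed data back, byte by byte, and
         * compare it against the buffer that was just written. */
        for (i = 0; i < len; i++)
            if (buf[i] != example_read_byte(mtd))
                return -EFAULT;
        return 0;
    }
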
diff --git a/drivers/mtd/nand/xway_nand.c b/drivers/mtd/nand/xway_nand.c
new file mode 100644
index 0000000..3f81dc8
--- /dev/null
+++ b/drivers/mtd/nand/xway_nand.c
@@ -0,0 +1,201 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright © 2012 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/mtd/nand.h>
+#include <linux/of_gpio.h>
+#include <linux/of_platform.h>
+
+#include <lantiq_soc.h>
+
+/* nand registers */
+#define EBU_ADDSEL1 0x24
+#define EBU_NAND_CON 0xB0
+#define EBU_NAND_WAIT 0xB4
+#define EBU_NAND_ECC0 0xB8
+#define EBU_NAND_ECC_AC 0xBC
+
+/* nand commands */
+#define NAND_CMD_ALE (1 << 2)
+#define NAND_CMD_CLE (1 << 3)
+#define NAND_CMD_CS (1 << 4)
+#define NAND_WRITE_CMD_RESET 0xff
+#define NAND_WRITE_CMD (NAND_CMD_CS | NAND_CMD_CLE)
+#define NAND_WRITE_ADDR (NAND_CMD_CS | NAND_CMD_ALE)
+#define NAND_WRITE_DATA (NAND_CMD_CS)
+#define NAND_READ_DATA (NAND_CMD_CS)
+#define NAND_WAIT_WR_C (1 << 3)
+#define NAND_WAIT_RD (0x1)
+
+/* we need to tell the EBU which addr we mapped the NAND to */
+#define ADDSEL1_MASK(x) (x << 4)
+#define ADDSEL1_REGEN 1
+
+/* we need to tell the EBU that we have nand attached and set it up properly */
+#define BUSCON1_SETUP (1 << 22)
+#define BUSCON1_BCGEN_RES (0x3 << 12)
+#define BUSCON1_WAITWRC2 (2 << 8)
+#define BUSCON1_WAITRDC2 (2 << 6)
+#define BUSCON1_HOLDC1 (1 << 4)
+#define BUSCON1_RECOVC1 (1 << 2)
+#define BUSCON1_CMULT4 1
+
+#define NAND_CON_CE (1 << 20)
+#define NAND_CON_OUT_CS1 (1 << 10)
+#define NAND_CON_IN_CS1 (1 << 8)
+#define NAND_CON_PRE_P (1 << 7)
+#define NAND_CON_WP_P (1 << 6)
+#define NAND_CON_SE_P (1 << 5)
+#define NAND_CON_CS_P (1 << 4)
+#define NAND_CON_CSMUX (1 << 1)
+#define NAND_CON_NANDM 1
+
+static void xway_reset_chip(struct nand_chip *chip)
+{
+ unsigned long nandaddr = (unsigned long) chip->IO_ADDR_W;
+ unsigned long flags;
+
+ nandaddr &= ~NAND_WRITE_ADDR;
+ nandaddr |= NAND_WRITE_CMD;
+
+ /* finish with a reset */
+ spin_lock_irqsave(&ebu_lock, flags);
+ writeb(NAND_WRITE_CMD_RESET, (void __iomem *) nandaddr);
+ while ((ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_WR_C) == 0)
+ ;
+ spin_unlock_irqrestore(&ebu_lock, flags);
+}
+
+static void xway_select_chip(struct mtd_info *mtd, int chip)
+{
+
+ switch (chip) {
+ case -1:
+ ltq_ebu_w32_mask(NAND_CON_CE, 0, EBU_NAND_CON);
+ ltq_ebu_w32_mask(NAND_CON_NANDM, 0, EBU_NAND_CON);
+ break;
+ case 0:
+ ltq_ebu_w32_mask(0, NAND_CON_NANDM, EBU_NAND_CON);
+ ltq_ebu_w32_mask(0, NAND_CON_CE, EBU_NAND_CON);
+ break;
+ default:
+ BUG();
+ }
+}
+
+static void xway_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+{
+ struct nand_chip *this = mtd->priv;
+ unsigned long nandaddr = (unsigned long) this->IO_ADDR_W;
+ unsigned long flags;
+
+ if (ctrl & NAND_CTRL_CHANGE) {
+ nandaddr &= ~(NAND_WRITE_CMD | NAND_WRITE_ADDR);
+ if (ctrl & NAND_CLE)
+ nandaddr |= NAND_WRITE_CMD;
+ else
+ nandaddr |= NAND_WRITE_ADDR;
+ this->IO_ADDR_W = (void __iomem *) nandaddr;
+ }
+
+ if (cmd != NAND_CMD_NONE) {
+ spin_lock_irqsave(&ebu_lock, flags);
+ writeb(cmd, this->IO_ADDR_W);
+ while ((ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_WR_C) == 0)
+ ;
+ spin_unlock_irqrestore(&ebu_lock, flags);
+ }
+}
+
+static int xway_dev_ready(struct mtd_info *mtd)
+{
+ return ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_RD;
+}
+
+static unsigned char xway_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+ unsigned long nandaddr = (unsigned long) this->IO_ADDR_R;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&ebu_lock, flags);
+ ret = ltq_r8((void __iomem *)(nandaddr + NAND_READ_DATA));
+ spin_unlock_irqrestore(&ebu_lock, flags);
+
+ return ret;
+}
+
+static int xway_nand_probe(struct platform_device *pdev)
+{
+ struct nand_chip *this = platform_get_drvdata(pdev);
+ unsigned long nandaddr = (unsigned long) this->IO_ADDR_W;
+ const __be32 *cs = of_get_property(pdev->dev.of_node,
+ "lantiq,cs", NULL);
+ u32 cs_flag = 0;
+
+ /* load our CS from the DT; use CS1 if the property is a valid 1, else default to CS0 */
+ if (cs && (*cs == 1))
+ cs_flag = NAND_CON_IN_CS1 | NAND_CON_OUT_CS1;
+
+ /* setup the EBU to run in NAND mode on our base addr */
+ ltq_ebu_w32(CPHYSADDR(nandaddr)
+ | ADDSEL1_MASK(3) | ADDSEL1_REGEN, EBU_ADDSEL1);
+
+ ltq_ebu_w32(BUSCON1_SETUP | BUSCON1_BCGEN_RES | BUSCON1_WAITWRC2
+ | BUSCON1_WAITRDC2 | BUSCON1_HOLDC1 | BUSCON1_RECOVC1
+ | BUSCON1_CMULT4, LTQ_EBU_BUSCON1);
+
+ ltq_ebu_w32(NAND_CON_NANDM | NAND_CON_CSMUX | NAND_CON_CS_P
+ | NAND_CON_SE_P | NAND_CON_WP_P | NAND_CON_PRE_P
+ | cs_flag, EBU_NAND_CON);
+
+ /* finish with a reset */
+ xway_reset_chip(this);
+
+ return 0;
+}
+
+/* allow users to override the partition in DT using the cmdline */
+static const char *part_probes[] = { "cmdlinepart", "ofpart", NULL };
+
+static struct platform_nand_data xway_nand_data = {
+ .chip = {
+ .nr_chips = 1,
+ .chip_delay = 30,
+ .part_probe_types = part_probes,
+ },
+ .ctrl = {
+ .probe = xway_nand_probe,
+ .cmd_ctrl = xway_cmd_ctrl,
+ .dev_ready = xway_dev_ready,
+ .select_chip = xway_select_chip,
+ .read_byte = xway_read_byte,
+ }
+};
+
+/*
+ * Try to find the node inside the DT. If it is available, attach our
+ * platform_nand_data.
+ */
+static int __init xway_register_nand(void)
+{
+ struct device_node *node;
+ struct platform_device *pdev;
+
+ node = of_find_compatible_node(NULL, NULL, "lantiq,nand-xway");
+ if (!node)
+ return -ENOENT;
+ pdev = of_find_device_by_node(node);
+ if (!pdev)
+ return -EINVAL;
+ pdev->dev.platform_data = &xway_nand_data;
+ of_node_put(node);
+ return 0;
+}
+
+subsys_initcall(xway_register_nand);
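
xway_nand.c does not register a driver of its own: xway_register_nand() merely attaches xway_nand_data to the platform device created for the DT node, and the generic plat_nand driver is expected to pick the hooks up from there. A rough sketch of how such a platform_nand_data is consumed (illustrative only, not the real plat_nand source):

    #include <linux/mtd/nand.h>
    #include <linux/platform_device.h>
    #include <linux/slab.h>

    static int plat_nand_probe_sketch(struct platform_device *pdev)
    {
        struct platform_nand_data *pdata = pdev->dev.platform_data;
        struct nand_chip *chip;

        chip = kzalloc(sizeof(*chip), GFP_KERNEL);
        if (!chip)
            return -ENOMEM;

        /* The ctrl hooks from xway_nand_data end up on the nand_chip. */
        chip->cmd_ctrl = pdata->ctrl.cmd_ctrl;          /* xway_cmd_ctrl */
        chip->dev_ready = pdata->ctrl.dev_ready;        /* xway_dev_ready */
        chip->select_chip = pdata->ctrl.select_chip;    /* xway_select_chip */
        chip->read_byte = pdata->ctrl.read_byte;        /* xway_read_byte */
        chip->chip_delay = pdata->chip.chip_delay;      /* 30 us above */

        /* xway_nand_probe() fetches the chip via platform_get_drvdata(),
         * so the drvdata must be set before the one-time EBU setup. */
        platform_set_drvdata(pdev, chip);
        if (pdata->ctrl.probe && pdata->ctrl.probe(pdev)) {
            kfree(chip);
            return -ENXIO;
        }

        /* ... nand_scan() and partition parsing via part_probe_types ... */
        return 0;
    }
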
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
index 9e2dfd5..8dd6ba5 100644
--- a/drivers/mtd/sm_ftl.c
+++ b/drivers/mtd/sm_ftl.c
@@ -346,7 +346,6 @@ static int sm_write_sector(struct sm_ftl *ftl,
ret = mtd_write_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);
/* Now we assume that hardware will catch write bitflip errors */
- /* If you are paranoid, use CONFIG_MTD_NAND_VERIFY_WRITE */
if (ret) {
dbg("write to block %d at zone %d, failed with error %d",
diff --git a/drivers/mtd/tests/Makefile b/drivers/mtd/tests/Makefile
index b44dcab..bd0065c 100644
--- a/drivers/mtd/tests/Makefile
+++ b/drivers/mtd/tests/Makefile
@@ -6,3 +6,4 @@ obj-$(CONFIG_MTD_TESTS) += mtd_stresstest.o
obj-$(CONFIG_MTD_TESTS) += mtd_subpagetest.o
obj-$(CONFIG_MTD_TESTS) += mtd_torturetest.o
obj-$(CONFIG_MTD_TESTS) += mtd_nandecctest.o
+obj-$(CONFIG_MTD_TESTS) += mtd_nandbiterrs.o
diff --git a/drivers/mtd/tests/mtd_nandbiterrs.c b/drivers/mtd/tests/mtd_nandbiterrs.c
new file mode 100644
index 0000000..cc8d62c
--- /dev/null
+++ b/drivers/mtd/tests/mtd_nandbiterrs.c
@@ -0,0 +1,460 @@
+/*
+ * Copyright © 2012 NetCommWireless
+ * Iwo Mergler <Iwo.Mergler@netcommwireless.com.au>
+ *
+ * Test for multi-bit error recovery on a NAND page. This mostly tests the
+ * ECC controller / driver.
+ *
+ * There are two test modes:
+ *
+ * 0 - artificially inserting bit errors until the ECC fails
+ * This is the default method and fairly quick. It should
+ * be independent of the quality of the FLASH.
+ *
+ * 1 - re-writing the same pattern repeatedly until the ECC fails.
+ * This method relies on the physics of NAND FLASH to eventually
+ * generate '0' bits if '1' has been written sufficient times.
+ * Depending on the NAND, the first bit errors will appear after
+ * 1000 or more writes and then will usually snowball, reaching the
+ * limits of the ECC quickly.
+ *
+ * The test stops after 10000 cycles, should your FLASH be
+ * exceptionally good and not generate bit errors before that. Try
+ * a different page in that case.
+ *
+ * Please note that neither of these tests will significantly 'use up' any
+ * FLASH endurance. Only a maximum of two erase operations will be performed.
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; see the file COPYING. If not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/mtd/mtd.h>
+#include <linux/err.h>
+#include <linux/mtd/nand.h>
+#include <linux/slab.h>
+
+#define msg(FMT, VA...) pr_info("mtd_nandbiterrs: "FMT, ##VA)
+
+static int dev;
+module_param(dev, int, S_IRUGO);
+MODULE_PARM_DESC(dev, "MTD device number to use");
+
+static unsigned page_offset;
+module_param(page_offset, uint, S_IRUGO);
+MODULE_PARM_DESC(page_offset, "Page number relative to dev start");
+
+static unsigned seed;
+module_param(seed, uint, S_IRUGO);
+MODULE_PARM_DESC(seed, "Random seed");
+
+static int mode;
+module_param(mode, int, S_IRUGO);
+MODULE_PARM_DESC(mode, "0=incremental errors, 1=overwrite test");
+
+static unsigned max_overwrite = 10000;
+
+static loff_t offset; /* Offset of the page we're using. */
+static unsigned eraseblock; /* Eraseblock number for our page. */
+
+/* We assume that the ECC can correct up to a certain number
+ * of biterrors per subpage. */
+static unsigned subsize; /* Size of subpages */
+static unsigned subcount; /* Number of subpages per page */
+
+static struct mtd_info *mtd; /* MTD device */
+
+static uint8_t *wbuffer; /* One page write / compare buffer */
+static uint8_t *rbuffer; /* One page read buffer */
+
+/* 'random' bytes from known offsets */
+static uint8_t hash(unsigned offset)
+{
+ unsigned v = offset;
+ unsigned char c;
+ v ^= 0x7f7edfd3;
+ v = v ^ (v >> 3);
+ v = v ^ (v >> 5);
+ v = v ^ (v >> 13);
+ c = v & 0xFF;
+ /* Reverse bits of result. */
+ c = (c & 0x0F) << 4 | (c & 0xF0) >> 4;
+ c = (c & 0x33) << 2 | (c & 0xCC) >> 2;
+ c = (c & 0x55) << 1 | (c & 0xAA) >> 1;
+ return c;
+}
+
+static int erase_block(void)
+{
+ int err;
+ struct erase_info ei;
+ loff_t addr = eraseblock * mtd->erasesize;
+
+ msg("erase_block\n");
+
+ memset(&ei, 0, sizeof(struct erase_info));
+ ei.mtd = mtd;
+ ei.addr = addr;
+ ei.len = mtd->erasesize;
+
+ err = mtd_erase(mtd, &ei);
+ if (err || ei.state == MTD_ERASE_FAILED) {
+ msg("error %d while erasing\n", err);
+ if (!err)
+ err = -EIO;
+ return err;
+ }
+
+ return 0;
+}
+
+/* Writes wbuffer to page */
+static int write_page(int log)
+{
+ int err = 0;
+ size_t written;
+
+ if (log)
+ msg("write_page\n");
+
+ err = mtd_write(mtd, offset, mtd->writesize, &written, wbuffer);
+ if (err || written != mtd->writesize) {
+ msg("error: write failed at %#llx\n", (long long)offset);
+ if (!err)
+ err = -EIO;
+ }
+
+ return err;
+}
+
+/* Re-writes the data area while leaving the OOB alone. */
+static int rewrite_page(int log)
+{
+ int err = 0;
+ struct mtd_oob_ops ops;
+
+ if (log)
+ msg("rewrite page\n");
+
+ ops.mode = MTD_OPS_RAW; /* No ECC */
+ ops.len = mtd->writesize;
+ ops.retlen = 0;
+ ops.ooblen = 0;
+ ops.oobretlen = 0;
+ ops.ooboffs = 0;
+ ops.datbuf = wbuffer;
+ ops.oobbuf = NULL;
+
+ err = mtd_write_oob(mtd, offset, &ops);
+ if (err || ops.retlen != mtd->writesize) {
+ msg("error: write_oob failed (%d)\n", err);
+ if (!err)
+ err = -EIO;
+ }
+
+ return err;
+}
+
+/* Reads page into rbuffer. Returns number of corrected bit errors (>=0)
+ * or error (<0) */
+static int read_page(int log)
+{
+ int err = 0;
+ size_t read;
+ struct mtd_ecc_stats oldstats;
+
+ if (log)
+ msg("read_page\n");
+
+ /* Saving last mtd stats */
+ memcpy(&oldstats, &mtd->ecc_stats, sizeof(oldstats));
+
+ err = mtd_read(mtd, offset, mtd->writesize, &read, rbuffer);
+ if (err == -EUCLEAN)
+ err = mtd->ecc_stats.corrected - oldstats.corrected;
+
+ if (err < 0 || read != mtd->writesize) {
+ msg("error: read failed at %#llx\n", (long long)offset);
+ if (err >= 0)
+ err = -EIO;
+ }
+
+ return err;
+}
+
+/* Verifies rbuffer against random sequence */
+static int verify_page(int log)
+{
+ unsigned i, errs = 0;
+
+ if (log)
+ msg("verify_page\n");
+
+ for (i = 0; i < mtd->writesize; i++) {
+ if (rbuffer[i] != hash(i+seed)) {
+ msg("Error: page offset %u, expected %02x, got %02x\n",
+ i, hash(i+seed), rbuffer[i]);
+ errs++;
+ }
+ }
+
+ if (errs)
+ return -EIO;
+ else
+ return 0;
+}
+
+#define CBIT(v, n) ((v) & (1 << (n)))
+#define BCLR(v, n) ((v) = (v) & ~(1 << (n)))
+
+/* Finds the first '1' bit in wbuffer starting at offset 'byte'
+ * and sets it to '0'. */
+static int insert_biterror(unsigned byte)
+{
+ int bit;
+
+ while (byte < mtd->writesize) {
+ for (bit = 7; bit >= 0; bit--) {
+ if (CBIT(wbuffer[byte], bit)) {
+ BCLR(wbuffer[byte], bit);
+ msg("Inserted biterror @ %u/%u\n", byte, bit);
+ return 0;
+ }
+ }
+ byte++;
+ }
+ msg("biterror: Failed to find a '1' bit\n");
+ return -EIO;
+}
+
+/* Writes 'random' data to page and then introduces deliberate bit
+ * errors into the page, while verifying each step. */
+static int incremental_errors_test(void)
+{
+ int err = 0;
+ unsigned i;
+ unsigned errs_per_subpage = 0;
+
+ msg("incremental biterrors test\n");
+
+ for (i = 0; i < mtd->writesize; i++)
+ wbuffer[i] = hash(i+seed);
+
+ err = write_page(1);
+ if (err)
+ goto exit;
+
+ while (1) {
+
+ err = rewrite_page(1);
+ if (err)
+ goto exit;
+
+ err = read_page(1);
+ if (err > 0)
+ msg("Read reported %d corrected bit errors\n", err);
+ if (err < 0) {
+ msg("After %d biterrors per subpage, read reported error %d\n",
+ errs_per_subpage, err);
+ err = 0;
+ goto exit;
+ }
+
+ err = verify_page(1);
+ if (err) {
+ msg("ECC failure, read data is incorrect despite read success\n");
+ goto exit;
+ }
+
+ msg("Successfully corrected %d bit errors per subpage\n",
+ errs_per_subpage);
+
+ for (i = 0; i < subcount; i++) {
+ err = insert_biterror(i * subsize);
+ if (err < 0)
+ goto exit;
+ }
+ errs_per_subpage++;
+ }
+
+exit:
+ return err;
+}
+
+
+/* Writes 'random' data to page and then re-writes that same data repeatedly.
+ This eventually develops bit errors (bits written as '1' will slowly become
+ '0'), which are corrected as far as the ECC is able. */
+static int overwrite_test(void)
+{
+ int err = 0;
+ unsigned i;
+ unsigned max_corrected = 0;
+ unsigned opno = 0;
+ /* We don't expect more than this many correctable bit errors per
+ * page. */
+ #define MAXBITS 512
+ static unsigned bitstats[MAXBITS]; /* bit error histogram. */
+
+ memset(bitstats, 0, sizeof(bitstats));
+
+ msg("overwrite biterrors test\n");
+
+ for (i = 0; i < mtd->writesize; i++)
+ wbuffer[i] = hash(i+seed);
+
+ err = write_page(1);
+ if (err)
+ goto exit;
+
+ while (opno < max_overwrite) {
+
+ err = rewrite_page(0);
+ if (err)
+ break;
+
+ err = read_page(0);
+ if (err >= 0) {
+ if (err >= MAXBITS) {
+ msg("Implausible number of bit errors corrected\n");
+ err = -EIO;
+ break;
+ }
+ bitstats[err]++;
+ if (err > max_corrected) {
+ max_corrected = err;
+ msg("Read reported %d corrected bit errors\n",
+ err);
+ }
+ } else { /* err < 0 */
+ msg("Read reported error %d\n", err);
+ err = 0;
+ break;
+ }
+
+ err = verify_page(0);
+ if (err) {
+ bitstats[max_corrected] = opno;
+ msg("ECC failure, read data is incorrect despite read success\n");
+ break;
+ }
+
+ opno++;
+ }
+
+ /* At this point bitstats[0] contains the number of ops with no bit
+ * errors, bitstats[1] the number of ops with 1 bit error, etc. */
+ msg("Bit error histogram (%d operations total):\n", opno);
+ for (i = 0; i < max_corrected; i++)
+ msg("Page reads with %3d corrected bit errors: %d\n",
+ i, bitstats[i]);
+
+exit:
+ return err;
+}
+
+static int __init mtd_nandbiterrs_init(void)
+{
+ int err = 0;
+
+ msg("\n");
+ msg("==================================================\n");
+ msg("MTD device: %d\n", dev);
+
+ mtd = get_mtd_device(NULL, dev);
+ if (IS_ERR(mtd)) {
+ err = PTR_ERR(mtd);
+ msg("error: cannot get MTD device\n");
+ goto exit_mtddev;
+ }
+
+ if (mtd->type != MTD_NANDFLASH) {
+ msg("this test requires NAND flash\n");
+ err = -ENODEV;
+ goto exit_nand;
+ }
+
+ msg("MTD device size %llu, eraseblock=%u, page=%u, oob=%u\n",
+ (unsigned long long)mtd->size, mtd->erasesize,
+ mtd->writesize, mtd->oobsize);
+
+ subsize = mtd->writesize >> mtd->subpage_sft;
+ subcount = mtd->writesize / subsize;
+
+ msg("Device uses %d subpages of %d bytes\n", subcount, subsize);
+
+ offset = page_offset * mtd->writesize;
+ eraseblock = mtd_div_by_eb(offset, mtd);
+
+ msg("Using page=%u, offset=%llu, eraseblock=%u\n",
+ page_offset, offset, eraseblock);
+
+ wbuffer = kmalloc(mtd->writesize, GFP_KERNEL);
+ if (!wbuffer) {
+ err = -ENOMEM;
+ goto exit_wbuffer;
+ }
+
+ rbuffer = kmalloc(mtd->writesize, GFP_KERNEL);
+ if (!rbuffer) {
+ err = -ENOMEM;
+ goto exit_rbuffer;
+ }
+
+ err = erase_block();
+ if (err)
+ goto exit_error;
+
+ if (mode == 0)
+ err = incremental_errors_test();
+ else
+ err = overwrite_test();
+
+ if (err)
+ goto exit_error;
+
+ /* We leave the block un-erased in case of test failure. */
+ err = erase_block();
+ if (err)
+ goto exit_error;
+
+ err = -EIO;
+ msg("finished successfully.\n");
+ msg("==================================================\n");
+
+exit_error:
+ kfree(rbuffer);
+exit_rbuffer:
+ kfree(wbuffer);
+exit_wbuffer:
+ /* Nothing */
+exit_nand:
+ put_mtd_device(mtd);
+exit_mtddev:
+ return err;
+}
+
+static void __exit mtd_nandbiterrs_exit(void)
+{
+ return;
+}
+
+module_init(mtd_nandbiterrs_init);
+module_exit(mtd_nandbiterrs_exit);
+
+MODULE_DESCRIPTION("NAND bit error recovery test");
+MODULE_AUTHOR("Iwo Mergler");
+MODULE_LICENSE("GPL");
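
The new test is driven entirely through module parameters (dev, page_offset, seed and mode, all declared above), so a run of the overwrite test against page 64 of MTD device 0 would look roughly like the line below, assuming the module was built as mtd_nandbiterrs.ko:

    insmod mtd_nandbiterrs.ko dev=0 page_offset=64 mode=1

Progress and the final bit-error histogram appear in the kernel log under the mtd_nandbiterrs: prefix; leaving mode at its default of 0 runs the quicker incremental-error test instead.
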
diff --git a/drivers/mtd/tests/mtd_nandecctest.c b/drivers/mtd/tests/mtd_nandecctest.c
index 70d6d7d..b437fa4 100644
--- a/drivers/mtd/tests/mtd_nandecctest.c
+++ b/drivers/mtd/tests/mtd_nandecctest.c
@@ -4,60 +4,287 @@
#include <linux/random.h>
#include <linux/string.h>
#include <linux/bitops.h>
-#include <linux/jiffies.h>
+#include <linux/slab.h>
#include <linux/mtd/nand_ecc.h>
+/*
+ * Test the implementation for software ECC
+ *
+ * No actual MTD device is needed, so we don't need to worry about losing
+ * important data through human error.
+ *
+ * This covers possible patterns of corruption which can be reliably corrected
+ * or detected.
+ */
+
#if defined(CONFIG_MTD_NAND) || defined(CONFIG_MTD_NAND_MODULE)
-static void inject_single_bit_error(void *data, size_t size)
+struct nand_ecc_test {
+ const char *name;
+ void (*prepare)(void *, void *, void *, void *, const size_t);
+ int (*verify)(void *, void *, void *, const size_t);
+};
+
+/*
+ * The reason for using __change_bit_le() instead of __change_bit() is to
+ * inject a bit error properly within a region whose size is not a multiple
+ * of sizeof(unsigned long) on big-endian systems.
+ */
+#ifdef __LITTLE_ENDIAN
+#define __change_bit_le(nr, addr) __change_bit(nr, addr)
+#elif defined(__BIG_ENDIAN)
+#define __change_bit_le(nr, addr) \
+ __change_bit((nr) ^ ((BITS_PER_LONG - 1) & ~0x7), addr)
+#else
+#error "Unknown byte order"
+#endif
+
+static void single_bit_error_data(void *error_data, void *correct_data,
+ size_t size)
{
- unsigned long offset = random32() % (size * BITS_PER_BYTE);
+ unsigned int offset = random32() % (size * BITS_PER_BYTE);
- __change_bit(offset, data);
+ memcpy(error_data, correct_data, size);
+ __change_bit_le(offset, error_data);
}
-static unsigned char data[512];
-static unsigned char error_data[512];
+static void double_bit_error_data(void *error_data, void *correct_data,
+ size_t size)
+{
+ unsigned int offset[2];
+
+ offset[0] = random32() % (size * BITS_PER_BYTE);
+ do {
+ offset[1] = random32() % (size * BITS_PER_BYTE);
+ } while (offset[0] == offset[1]);
-static int nand_ecc_test(const size_t size)
+ memcpy(error_data, correct_data, size);
+
+ __change_bit_le(offset[0], error_data);
+ __change_bit_le(offset[1], error_data);
+}
+
+static unsigned int random_ecc_bit(size_t size)
{
- unsigned char code[3];
- unsigned char error_code[3];
- char testname[30];
+ unsigned int offset = random32() % (3 * BITS_PER_BYTE);
+
+ if (size == 256) {
+ /*
+ * Don't inject a bit error into the insignificant bits (16th
+ * and 17th bit) in ECC code for 256 byte data block
+ */
+ while (offset == 16 || offset == 17)
+ offset = random32() % (3 * BITS_PER_BYTE);
+ }
- BUG_ON(sizeof(data) < size);
+ return offset;
+}
- sprintf(testname, "nand-ecc-%zu", size);
+static void single_bit_error_ecc(void *error_ecc, void *correct_ecc,
+ size_t size)
+{
+ unsigned int offset = random_ecc_bit(size);
- get_random_bytes(data, size);
+ memcpy(error_ecc, correct_ecc, 3);
+ __change_bit_le(offset, error_ecc);
+}
- memcpy(error_data, data, size);
- inject_single_bit_error(error_data, size);
+static void double_bit_error_ecc(void *error_ecc, void *correct_ecc,
+ size_t size)
+{
+ unsigned int offset[2];
- __nand_calculate_ecc(data, size, code);
- __nand_calculate_ecc(error_data, size, error_code);
- __nand_correct_data(error_data, code, error_code, size);
+ offset[0] = random_ecc_bit(size);
+ do {
+ offset[1] = random_ecc_bit(size);
+ } while (offset[0] == offset[1]);
- if (!memcmp(data, error_data, size)) {
- printk(KERN_INFO "mtd_nandecctest: ok - %s\n", testname);
+ memcpy(error_ecc, correct_ecc, 3);
+ __change_bit_le(offset[0], error_ecc);
+ __change_bit_le(offset[1], error_ecc);
+}
+
+static void no_bit_error(void *error_data, void *error_ecc,
+ void *correct_data, void *correct_ecc, const size_t size)
+{
+ memcpy(error_data, correct_data, size);
+ memcpy(error_ecc, correct_ecc, 3);
+}
+
+static int no_bit_error_verify(void *error_data, void *error_ecc,
+ void *correct_data, const size_t size)
+{
+ unsigned char calc_ecc[3];
+ int ret;
+
+ __nand_calculate_ecc(error_data, size, calc_ecc);
+ ret = __nand_correct_data(error_data, error_ecc, calc_ecc, size);
+ if (ret == 0 && !memcmp(correct_data, error_data, size))
return 0;
- }
- printk(KERN_ERR "mtd_nandecctest: not ok - %s\n", testname);
+ return -EINVAL;
+}
+
+static void single_bit_error_in_data(void *error_data, void *error_ecc,
+ void *correct_data, void *correct_ecc, const size_t size)
+{
+ single_bit_error_data(error_data, correct_data, size);
+ memcpy(error_ecc, correct_ecc, 3);
+}
+
+static void single_bit_error_in_ecc(void *error_data, void *error_ecc,
+ void *correct_data, void *correct_ecc, const size_t size)
+{
+ memcpy(error_data, correct_data, size);
+ single_bit_error_ecc(error_ecc, correct_ecc, size);
+}
+
+static int single_bit_error_correct(void *error_data, void *error_ecc,
+ void *correct_data, const size_t size)
+{
+ unsigned char calc_ecc[3];
+ int ret;
- printk(KERN_DEBUG "hexdump of data:\n");
- print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 4,
- data, size, false);
- printk(KERN_DEBUG "hexdump of error data:\n");
- print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 4,
+ __nand_calculate_ecc(error_data, size, calc_ecc);
+ ret = __nand_correct_data(error_data, error_ecc, calc_ecc, size);
+ if (ret == 1 && !memcmp(correct_data, error_data, size))
+ return 0;
+
+ return -EINVAL;
+}
+
+static void double_bit_error_in_data(void *error_data, void *error_ecc,
+ void *correct_data, void *correct_ecc, const size_t size)
+{
+ double_bit_error_data(error_data, correct_data, size);
+ memcpy(error_ecc, correct_ecc, 3);
+}
+
+static void single_bit_error_in_data_and_ecc(void *error_data, void *error_ecc,
+ void *correct_data, void *correct_ecc, const size_t size)
+{
+ single_bit_error_data(error_data, correct_data, size);
+ single_bit_error_ecc(error_ecc, correct_ecc, size);
+}
+
+static void double_bit_error_in_ecc(void *error_data, void *error_ecc,
+ void *correct_data, void *correct_ecc, const size_t size)
+{
+ memcpy(error_data, correct_data, size);
+ double_bit_error_ecc(error_ecc, correct_ecc, size);
+}
+
+static int double_bit_error_detect(void *error_data, void *error_ecc,
+ void *correct_data, const size_t size)
+{
+ unsigned char calc_ecc[3];
+ int ret;
+
+ __nand_calculate_ecc(error_data, size, calc_ecc);
+ ret = __nand_correct_data(error_data, error_ecc, calc_ecc, size);
+
+ return (ret == -1) ? 0 : -EINVAL;
+}
+
+static const struct nand_ecc_test nand_ecc_test[] = {
+ {
+ .name = "no-bit-error",
+ .prepare = no_bit_error,
+ .verify = no_bit_error_verify,
+ },
+ {
+ .name = "single-bit-error-in-data-correct",
+ .prepare = single_bit_error_in_data,
+ .verify = single_bit_error_correct,
+ },
+ {
+ .name = "single-bit-error-in-ecc-correct",
+ .prepare = single_bit_error_in_ecc,
+ .verify = single_bit_error_correct,
+ },
+ {
+ .name = "double-bit-error-in-data-detect",
+ .prepare = double_bit_error_in_data,
+ .verify = double_bit_error_detect,
+ },
+ {
+ .name = "single-bit-error-in-data-and-ecc-detect",
+ .prepare = single_bit_error_in_data_and_ecc,
+ .verify = double_bit_error_detect,
+ },
+ {
+ .name = "double-bit-error-in-ecc-detect",
+ .prepare = double_bit_error_in_ecc,
+ .verify = double_bit_error_detect,
+ },
+};
+
+static void dump_data_ecc(void *error_data, void *error_ecc, void *correct_data,
+ void *correct_ecc, const size_t size)
+{
+ pr_info("hexdump of error data:\n");
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4,
error_data, size, false);
+ print_hex_dump(KERN_INFO, "hexdump of error ecc: ",
+ DUMP_PREFIX_NONE, 16, 1, error_ecc, 3, false);
+
+ pr_info("hexdump of correct data:\n");
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4,
+ correct_data, size, false);
+ print_hex_dump(KERN_INFO, "hexdump of correct ecc: ",
+ DUMP_PREFIX_NONE, 16, 1, correct_ecc, 3, false);
+}
+
+static int nand_ecc_test_run(const size_t size)
+{
+ int i;
+ int err = 0;
+ void *error_data;
+ void *error_ecc;
+ void *correct_data;
+ void *correct_ecc;
- return -1;
+ error_data = kmalloc(size, GFP_KERNEL);
+ error_ecc = kmalloc(3, GFP_KERNEL);
+ correct_data = kmalloc(size, GFP_KERNEL);
+ correct_ecc = kmalloc(3, GFP_KERNEL);
+
+ if (!error_data || !error_ecc || !correct_data || !correct_ecc) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ get_random_bytes(correct_data, size);
+ __nand_calculate_ecc(correct_data, size, correct_ecc);
+
+ for (i = 0; i < ARRAY_SIZE(nand_ecc_test); i++) {
+ nand_ecc_test[i].prepare(error_data, error_ecc,
+ correct_data, correct_ecc, size);
+ err = nand_ecc_test[i].verify(error_data, error_ecc,
+ correct_data, size);
+
+ if (err) {
+ pr_err("mtd_nandecctest: not ok - %s-%zd\n",
+ nand_ecc_test[i].name, size);
+ dump_data_ecc(error_data, error_ecc,
+ correct_data, correct_ecc, size);
+ break;
+ }
+ pr_info("mtd_nandecctest: ok - %s-%zd\n",
+ nand_ecc_test[i].name, size);
+ }
+error:
+ kfree(error_data);
+ kfree(error_ecc);
+ kfree(correct_data);
+ kfree(correct_ecc);
+
+ return err;
}
#else
-static int nand_ecc_test(const size_t size)
+static int nand_ecc_test_run(const size_t size)
{
return 0;
}
@@ -66,12 +293,13 @@ static int nand_ecc_test(const size_t size)
static int __init ecc_test_init(void)
{
- srandom32(jiffies);
+ int err;
- nand_ecc_test(256);
- nand_ecc_test(512);
+ err = nand_ecc_test_run(256);
+ if (err)
+ return err;
- return 0;
+ return nand_ecc_test_run(512);
}
static void __exit ecc_test_exit(void)
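
The __change_bit_le() macro above deserves a worked example: generic __change_bit() numbers bits inside native unsigned longs, so on a 64-bit big-endian machine the bit that a little-endian view calls "buffer bit 0" (byte 0, bit 0) is native bit 56 of the first long. XORing the bit number with (BITS_PER_LONG - 1) & ~0x7 (which is 56 on a 64-bit build) mirrors the byte index inside the long while leaving the bit-within-byte offset alone. A small stand-alone demonstration (plain userspace C, assuming BITS_PER_LONG == 64):

    #include <stdio.h>

    #define BITS_PER_LONG 64
    #define BIT_LE(nr) ((nr) ^ ((BITS_PER_LONG - 1) & ~0x7))

    int main(void)
    {
        int nr;

        /* Buffer bits 0..7 (memory byte 0) map to native bits 56..63,
         * bits 8..15 (byte 1) map to 48..55, and so on down the long. */
        for (nr = 0; nr < 16; nr++)
            printf("buffer bit %2d -> __change_bit() bit %2d\n",
                   nr, BIT_LE(nr));
        return 0;
    }
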
diff --git a/drivers/mtd/tests/mtd_speedtest.c b/drivers/mtd/tests/mtd_speedtest.c
index 2aec4f3..42b0f74 100644
--- a/drivers/mtd/tests/mtd_speedtest.c
+++ b/drivers/mtd/tests/mtd_speedtest.c
@@ -26,6 +26,7 @@
#include <linux/mtd/mtd.h>
#include <linux/slab.h>
#include <linux/sched.h>
+#include <linux/random.h>
#define PRINT_PREF KERN_INFO "mtd_speedtest: "
@@ -47,25 +48,13 @@ static int ebcnt;
static int pgcnt;
static int goodebcnt;
static struct timeval start, finish;
-static unsigned long next = 1;
-
-static inline unsigned int simple_rand(void)
-{
- next = next * 1103515245 + 12345;
- return (unsigned int)((next / 65536) % 32768);
-}
-
-static inline void simple_srand(unsigned long seed)
-{
- next = seed;
-}
static void set_random_data(unsigned char *buf, size_t len)
{
size_t i;
for (i = 0; i < len; ++i)
- buf[i] = simple_rand();
+ buf[i] = random32();
}
static int erase_eraseblock(int ebnum)
@@ -407,7 +396,6 @@ static int __init mtd_speedtest_init(void)
goto out;
}
- simple_srand(1);
set_random_data(iobuf, mtd->erasesize);
err = scan_for_bad_eraseblocks();
diff --git a/drivers/mtd/tests/mtd_stresstest.c b/drivers/mtd/tests/mtd_stresstest.c
index 7b33f22..cb268ce 100644
--- a/drivers/mtd/tests/mtd_stresstest.c
+++ b/drivers/mtd/tests/mtd_stresstest.c
@@ -27,6 +27,7 @@
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
+#include <linux/random.h>
#define PRINT_PREF KERN_INFO "mtd_stresstest: "
@@ -48,28 +49,13 @@ static int pgsize;
static int bufsize;
static int ebcnt;
static int pgcnt;
-static unsigned long next = 1;
-
-static inline unsigned int simple_rand(void)
-{
- next = next * 1103515245 + 12345;
- return (unsigned int)((next / 65536) % 32768);
-}
-
-static inline void simple_srand(unsigned long seed)
-{
- next = seed;
-}
static int rand_eb(void)
{
- int eb;
+ unsigned int eb;
again:
- if (ebcnt < 32768)
- eb = simple_rand();
- else
- eb = (simple_rand() << 15) | simple_rand();
+ eb = random32();
/* Read or write up 2 eraseblocks at a time - hence 'ebcnt - 1' */
eb %= (ebcnt - 1);
if (bbt[eb])
@@ -79,24 +65,18 @@ again:
static int rand_offs(void)
{
- int offs;
+ unsigned int offs;
- if (bufsize < 32768)
- offs = simple_rand();
- else
- offs = (simple_rand() << 15) | simple_rand();
+ offs = random32();
offs %= bufsize;
return offs;
}
static int rand_len(int offs)
{
- int len;
+ unsigned int len;
- if (bufsize < 32768)
- len = simple_rand();
- else
- len = (simple_rand() << 15) | simple_rand();
+ len = random32();
len %= (bufsize - offs);
return len;
}
@@ -211,7 +191,7 @@ static int do_write(void)
static int do_operation(void)
{
- if (simple_rand() & 1)
+ if (random32() & 1)
return do_read();
else
return do_write();
@@ -302,9 +282,8 @@ static int __init mtd_stresstest_init(void)
}
for (i = 0; i < ebcnt; i++)
offsets[i] = mtd->erasesize;
- simple_srand(current->pid);
for (i = 0; i < bufsize; i++)
- writebuf[i] = simple_rand();
+ writebuf[i] = random32();
err = scan_for_bad_eraseblocks();
if (err)
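
One detail of the stresstest conversion is easy to miss: rand_eb(), rand_offs() and rand_len() also switch their locals from int to unsigned int. random32() returns a full 32-bit value, and on a typical two's-complement compiler storing it in a signed int first would make the subsequent % yield a negative result about half the time. A stand-alone illustration (userspace C, with 0xdeadbeef standing in for one random32() sample):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t r = 0xdeadbeefu;   /* stand-in for random32() */
        int s = (int)r;             /* old-style signed local: wraps negative */
        unsigned int u = r;         /* new-style unsigned local */

        /* C99 % truncates toward zero, so a negative dividend gives a
         * negative remainder, which is useless as an offset or index. */
        printf("signed:   %d\n", s % 100);    /* prints -37 */
        printf("unsigned: %u\n", u % 100);    /* prints 59 */
        return 0;
    }
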
diff --git a/drivers/mtd/ubi/Kconfig b/drivers/mtd/ubi/Kconfig
index 271a842..36663af 100644
--- a/drivers/mtd/ubi/Kconfig
+++ b/drivers/mtd/ubi/Kconfig
@@ -56,6 +56,27 @@ config MTD_UBI_BEB_LIMIT
Leave the default value if unsure.
+config MTD_UBI_FASTMAP
+ bool "UBI Fastmap (Experimental feature)"
+ default n
+ help
+ Important: this feature is experimental so far and the on-flash
+ format for fastmap may change in the next kernel versions.
+
+ Fastmap is a mechanism which allows attaching an UBI device
+ in nearly constant time. Instead of scanning the whole MTD device it
+ only has to locate a checkpoint (called fastmap) on the device.
+ The on-flash fastmap contains all information needed to attach
+ the device. Using fastmap only makes sense on large devices where
+ attaching by scanning takes a long time. UBI will not automatically
+ install a fastmap on old images, but you can set the UBI module
+ parameter fm_autoconvert to 1 if you want that. Please note that
+ fastmap-enabled images are still usable with UBI implementations
+ without fastmap support. On typical flash devices the whole fastmap
+ fits into one PEB. UBI will reserve PEBs to hold two fastmaps.
+
+ If in doubt, say "N".
+
config MTD_UBI_GLUEBI
tristate "MTD devices emulation driver (gluebi)"
help
diff --git a/drivers/mtd/ubi/Makefile b/drivers/mtd/ubi/Makefile
index a0803ac..b46b0c97 100644
--- a/drivers/mtd/ubi/Makefile
+++ b/drivers/mtd/ubi/Makefile
@@ -2,5 +2,6 @@ obj-$(CONFIG_MTD_UBI) += ubi.o
ubi-y += vtbl.o vmt.o upd.o build.o cdev.o kapi.o eba.o io.o wl.o attach.o
ubi-y += misc.o debug.o
+ubi-$(CONFIG_MTD_UBI_FASTMAP) += fastmap.o
obj-$(CONFIG_MTD_UBI_GLUEBI) += gluebi.o
diff --git a/drivers/mtd/ubi/attach.c b/drivers/mtd/ubi/attach.c
index f7adf53..fec406b 100644
--- a/drivers/mtd/ubi/attach.c
+++ b/drivers/mtd/ubi/attach.c
@@ -300,7 +300,7 @@ static struct ubi_ainf_volume *add_volume(struct ubi_attach_info *ai,
}
/**
- * compare_lebs - find out which logical eraseblock is newer.
+ * ubi_compare_lebs - find out which logical eraseblock is newer.
* @ubi: UBI device description object
* @aeb: first logical eraseblock to compare
* @pnum: physical eraseblock number of the second logical eraseblock to
@@ -319,7 +319,7 @@ static struct ubi_ainf_volume *add_volume(struct ubi_attach_info *ai,
* o bit 2 is cleared: the older LEB is not corrupted;
* o bit 2 is set: the older LEB is corrupted.
*/
-static int compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
+int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
int pnum, const struct ubi_vid_hdr *vid_hdr)
{
void *buf;
@@ -337,7 +337,7 @@ static int compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
* support these images anymore. Well, those images still work,
* but only if no unclean reboots happened.
*/
- ubi_err("unsupported on-flash UBI format\n");
+ ubi_err("unsupported on-flash UBI format");
return -EINVAL;
}
@@ -507,7 +507,7 @@ int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum,
* sequence numbers. We still can attach these images, unless
* there is a need to distinguish between old and new
* eraseblocks, in which case we'll refuse the image in
- * 'compare_lebs()'. In other words, we attach old clean
+ * 'ubi_compare_lebs()'. In other words, we attach old clean
* images, but refuse attaching old images with duplicated
* logical eraseblocks because there was an unclean reboot.
*/
@@ -523,7 +523,7 @@ int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum,
* Now we have to drop the older one and preserve the newer
* one.
*/
- cmp_res = compare_lebs(ubi, aeb, pnum, vid_hdr);
+ cmp_res = ubi_compare_lebs(ubi, aeb, pnum, vid_hdr);
if (cmp_res < 0)
return cmp_res;
@@ -748,7 +748,7 @@ struct ubi_ainf_peb *ubi_early_get_peb(struct ubi_device *ubi,
/**
* check_corruption - check the data area of PEB.
* @ubi: UBI device description object
- * @vid_hrd: the (corrupted) VID header of this PEB
+ * @vid_hdr: the (corrupted) VID header of this PEB
* @pnum: the physical eraseblock number to check
*
* This is a helper function which is used to distinguish between VID header
@@ -810,6 +810,8 @@ out_unlock:
* @ubi: UBI device description object
* @ai: attaching information
* @pnum: the physical eraseblock number
+ * @vid: the volume ID of the found volume is stored here, if non-NULL
+ * @sqnum: the sqnum of the found volume is stored here, if non-NULL
*
* This function reads UBI headers of PEB @pnum, checks them, and adds
* information about this PEB to the corresponding list or RB-tree in the
@@ -817,10 +819,10 @@ out_unlock:
* successfully handled and a negative error code in case of failure.
*/
static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
- int pnum)
+ int pnum, int *vid, unsigned long long *sqnum)
{
long long uninitialized_var(ec);
- int err, bitflips = 0, vol_id, ec_err = 0;
+ int err, bitflips = 0, vol_id = -1, ec_err = 0;
dbg_bld("scan PEB %d", pnum);
@@ -991,14 +993,21 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
}
vol_id = be32_to_cpu(vidh->vol_id);
+ if (vid)
+ *vid = vol_id;
+ if (sqnum)
+ *sqnum = be64_to_cpu(vidh->sqnum);
if (vol_id > UBI_MAX_VOLUMES && vol_id != UBI_LAYOUT_VOLUME_ID) {
int lnum = be32_to_cpu(vidh->lnum);
/* Unsupported internal volume */
switch (vidh->compat) {
case UBI_COMPAT_DELETE:
- ubi_msg("\"delete\" compatible internal volume %d:%d found, will remove it",
- vol_id, lnum);
+ if (vol_id != UBI_FM_SB_VOLUME_ID &&
+ vol_id != UBI_FM_DATA_VOLUME_ID) {
+ ubi_msg("\"delete\" compatible internal volume %d:%d found, will remove it",
+ vol_id, lnum);
+ }
err = add_to_list(ai, pnum, vol_id, lnum,
ec, 1, &ai->erase);
if (err)
@@ -1121,51 +1130,126 @@ static int late_analysis(struct ubi_device *ubi, struct ubi_attach_info *ai)
}
/**
+ * destroy_av - free volume attaching information.
+ * @ai: attaching information
+ * @av: volume attaching information
+ *
+ * This function destroys the volume attaching information.
+ */
+static void destroy_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av)
+{
+ struct ubi_ainf_peb *aeb;
+ struct rb_node *this = av->root.rb_node;
+
+ while (this) {
+ if (this->rb_left)
+ this = this->rb_left;
+ else if (this->rb_right)
+ this = this->rb_right;
+ else {
+ aeb = rb_entry(this, struct ubi_ainf_peb, u.rb);
+ this = rb_parent(this);
+ if (this) {
+ if (this->rb_left == &aeb->u.rb)
+ this->rb_left = NULL;
+ else
+ this->rb_right = NULL;
+ }
+
+ kmem_cache_free(ai->aeb_slab_cache, aeb);
+ }
+ }
+ kfree(av);
+}
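destroy_av() (and destroy_ai() below) tear their RB-trees down leaf-first without calling rb_erase(): the whole tree is being discarded, so rebalancing after every removal would be wasted work, and each leaf is simply unlinked from its parent and freed. The same pattern on a plain binary tree, as a self-contained userspace sketch (the node layout is hypothetical):

    #include <stdlib.h>

    struct node {
        struct node *left, *right, *parent;
    };

    static void destroy_tree(struct node *this)
    {
        while (this) {
            if (this->left)
                this = this->left;
            else if (this->right)
                this = this->right;
            else {
                struct node *victim = this;

                this = this->parent;   /* climb back up... */
                if (this) {            /* ...and unlink the leaf */
                    if (this->left == victim)
                        this->left = NULL;
                    else
                        this->right = NULL;
                }
                free(victim);          /* no rebalancing needed */
            }
        }
    }

    int main(void)
    {
        struct node *root = calloc(1, sizeof(*root));
        struct node *leaf = calloc(1, sizeof(*leaf));

        root->left = leaf;
        leaf->parent = root;
        destroy_tree(root);
        return 0;
    }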
+
+/**
+ * destroy_ai - destroy attaching information.
+ * @ai: attaching information
+ */
+static void destroy_ai(struct ubi_attach_info *ai)
+{
+ struct ubi_ainf_peb *aeb, *aeb_tmp;
+ struct ubi_ainf_volume *av;
+ struct rb_node *rb;
+
+ list_for_each_entry_safe(aeb, aeb_tmp, &ai->alien, u.list) {
+ list_del(&aeb->u.list);
+ kmem_cache_free(ai->aeb_slab_cache, aeb);
+ }
+ list_for_each_entry_safe(aeb, aeb_tmp, &ai->erase, u.list) {
+ list_del(&aeb->u.list);
+ kmem_cache_free(ai->aeb_slab_cache, aeb);
+ }
+ list_for_each_entry_safe(aeb, aeb_tmp, &ai->corr, u.list) {
+ list_del(&aeb->u.list);
+ kmem_cache_free(ai->aeb_slab_cache, aeb);
+ }
+ list_for_each_entry_safe(aeb, aeb_tmp, &ai->free, u.list) {
+ list_del(&aeb->u.list);
+ kmem_cache_free(ai->aeb_slab_cache, aeb);
+ }
+
+ /* Destroy the volume RB-tree */
+ rb = ai->volumes.rb_node;
+ while (rb) {
+ if (rb->rb_left)
+ rb = rb->rb_left;
+ else if (rb->rb_right)
+ rb = rb->rb_right;
+ else {
+ av = rb_entry(rb, struct ubi_ainf_volume, rb);
+
+ rb = rb_parent(rb);
+ if (rb) {
+ if (rb->rb_left == &av->rb)
+ rb->rb_left = NULL;
+ else
+ rb->rb_right = NULL;
+ }
+
+ destroy_av(ai, av);
+ }
+ }
+
+ if (ai->aeb_slab_cache)
+ kmem_cache_destroy(ai->aeb_slab_cache);
+
+ kfree(ai);
+}
+
+/**
* scan_all - scan entire MTD device.
* @ubi: UBI device description object
+ * @ai: attach info object
+ * @start: start scanning at this PEB
*
- * This function does full scanning of an MTD device and returns complete
- * information about it in form of a "struct ubi_attach_info" object. In case
- * of failure, an error code is returned.
+ * This function does full scanning of an MTD device and fills @ai with
+ * complete information about it. Returns zero in case of success and a
+ * negative error code in case of failure.
*/
-static struct ubi_attach_info *scan_all(struct ubi_device *ubi)
+static int scan_all(struct ubi_device *ubi, struct ubi_attach_info *ai,
+ int start)
{
int err, pnum;
struct rb_node *rb1, *rb2;
struct ubi_ainf_volume *av;
struct ubi_ainf_peb *aeb;
- struct ubi_attach_info *ai;
-
- ai = kzalloc(sizeof(struct ubi_attach_info), GFP_KERNEL);
- if (!ai)
- return ERR_PTR(-ENOMEM);
-
- INIT_LIST_HEAD(&ai->corr);
- INIT_LIST_HEAD(&ai->free);
- INIT_LIST_HEAD(&ai->erase);
- INIT_LIST_HEAD(&ai->alien);
- ai->volumes = RB_ROOT;
err = -ENOMEM;
- ai->aeb_slab_cache = kmem_cache_create("ubi_aeb_slab_cache",
- sizeof(struct ubi_ainf_peb),
- 0, 0, NULL);
- if (!ai->aeb_slab_cache)
- goto out_ai;
ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
if (!ech)
- goto out_ai;
+ return err;
vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
if (!vidh)
goto out_ech;
- for (pnum = 0; pnum < ubi->peb_count; pnum++) {
+ for (pnum = start; pnum < ubi->peb_count; pnum++) {
cond_resched();
dbg_gen("process PEB %d", pnum);
- err = scan_peb(ubi, ai, pnum);
+ err = scan_peb(ubi, ai, pnum, NULL, NULL);
if (err < 0)
goto out_vidh;
}
@@ -1210,32 +1294,144 @@ static struct ubi_attach_info *scan_all(struct ubi_device *ubi)
ubi_free_vid_hdr(ubi, vidh);
kfree(ech);
- return ai;
+ return 0;
out_vidh:
ubi_free_vid_hdr(ubi, vidh);
out_ech:
kfree(ech);
-out_ai:
- ubi_destroy_ai(ai);
- return ERR_PTR(err);
+ return err;
+}
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+
+/**
+ * scan_fast - try to find a fastmap and attach from it.
+ * @ubi: UBI device description object
+ * @ai: attach info object
+ *
+ * Returns 0 on success, negative return values indicate an internal
+ * error.
+ * UBI_NO_FASTMAP denotes that no fastmap was found.
+ * UBI_BAD_FASTMAP denotes that the found fastmap was invalid.
+ */
+static int scan_fast(struct ubi_device *ubi, struct ubi_attach_info *ai)
+{
+ int err, pnum, fm_anchor = -1;
+ unsigned long long max_sqnum = 0;
+
+ err = -ENOMEM;
+
+ ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+ if (!ech)
+ goto out;
+
+ vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
+ if (!vidh)
+ goto out_ech;
+
+ for (pnum = 0; pnum < UBI_FM_MAX_START; pnum++) {
+ int vol_id = -1;
+ unsigned long long sqnum = -1;
+ cond_resched();
+
+ dbg_gen("process PEB %d", pnum);
+ err = scan_peb(ubi, ai, pnum, &vol_id, &sqnum);
+ if (err < 0)
+ goto out_vidh;
+
+ if (vol_id == UBI_FM_SB_VOLUME_ID && sqnum > max_sqnum) {
+ max_sqnum = sqnum;
+ fm_anchor = pnum;
+ }
+ }
+
+ ubi_free_vid_hdr(ubi, vidh);
+ kfree(ech);
+
+ if (fm_anchor < 0)
+ return UBI_NO_FASTMAP;
+
+ return ubi_scan_fastmap(ubi, ai, fm_anchor);
+
+out_vidh:
+ ubi_free_vid_hdr(ubi, vidh);
+out_ech:
+ kfree(ech);
+out:
+ return err;
+}
+
+#endif
+
+static struct ubi_attach_info *alloc_ai(const char *slab_name)
+{
+ struct ubi_attach_info *ai;
+
+ ai = kzalloc(sizeof(struct ubi_attach_info), GFP_KERNEL);
+ if (!ai)
+ return ai;
+
+ INIT_LIST_HEAD(&ai->corr);
+ INIT_LIST_HEAD(&ai->free);
+ INIT_LIST_HEAD(&ai->erase);
+ INIT_LIST_HEAD(&ai->alien);
+ ai->volumes = RB_ROOT;
+ ai->aeb_slab_cache = kmem_cache_create(slab_name,
+ sizeof(struct ubi_ainf_peb),
+ 0, 0, NULL);
+ if (!ai->aeb_slab_cache) {
+ kfree(ai);
+ ai = NULL;
+ }
+
+ return ai;
}
/**
* ubi_attach - attach an MTD device.
* @ubi: UBI device descriptor
+ * @force_scan: if set to non-zero attach by scanning
*
* This function returns zero in case of success and a negative error code in
* case of failure.
*/
-int ubi_attach(struct ubi_device *ubi)
+int ubi_attach(struct ubi_device *ubi, int force_scan)
{
int err;
struct ubi_attach_info *ai;
- ai = scan_all(ubi);
- if (IS_ERR(ai))
- return PTR_ERR(ai);
+ ai = alloc_ai("ubi_aeb_slab_cache");
+ if (!ai)
+ return -ENOMEM;
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ /* On small flash devices we disable fastmap in any case. */
+ if ((int)mtd_div_by_eb(ubi->mtd->size, ubi->mtd) <= UBI_FM_MAX_START) {
+ ubi->fm_disabled = 1;
+ force_scan = 1;
+ }
+
+ if (force_scan)
+ err = scan_all(ubi, ai, 0);
+ else {
+ err = scan_fast(ubi, ai);
+ if (err > 0) {
+ if (err != UBI_NO_FASTMAP) {
+ /*
+ * The fastmap is unusable: throw the
+ * tainted attach info away and rescan
+ * from scratch.
+ */
+ destroy_ai(ai);
+ ai = alloc_ai("ubi_aeb_slab_cache2");
+ if (!ai)
+ return -ENOMEM;
+
+ err = scan_all(ubi, ai, 0);
+ } else {
+ /*
+ * No fastmap found: PEBs below
+ * UBI_FM_MAX_START are already in @ai,
+ * scan only the remaining ones.
+ */
+ err = scan_all(ubi, ai, UBI_FM_MAX_START);
+ }
+ }
+ }
+#else
+ err = scan_all(ubi, ai, 0);
+#endif
+ if (err)
+ goto out_ai;
ubi->bad_peb_count = ai->bad_peb_count;
ubi->good_peb_count = ubi->peb_count - ubi->bad_peb_count;
@@ -1256,7 +1452,29 @@ int ubi_attach(struct ubi_device *ubi)
if (err)
goto out_wl;
- ubi_destroy_ai(ai);
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ if (ubi->fm && ubi->dbg->chk_gen) {
+ struct ubi_attach_info *scan_ai;
+
+ scan_ai = alloc_ai("ubi_ckh_aeb_slab_cache");
+ if (!scan_ai) {
+ err = -ENOMEM;
+ goto out_wl;
+ }
+
+ err = scan_all(ubi, scan_ai, 0);
+ if (err) {
+ destroy_ai(scan_ai);
+ goto out_wl;
+ }
+
+ err = self_check_eba(ubi, ai, scan_ai);
+ destroy_ai(scan_ai);
+
+ if (err)
+ goto out_wl;
+ }
+#endif
+
+ destroy_ai(ai);
return 0;
out_wl:
@@ -1265,99 +1483,11 @@ out_vtbl:
ubi_free_internal_volumes(ubi);
vfree(ubi->vtbl);
out_ai:
- ubi_destroy_ai(ai);
+ destroy_ai(ai);
return err;
}
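The attach policy that results from the hunk above: scan PEBs 0..UBI_FM_MAX_START-1 looking for a fastmap anchor; if none exists, keep the attach info gathered so far and scan only the remaining PEBs; if a fastmap exists but turns out to be unusable, throw the partial state away and fall back to a full scan. A condensed, self-contained sketch of that decision flow (the scan functions are stubs, not the kernel code):

    #include <stdio.h>

    enum { UBI_NO_FASTMAP = 1, UBI_BAD_FASTMAP = 2, UBI_FM_MAX_START = 64 };

    /* Stubs standing in for scan_all()/scan_fast(); the real functions
     * fill a ubi_attach_info structure as a side effect. */
    static int scan_all_from(int start)
    {
        printf("full scan from PEB %d\n", start);
        return 0;
    }

    static int scan_fast(void)
    {
        return UBI_NO_FASTMAP; /* pretend no fastmap was found */
    }

    static int attach(int force_scan)
    {
        int err;

        if (force_scan)
            return scan_all_from(0);

        err = scan_fast();
        if (err <= 0)
            return err; /* attached by fastmap, or a hard error */

        if (err == UBI_BAD_FASTMAP)
            printf("dropping tainted attach info\n");

        /* PEBs below UBI_FM_MAX_START are only reusable when no
         * fastmap was found at all. */
        return scan_all_from(err == UBI_NO_FASTMAP ? UBI_FM_MAX_START : 0);
    }

    int main(void)
    {
        return attach(0);
    }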
/**
- * destroy_av - free volume attaching information.
- * @av: volume attaching information
- * @ai: attaching information
- *
- * This function destroys the volume attaching information.
- */
-static void destroy_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av)
-{
- struct ubi_ainf_peb *aeb;
- struct rb_node *this = av->root.rb_node;
-
- while (this) {
- if (this->rb_left)
- this = this->rb_left;
- else if (this->rb_right)
- this = this->rb_right;
- else {
- aeb = rb_entry(this, struct ubi_ainf_peb, u.rb);
- this = rb_parent(this);
- if (this) {
- if (this->rb_left == &aeb->u.rb)
- this->rb_left = NULL;
- else
- this->rb_right = NULL;
- }
-
- kmem_cache_free(ai->aeb_slab_cache, aeb);
- }
- }
- kfree(av);
-}
-
-/**
- * ubi_destroy_ai - destroy attaching information.
- * @ai: attaching information
- */
-void ubi_destroy_ai(struct ubi_attach_info *ai)
-{
- struct ubi_ainf_peb *aeb, *aeb_tmp;
- struct ubi_ainf_volume *av;
- struct rb_node *rb;
-
- list_for_each_entry_safe(aeb, aeb_tmp, &ai->alien, u.list) {
- list_del(&aeb->u.list);
- kmem_cache_free(ai->aeb_slab_cache, aeb);
- }
- list_for_each_entry_safe(aeb, aeb_tmp, &ai->erase, u.list) {
- list_del(&aeb->u.list);
- kmem_cache_free(ai->aeb_slab_cache, aeb);
- }
- list_for_each_entry_safe(aeb, aeb_tmp, &ai->corr, u.list) {
- list_del(&aeb->u.list);
- kmem_cache_free(ai->aeb_slab_cache, aeb);
- }
- list_for_each_entry_safe(aeb, aeb_tmp, &ai->free, u.list) {
- list_del(&aeb->u.list);
- kmem_cache_free(ai->aeb_slab_cache, aeb);
- }
-
- /* Destroy the volume RB-tree */
- rb = ai->volumes.rb_node;
- while (rb) {
- if (rb->rb_left)
- rb = rb->rb_left;
- else if (rb->rb_right)
- rb = rb->rb_right;
- else {
- av = rb_entry(rb, struct ubi_ainf_volume, rb);
-
- rb = rb_parent(rb);
- if (rb) {
- if (rb->rb_left == &av->rb)
- rb->rb_left = NULL;
- else
- rb->rb_right = NULL;
- }
-
- destroy_av(ai, av);
- }
- }
-
- if (ai->aeb_slab_cache)
- kmem_cache_destroy(ai->aeb_slab_cache);
-
- kfree(ai);
-}
-
-/**
* self_check_ai - check the attaching information.
* @ubi: UBI device description object
* @ai: attaching information
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 3497703..344b4cb 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -76,7 +76,10 @@ static int __initdata mtd_devs;
/* MTD devices specification parameters */
static struct mtd_dev_param __initdata mtd_dev_param[UBI_MAX_DEVICES];
-
+#ifdef CONFIG_MTD_UBI_FASTMAP
+/* UBI module parameter to enable fastmap automatically on non-fastmap images */
+static bool fm_autoconvert;
+#endif
/* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */
struct class *ubi_class;
@@ -153,6 +156,19 @@ int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol, int ntype)
ubi_do_get_device_info(ubi, &nt.di);
ubi_do_get_volume_info(ubi, vol, &nt.vi);
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ switch (ntype) {
+ case UBI_VOLUME_ADDED:
+ case UBI_VOLUME_REMOVED:
+ case UBI_VOLUME_RESIZED:
+ case UBI_VOLUME_RENAMED:
+ if (ubi_update_fastmap(ubi)) {
+ ubi_err("Unable to update fastmap!");
+ ubi_ro_mode(ubi);
+ }
+ }
+#endif
return blocking_notifier_call_chain(&ubi_notifiers, ntype, &nt);
}
@@ -918,10 +934,40 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
ubi->vid_hdr_offset = vid_hdr_offset;
ubi->autoresize_vol_id = -1;
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ ubi->fm_pool.used = ubi->fm_pool.size = 0;
+ ubi->fm_wl_pool.used = ubi->fm_wl_pool.size = 0;
+
+ /*
+ * fm_pool.max_size is 5% of the total number of PEBs but it's also
+ * between UBI_FM_MAX_POOL_SIZE and UBI_FM_MIN_POOL_SIZE.
+ */
+ ubi->fm_pool.max_size = min(((int)mtd_div_by_eb(ubi->mtd->size,
+ ubi->mtd) / 100) * 5, UBI_FM_MAX_POOL_SIZE);
+ if (ubi->fm_pool.max_size < UBI_FM_MIN_POOL_SIZE)
+ ubi->fm_pool.max_size = UBI_FM_MIN_POOL_SIZE;
+
+ ubi->fm_wl_pool.max_size = UBI_FM_WL_POOL_SIZE;
+ ubi->fm_disabled = !fm_autoconvert;
+
+ if (!ubi->fm_disabled && (int)mtd_div_by_eb(ubi->mtd->size, ubi->mtd)
+ <= UBI_FM_MAX_START) {
+ ubi_err("More than %i PEBs are needed for fastmap, sorry.",
+ UBI_FM_MAX_START);
+ ubi->fm_disabled = 1;
+ }
+
+ ubi_msg("default fastmap pool size: %d", ubi->fm_pool.max_size);
+ ubi_msg("default fastmap WL pool size: %d", ubi->fm_wl_pool.max_size);
+#else
+ ubi->fm_disabled = 1;
+#endif
mutex_init(&ubi->buf_mutex);
mutex_init(&ubi->ckvol_mutex);
mutex_init(&ubi->device_mutex);
spin_lock_init(&ubi->volumes_lock);
+ mutex_init(&ubi->fm_mutex);
+ init_rwsem(&ubi->fm_sem);
ubi_msg("attaching mtd%d to ubi%d", mtd->index, ubi_num);
@@ -934,11 +980,17 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
if (!ubi->peb_buf)
goto out_free;
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ ubi->fm_size = ubi_calc_fm_size(ubi);
+ ubi->fm_buf = vzalloc(ubi->fm_size);
+ if (!ubi->fm_buf)
+ goto out_free;
+#endif
err = ubi_debugging_init_dev(ubi);
if (err)
goto out_free;
- err = ubi_attach(ubi);
+ err = ubi_attach(ubi, 0);
if (err) {
ubi_err("failed to attach mtd%d, error %d", mtd->index, err);
goto out_debugging;
@@ -1012,6 +1064,7 @@ out_debugging:
ubi_debugging_exit_dev(ubi);
out_free:
vfree(ubi->peb_buf);
+ vfree(ubi->fm_buf);
if (ref)
put_device(&ubi->dev);
else
@@ -1061,7 +1114,11 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
ubi_assert(ubi_num == ubi->ubi_num);
ubi_notify_all(ubi, UBI_VOLUME_REMOVED, NULL);
ubi_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num);
-
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ /*
+ * If we don't write a new fastmap at detach time we lose all
+ * EC updates that have been made since the last written fastmap.
+ */
+ ubi_update_fastmap(ubi);
+#endif
/*
* Before freeing anything, we have to stop the background thread to
* prevent it from doing anything on this device while we are freeing.
@@ -1077,12 +1134,14 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
ubi_debugfs_exit_dev(ubi);
uif_close(ubi);
+
ubi_wl_close(ubi);
ubi_free_internal_volumes(ubi);
vfree(ubi->vtbl);
put_mtd_device(ubi->mtd);
ubi_debugging_exit_dev(ubi);
vfree(ubi->peb_buf);
+ vfree(ubi->fm_buf);
ubi_msg("mtd%d is detached from ubi%d", ubi->mtd->index, ubi->ubi_num);
put_device(&ubi->dev);
return 0;
@@ -1404,7 +1463,10 @@ MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: mtd=<name|num|pa
"Example 2: mtd=content,1984 mtd=4 - attach MTD device with name \"content\" using VID header offset 1984, and MTD device number 4 with default VID header offset.\n"
"Example 3: mtd=/dev/mtd1,0,25 - attach MTD device /dev/mtd1 using default VID header offset and reserve 25*nand_size_in_blocks/1024 erase blocks for bad block handling.\n"
"\t(e.g. if the NAND *chipset* has 4096 PEB, 100 will be reserved for this UBI device).");
-
+#ifdef CONFIG_MTD_UBI_FASTMAP
+module_param(fm_autoconvert, bool, 0644);
+MODULE_PARM_DESC(fm_autoconvert, "Set this parameter to enable fastmap automatically on images without a fastmap.");
+#endif
MODULE_VERSION(__stringify(UBI_VERSION));
MODULE_DESCRIPTION("UBI - Unsorted Block Images");
MODULE_AUTHOR("Artem Bityutskiy");
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index a26d7d2..0e11671d 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -57,7 +57,7 @@
* global sequence counter value. It also increases the global sequence
* counter.
*/
-static unsigned long long next_sqnum(struct ubi_device *ubi)
+unsigned long long ubi_next_sqnum(struct ubi_device *ubi)
{
unsigned long long sqnum;
@@ -340,7 +340,9 @@ int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
dbg_eba("erase LEB %d:%d, PEB %d", vol_id, lnum, pnum);
+ down_read(&ubi->fm_sem);
vol->eba_tbl[lnum] = UBI_LEB_UNMAPPED;
+ up_read(&ubi->fm_sem);
err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 0);
out_unlock:
@@ -521,7 +523,7 @@ retry:
goto out_put;
}
- vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
if (err)
goto write_error;
@@ -548,7 +550,9 @@ retry:
mutex_unlock(&ubi->buf_mutex);
ubi_free_vid_hdr(ubi, vid_hdr);
+ down_read(&ubi->fm_sem);
vol->eba_tbl[lnum] = new_pnum;
+ up_read(&ubi->fm_sem);
ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
ubi_msg("data was successfully recovered");
@@ -632,7 +636,7 @@ int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
}
vid_hdr->vol_type = UBI_VID_DYNAMIC;
- vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
vid_hdr->vol_id = cpu_to_be32(vol_id);
vid_hdr->lnum = cpu_to_be32(lnum);
vid_hdr->compat = ubi_get_compat(ubi, vol_id);
@@ -665,7 +669,9 @@ retry:
}
}
+ down_read(&ubi->fm_sem);
vol->eba_tbl[lnum] = pnum;
+ up_read(&ubi->fm_sem);
leb_write_unlock(ubi, vol_id, lnum);
ubi_free_vid_hdr(ubi, vid_hdr);
@@ -692,7 +698,7 @@ write_error:
return err;
}
- vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
ubi_msg("try another PEB");
goto retry;
}
@@ -745,7 +751,7 @@ int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
return err;
}
- vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
vid_hdr->vol_id = cpu_to_be32(vol_id);
vid_hdr->lnum = cpu_to_be32(lnum);
vid_hdr->compat = ubi_get_compat(ubi, vol_id);
@@ -783,7 +789,9 @@ retry:
}
ubi_assert(vol->eba_tbl[lnum] < 0);
+ down_read(&ubi->fm_sem);
vol->eba_tbl[lnum] = pnum;
+ up_read(&ubi->fm_sem);
leb_write_unlock(ubi, vol_id, lnum);
ubi_free_vid_hdr(ubi, vid_hdr);
@@ -810,7 +818,7 @@ write_error:
return err;
}
- vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
ubi_msg("try another PEB");
goto retry;
}
@@ -862,7 +870,7 @@ int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
if (err)
goto out_mutex;
- vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
vid_hdr->vol_id = cpu_to_be32(vol_id);
vid_hdr->lnum = cpu_to_be32(lnum);
vid_hdr->compat = ubi_get_compat(ubi, vol_id);
@@ -904,7 +912,9 @@ retry:
goto out_leb_unlock;
}
+ down_read(&ubi->fm_sem);
vol->eba_tbl[lnum] = pnum;
+ up_read(&ubi->fm_sem);
out_leb_unlock:
leb_write_unlock(ubi, vol_id, lnum);
@@ -930,7 +940,7 @@ write_error:
goto out_leb_unlock;
}
- vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
ubi_msg("try another PEB");
goto retry;
}
@@ -1089,7 +1099,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
vid_hdr->data_size = cpu_to_be32(data_size);
vid_hdr->data_crc = cpu_to_be32(crc);
}
- vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
err = ubi_io_write_vid_hdr(ubi, to, vid_hdr);
if (err) {
@@ -1151,7 +1161,9 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
}
ubi_assert(vol->eba_tbl[lnum] == from);
+ down_read(&ubi->fm_sem);
vol->eba_tbl[lnum] = to;
+ up_read(&ubi->fm_sem);
out_unlock_buf:
mutex_unlock(&ubi->buf_mutex);
@@ -1202,6 +1214,102 @@ static void print_rsvd_warning(struct ubi_device *ubi,
}
/**
+ * self_check_eba - run a self check on the EBA table constructed by fastmap.
+ * @ubi: UBI device description object
+ * @ai_fastmap: UBI attach info object created by fastmap
+ * @ai_scan: UBI attach info object created by scanning
+ *
+ * Returns < 0 in case of an internal error, 0 otherwise.
+ * If a bad EBA table entry is found it is printed out and
+ * ubi_assert() triggers.
+ */
+int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap,
+ struct ubi_attach_info *ai_scan)
+{
+ int i, j, num_volumes, ret = 0;
+ int **scan_eba, **fm_eba;
+ struct ubi_ainf_volume *av;
+ struct ubi_volume *vol;
+ struct ubi_ainf_peb *aeb;
+ struct rb_node *rb;
+
+ num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
+
+ scan_eba = kmalloc(sizeof(*scan_eba) * num_volumes, GFP_KERNEL);
+ if (!scan_eba)
+ return -ENOMEM;
+
+ fm_eba = kmalloc(sizeof(*fm_eba) * num_volumes, GFP_KERNEL);
+ if (!fm_eba) {
+ kfree(scan_eba);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < num_volumes; i++) {
+ vol = ubi->volumes[i];
+ if (!vol)
+ continue;
+
+ scan_eba[i] = kmalloc(vol->reserved_pebs * sizeof(**scan_eba),
+ GFP_KERNEL);
+ if (!scan_eba[i]) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
+ fm_eba[i] = kmalloc(vol->reserved_pebs * sizeof(**fm_eba),
+ GFP_KERNEL);
+ if (!fm_eba[i]) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
+ for (j = 0; j < vol->reserved_pebs; j++)
+ scan_eba[i][j] = fm_eba[i][j] = UBI_LEB_UNMAPPED;
+
+ av = ubi_find_av(ai_scan, idx2vol_id(ubi, i));
+ if (!av)
+ continue;
+
+ ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb)
+ scan_eba[i][aeb->lnum] = aeb->pnum;
+
+ av = ubi_find_av(ai_fastmap, idx2vol_id(ubi, i));
+ if (!av)
+ continue;
+
+ ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb)
+ fm_eba[i][aeb->lnum] = aeb->pnum;
+
+ for (j = 0; j < vol->reserved_pebs; j++) {
+ if (scan_eba[i][j] != fm_eba[i][j]) {
+ if (scan_eba[i][j] == UBI_LEB_UNMAPPED ||
+ fm_eba[i][j] == UBI_LEB_UNMAPPED)
+ continue;
+
+ ubi_err("LEB:%i:%i is PEB:%i instead of %i!",
+ vol->vol_id, j, fm_eba[i][j],
+ scan_eba[i][j]);
+ ubi_assert(0);
+ }
+ }
+ }
+
+out_free:
+ for (i = 0; i < num_volumes; i++) {
+ if (!ubi->volumes[i])
+ continue;
+
+ kfree(scan_eba[i]);
+ kfree(fm_eba[i]);
+ }
+
+ kfree(scan_eba);
+ kfree(fm_eba);
+ return ret;
+}
+
+/**
* ubi_eba_init - initialize the EBA sub-system using attaching information.
* @ubi: UBI device description object
* @ai: attaching information
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
new file mode 100644
index 0000000..1a5f53c
--- /dev/null
+++ b/drivers/mtd/ubi/fastmap.c
@@ -0,0 +1,1537 @@
+/*
+ * Copyright (c) 2012 Linutronix GmbH
+ * Author: Richard Weinberger <richard@nod.at>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ */
+
+#include <linux/crc32.h>
+#include "ubi.h"
+
+/**
+ * ubi_calc_fm_size - calculates the fastmap size in bytes for an UBI device.
+ * @ubi: UBI device description object
+ */
+size_t ubi_calc_fm_size(struct ubi_device *ubi)
+{
+ size_t size;
+
+ size = sizeof(struct ubi_fm_hdr) +
+ sizeof(struct ubi_fm_scan_pool) +
+ sizeof(struct ubi_fm_scan_pool) +
+ (ubi->peb_count * sizeof(struct ubi_fm_ec)) +
+ (sizeof(struct ubi_fm_eba) +
+ (ubi->peb_count * sizeof(__be32))) +
+ sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
+ return roundup(size, ubi->leb_size);
+}
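ubi_calc_fm_size() sums a worst-case payload (superblock headers, both pools, one EC record per PEB, the EBA tables, one volume header per possible volume) and rounds the total up to whole LEBs, since the fastmap is read and written LEB-wise. A userspace sketch of the rounding step with made-up geometry:

    #include <stdio.h>

    /* Hypothetical geometry: 128 KiB LEBs, ~200 KiB of raw fastmap
     * payload. Mirrors roundup(size, ubi->leb_size) above: the fastmap
     * always occupies whole LEBs. */
    static size_t roundup_sz(size_t n, size_t align)
    {
        return ((n + align - 1) / align) * align;
    }

    int main(void)
    {
        size_t leb = 128 * 1024, raw = 200 * 1024;
        size_t fm = roundup_sz(raw, leb);

        printf("fm_size = %zu bytes (%zu LEBs)\n", fm, fm / leb);
        return 0;
    }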
+
+/**
+ * new_fm_vhdr - allocate a new volume header for fastmap usage.
+ * @ubi: UBI device description object
+ * @vol_id: the VID of the new header
+ *
+ * Returns a new struct ubi_vid_hdr on success.
+ * NULL indicates out of memory.
+ */
+static struct ubi_vid_hdr *new_fm_vhdr(struct ubi_device *ubi, int vol_id)
+{
+ struct ubi_vid_hdr *new;
+
+ new = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
+ if (!new)
+ goto out;
+
+ new->vol_type = UBI_VID_DYNAMIC;
+ new->vol_id = cpu_to_be32(vol_id);
+
+ /*
+ * UBI implementations without fastmap support have to delete the
+ * fastmap.
+ */
+ new->compat = UBI_COMPAT_DELETE;
+
+out:
+ return new;
+}
+
+/**
+ * add_aeb - create and add an attach erase block to a given list.
+ * @ai: UBI attach info object
+ * @list: the target list
+ * @pnum: PEB number of the new attach erase block
+ * @ec: erase counter of the new PEB
+ * @scrub: scrub this PEB after attaching
+ *
+ * Returns 0 on success, < 0 indicates an internal error.
+ */
+static int add_aeb(struct ubi_attach_info *ai, struct list_head *list,
+ int pnum, int ec, int scrub)
+{
+ struct ubi_ainf_peb *aeb;
+
+ aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
+ if (!aeb)
+ return -ENOMEM;
+
+ aeb->pnum = pnum;
+ aeb->ec = ec;
+ aeb->lnum = -1;
+ aeb->scrub = scrub;
+ aeb->copy_flag = aeb->sqnum = 0;
+
+ ai->ec_sum += aeb->ec;
+ ai->ec_count++;
+
+ if (ai->max_ec < aeb->ec)
+ ai->max_ec = aeb->ec;
+
+ if (ai->min_ec > aeb->ec)
+ ai->min_ec = aeb->ec;
+
+ list_add_tail(&aeb->u.list, list);
+
+ return 0;
+}
+
+/**
+ * add_vol - create and add a new volume to ubi_attach_info.
+ * @ai: ubi_attach_info object
+ * @vol_id: VID of the new volume
+ * @used_ebs: number of used EBS
+ * @data_pad: data padding value of the new volume
+ * @vol_type: volume type
+ * @last_eb_bytes: number of bytes in the last LEB
+ *
+ * Returns the new struct ubi_ainf_volume on success.
+ * NULL indicates an error.
+ */
+static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
+ int used_ebs, int data_pad, u8 vol_type,
+ int last_eb_bytes)
+{
+ struct ubi_ainf_volume *av;
+ struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
+
+ while (*p) {
+ parent = *p;
+ av = rb_entry(parent, struct ubi_ainf_volume, rb);
+
+ if (vol_id > av->vol_id)
+ p = &(*p)->rb_left;
+ else if (vol_id < av->vol_id)
+ p = &(*p)->rb_right;
+ }
+
+ av = kmalloc(sizeof(struct ubi_ainf_volume), GFP_KERNEL);
+ if (!av)
+ goto out;
+
+ av->highest_lnum = av->leb_count = 0;
+ av->vol_id = vol_id;
+ av->used_ebs = used_ebs;
+ av->data_pad = data_pad;
+ av->last_data_size = last_eb_bytes;
+ av->compat = 0;
+ av->vol_type = vol_type;
+ av->root = RB_ROOT;
+
+ dbg_bld("found volume (ID %i)", vol_id);
+
+ rb_link_node(&av->rb, parent, p);
+ rb_insert_color(&av->rb, &ai->volumes);
+
+out:
+ return av;
+}
+
+/**
+ * assign_aeb_to_av - assigns a SEB to a given ainf_volume and removes it
+ * from its original list.
+ * @ai: ubi_attach_info object
+ * @aeb: the to be assigned SEB
+ * @av: target scan volume
+ */
+static void assign_aeb_to_av(struct ubi_attach_info *ai,
+ struct ubi_ainf_peb *aeb,
+ struct ubi_ainf_volume *av)
+{
+ struct ubi_ainf_peb *tmp_aeb;
+ struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
+
+ p = &av->root.rb_node;
+ while (*p) {
+ parent = *p;
+
+ tmp_aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
+ if (aeb->lnum != tmp_aeb->lnum) {
+ if (aeb->lnum < tmp_aeb->lnum)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+
+ continue;
+ } else
+ break;
+ }
+
+ list_del(&aeb->u.list);
+ av->leb_count++;
+
+ rb_link_node(&aeb->u.rb, parent, p);
+ rb_insert_color(&aeb->u.rb, &av->root);
+}
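add_vol() above, assign_aeb_to_av(), and update_vol() below all use the standard kernel rbtree insertion walk: descend from the root while remembering the parent and the address of the child slot, then hand both to rb_link_node() and recolor with rb_insert_color(). The idiom in isolation, with a hypothetical node type keyed by lnum:

    #include <linux/rbtree.h>

    struct ex_node {          /* example node, not a UBI structure */
        struct rb_node rb;
        int lnum;
    };

    static void ex_insert(struct rb_root *root, struct ex_node *new)
    {
        struct rb_node **p = &root->rb_node, *parent = NULL;

        while (*p) {
            struct ex_node *cur;

            parent = *p;
            cur = rb_entry(parent, struct ex_node, rb);
            if (new->lnum < cur->lnum)
                p = &(*p)->rb_left;
            else
                p = &(*p)->rb_right;
        }

        /* *p is the empty child slot, parent is the node owning it */
        rb_link_node(&new->rb, parent, p);
        rb_insert_color(&new->rb, root);
    }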
+
+/**
+ * update_vol - inserts or updates a LEB which was found in a pool.
+ * @ubi: the UBI device object
+ * @ai: attach info object
+ * @av: the volume this LEB belongs to
+ * @new_vh: the volume header derived from new_aeb
+ * @new_aeb: the AEB to be examined
+ *
+ * Returns 0 on success, < 0 indicates an internal error.
+ */
+static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
+ struct ubi_ainf_volume *av, struct ubi_vid_hdr *new_vh,
+ struct ubi_ainf_peb *new_aeb)
+{
+ struct rb_node **p = &av->root.rb_node, *parent = NULL;
+ struct ubi_ainf_peb *aeb, *victim;
+ int cmp_res;
+
+ while (*p) {
+ parent = *p;
+ aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
+
+ if (be32_to_cpu(new_vh->lnum) != aeb->lnum) {
+ if (be32_to_cpu(new_vh->lnum) < aeb->lnum)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+
+ continue;
+ }
+
+ /*
+ * This case can happen if the fastmap gets written
+ * because of a volume change (creation, deletion, ...).
+ * Then a PEB can be within the persistent EBA and the pool.
+ */
+ if (aeb->pnum == new_aeb->pnum) {
+ ubi_assert(aeb->lnum == new_aeb->lnum);
+ kmem_cache_free(ai->aeb_slab_cache, new_aeb);
+
+ return 0;
+ }
+
+ cmp_res = ubi_compare_lebs(ubi, aeb, new_aeb->pnum, new_vh);
+ if (cmp_res < 0)
+ return cmp_res;
+
+ /* new_aeb is newer */
+ if (cmp_res & 1) {
+ victim = kmem_cache_alloc(ai->aeb_slab_cache,
+ GFP_KERNEL);
+ if (!victim)
+ return -ENOMEM;
+
+ victim->ec = aeb->ec;
+ victim->pnum = aeb->pnum;
+ list_add_tail(&victim->u.list, &ai->erase);
+
+ if (av->highest_lnum == be32_to_cpu(new_vh->lnum))
+ av->last_data_size =
+ be32_to_cpu(new_vh->data_size);
+
+ dbg_bld("vol %i: AEB %i's PEB %i is the newer",
+ av->vol_id, aeb->lnum, new_aeb->pnum);
+
+ aeb->ec = new_aeb->ec;
+ aeb->pnum = new_aeb->pnum;
+ aeb->copy_flag = new_vh->copy_flag;
+ aeb->scrub = new_aeb->scrub;
+ kmem_cache_free(ai->aeb_slab_cache, new_aeb);
+
+ /* new_aeb is older */
+ } else {
+ dbg_bld("vol %i: AEB %i's PEB %i is old, dropping it",
+ av->vol_id, aeb->lnum, new_aeb->pnum);
+ list_add_tail(&new_aeb->u.list, &ai->erase);
+ }
+
+ return 0;
+ }
+ /* This LEB is new, let's add it to the volume */
+
+ if (av->highest_lnum <= be32_to_cpu(new_vh->lnum)) {
+ av->highest_lnum = be32_to_cpu(new_vh->lnum);
+ av->last_data_size = be32_to_cpu(new_vh->data_size);
+ }
+
+ if (av->vol_type == UBI_STATIC_VOLUME)
+ av->used_ebs = be32_to_cpu(new_vh->used_ebs);
+
+ av->leb_count++;
+
+ rb_link_node(&new_aeb->u.rb, parent, p);
+ rb_insert_color(&new_aeb->u.rb, &av->root);
+
+ return 0;
+}
+
+/**
+ * process_pool_aeb - we found a non-empty PEB in a pool.
+ * @ubi: UBI device object
+ * @ai: attach info object
+ * @new_vh: the volume header derived from new_aeb
+ * @new_aeb: the AEB to be examined
+ *
+ * Returns 0 on success, < 0 indicates an internal error.
+ */
+static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
+ struct ubi_vid_hdr *new_vh,
+ struct ubi_ainf_peb *new_aeb)
+{
+ struct ubi_ainf_volume *av, *tmp_av = NULL;
+ struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
+ int found = 0;
+
+ if (be32_to_cpu(new_vh->vol_id) == UBI_FM_SB_VOLUME_ID ||
+ be32_to_cpu(new_vh->vol_id) == UBI_FM_DATA_VOLUME_ID) {
+ kmem_cache_free(ai->aeb_slab_cache, new_aeb);
+
+ return 0;
+ }
+
+ /* Find the volume this SEB belongs to */
+ while (*p) {
+ parent = *p;
+ tmp_av = rb_entry(parent, struct ubi_ainf_volume, rb);
+
+ if (be32_to_cpu(new_vh->vol_id) > tmp_av->vol_id)
+ p = &(*p)->rb_left;
+ else if (be32_to_cpu(new_vh->vol_id) < tmp_av->vol_id)
+ p = &(*p)->rb_right;
+ else {
+ found = 1;
+ break;
+ }
+ }
+
+ if (found)
+ av = tmp_av;
+ else {
+ ubi_err("orphaned volume in fastmap pool!");
+ return UBI_BAD_FASTMAP;
+ }
+
+ ubi_assert(be32_to_cpu(new_vh->vol_id) == av->vol_id);
+
+ return update_vol(ubi, ai, av, new_vh, new_aeb);
+}
+
+/**
+ * unmap_peb - unmap a PEB.
+ * @ai: UBI attach info object
+ * @pnum: the PEB to be unmapped
+ *
+ * If fastmap detects a free PEB in the pool it has to check whether
+ * this PEB has been unmapped after writing the fastmap.
+ */
+static void unmap_peb(struct ubi_attach_info *ai, int pnum)
+{
+ struct ubi_ainf_volume *av;
+ struct rb_node *node, *node2;
+ struct ubi_ainf_peb *aeb;
+
+ for (node = rb_first(&ai->volumes); node; node = rb_next(node)) {
+ av = rb_entry(node, struct ubi_ainf_volume, rb);
+
+ for (node2 = rb_first(&av->root); node2;
+ node2 = rb_next(node2)) {
+ aeb = rb_entry(node2, struct ubi_ainf_peb, u.rb);
+ if (aeb->pnum == pnum) {
+ rb_erase(&aeb->u.rb, &av->root);
+ kmem_cache_free(ai->aeb_slab_cache, aeb);
+ return;
+ }
+ }
+ }
+}
+
+/**
+ * scan_pool - scans a pool for changed (no longer empty) PEBs.
+ * @ubi: UBI device object
+ * @ai: attach info object
+ * @pebs: an array of all PEB numbers in the to be scanned pool
+ * @pool_size: size of the pool (number of entries in @pebs)
+ * @max_sqnum: pointer to the maximal sequence number
+ * @eba_orphans: list of PEBs which need to be scanned
+ * @free: list of PEBs which are most likely free (and go into @ai->free)
+ *
+ * Returns 0 on success, if the pool is unusable UBI_BAD_FASTMAP is returned.
+ * < 0 indicates an internal error.
+ */
+static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
+ int *pebs, int pool_size, unsigned long long *max_sqnum,
+ struct list_head *eba_orphans, struct list_head *free)
+{
+ struct ubi_vid_hdr *vh;
+ struct ubi_ec_hdr *ech;
+ struct ubi_ainf_peb *new_aeb, *tmp_aeb;
+ int i, pnum, err, found_orphan, ret = 0;
+
+ ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+ if (!ech)
+ return -ENOMEM;
+
+ vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
+ if (!vh) {
+ kfree(ech);
+ return -ENOMEM;
+ }
+
+ dbg_bld("scanning fastmap pool: size = %i", pool_size);
+
+ /*
+ * Now scan all PEBs in the pool to find changes which have been made
+ * after the creation of the fastmap
+ */
+ for (i = 0; i < pool_size; i++) {
+ int scrub = 0;
+
+ pnum = be32_to_cpu(pebs[i]);
+
+ if (ubi_io_is_bad(ubi, pnum)) {
+ ubi_err("bad PEB in fastmap pool!");
+ ret = UBI_BAD_FASTMAP;
+ goto out;
+ }
+
+ err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
+ if (err && err != UBI_IO_BITFLIPS) {
+ ubi_err("unable to read EC header! PEB:%i err:%i",
+ pnum, err);
+ ret = err > 0 ? UBI_BAD_FASTMAP : err;
+ goto out;
+ } else if (err == UBI_IO_BITFLIPS)
+ scrub = 1;
+
+ if (be32_to_cpu(ech->image_seq) != ubi->image_seq) {
+ ubi_err("bad image seq: 0x%x, expected: 0x%x",
+ be32_to_cpu(ech->image_seq), ubi->image_seq);
+ ret = UBI_BAD_FASTMAP;
+ goto out;
+ }
+
+ err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
+ if (err == UBI_IO_FF || err == UBI_IO_FF_BITFLIPS) {
+ unsigned long long ec = be64_to_cpu(ech->ec);
+ unmap_peb(ai, pnum);
+ dbg_bld("Adding PEB to free: %i", pnum);
+ if (err == UBI_IO_FF_BITFLIPS)
+ add_aeb(ai, free, pnum, ec, 1);
+ else
+ add_aeb(ai, free, pnum, ec, 0);
+ continue;
+ } else if (err == 0 || err == UBI_IO_BITFLIPS) {
+ dbg_bld("Found non empty PEB:%i in pool", pnum);
+
+ if (err == UBI_IO_BITFLIPS)
+ scrub = 1;
+
+ found_orphan = 0;
+ list_for_each_entry(tmp_aeb, eba_orphans, u.list) {
+ if (tmp_aeb->pnum == pnum) {
+ found_orphan = 1;
+ break;
+ }
+ }
+ if (found_orphan) {
+ /* unlink before freeing to avoid a use-after-free */
+ list_del(&tmp_aeb->u.list);
+ kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
+ }
+
+ new_aeb = kmem_cache_alloc(ai->aeb_slab_cache,
+ GFP_KERNEL);
+ if (!new_aeb) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ new_aeb->ec = be64_to_cpu(ech->ec);
+ new_aeb->pnum = pnum;
+ new_aeb->lnum = be32_to_cpu(vh->lnum);
+ new_aeb->sqnum = be64_to_cpu(vh->sqnum);
+ new_aeb->copy_flag = vh->copy_flag;
+ new_aeb->scrub = scrub;
+
+ if (*max_sqnum < new_aeb->sqnum)
+ *max_sqnum = new_aeb->sqnum;
+
+ err = process_pool_aeb(ubi, ai, vh, new_aeb);
+ if (err) {
+ ret = err > 0 ? UBI_BAD_FASTMAP : err;
+ goto out;
+ }
+ } else {
+ /* We are paranoid and fall back to scanning mode */
+ ubi_err("fastmap pool PEBs contains damaged PEBs!");
+ ret = err > 0 ? UBI_BAD_FASTMAP : err;
+ goto out;
+ }
+
+ }
+
+out:
+ ubi_free_vid_hdr(ubi, vh);
+ kfree(ech);
+ return ret;
+}
+
+/**
+ * count_fastmap_pebs - Counts the PEBs found by fastmap.
+ * @ai: The UBI attach info object
+ */
+static int count_fastmap_pebs(struct ubi_attach_info *ai)
+{
+ struct ubi_ainf_peb *aeb;
+ struct ubi_ainf_volume *av;
+ struct rb_node *rb1, *rb2;
+ int n = 0;
+
+ list_for_each_entry(aeb, &ai->erase, u.list)
+ n++;
+
+ list_for_each_entry(aeb, &ai->free, u.list)
+ n++;
+
+ ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
+ ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
+ n++;
+
+ return n;
+}
+
+/**
+ * ubi_attach_fastmap - creates ubi_attach_info from a fastmap.
+ * @ubi: UBI device object
+ * @ai: UBI attach info object
+ * @fm: the fastmap to be attached
+ *
+ * Returns 0 on success, UBI_BAD_FASTMAP if the found fastmap was unusable.
+ * < 0 indicates an internal error.
+ */
+static int ubi_attach_fastmap(struct ubi_device *ubi,
+ struct ubi_attach_info *ai,
+ struct ubi_fastmap_layout *fm)
+{
+ struct list_head used, eba_orphans, free;
+ struct ubi_ainf_volume *av;
+ struct ubi_ainf_peb *aeb, *tmp_aeb, *_tmp_aeb;
+ struct ubi_ec_hdr *ech;
+ struct ubi_fm_sb *fmsb;
+ struct ubi_fm_hdr *fmhdr;
+ struct ubi_fm_scan_pool *fmpl1, *fmpl2;
+ struct ubi_fm_ec *fmec;
+ struct ubi_fm_volhdr *fmvhdr;
+ struct ubi_fm_eba *fm_eba;
+ int ret, i, j, pool_size, wl_pool_size;
+ size_t fm_pos = 0, fm_size = ubi->fm_size;
+ unsigned long long max_sqnum = 0;
+ void *fm_raw = ubi->fm_buf;
+
+ INIT_LIST_HEAD(&used);
+ INIT_LIST_HEAD(&free);
+ INIT_LIST_HEAD(&eba_orphans);
+ INIT_LIST_HEAD(&ai->corr);
+ INIT_LIST_HEAD(&ai->free);
+ INIT_LIST_HEAD(&ai->erase);
+ INIT_LIST_HEAD(&ai->alien);
+ ai->volumes = RB_ROOT;
+ ai->min_ec = UBI_MAX_ERASECOUNTER;
+
+ ai->aeb_slab_cache = kmem_cache_create("ubi_ainf_peb_slab",
+ sizeof(struct ubi_ainf_peb),
+ 0, 0, NULL);
+ if (!ai->aeb_slab_cache) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ fmsb = (struct ubi_fm_sb *)(fm_raw);
+ ai->max_sqnum = fmsb->sqnum;
+ fm_pos += sizeof(struct ubi_fm_sb);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ fmhdr = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmhdr);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) {
+ ubi_err("bad fastmap header magic: 0x%x, expected: 0x%x",
+ be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC);
+ goto fail_bad;
+ }
+
+ fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmpl1);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+ if (be32_to_cpu(fmpl1->magic) != UBI_FM_POOL_MAGIC) {
+ ubi_err("bad fastmap pool magic: 0x%x, expected: 0x%x",
+ be32_to_cpu(fmpl1->magic), UBI_FM_POOL_MAGIC);
+ goto fail_bad;
+ }
+
+ fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmpl2);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+ if (be32_to_cpu(fmpl2->magic) != UBI_FM_POOL_MAGIC) {
+ ubi_err("bad fastmap pool magic: 0x%x, expected: 0x%x",
+ be32_to_cpu(fmpl2->magic), UBI_FM_POOL_MAGIC);
+ goto fail_bad;
+ }
+
+ pool_size = be16_to_cpu(fmpl1->size);
+ wl_pool_size = be16_to_cpu(fmpl2->size);
+ fm->max_pool_size = be16_to_cpu(fmpl1->max_size);
+ fm->max_wl_pool_size = be16_to_cpu(fmpl2->max_size);
+
+ if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) {
+ ubi_err("bad pool size: %i", pool_size);
+ goto fail_bad;
+ }
+
+ if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) {
+ ubi_err("bad WL pool size: %i", wl_pool_size);
+ goto fail_bad;
+ }
+
+ if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE ||
+ fm->max_pool_size < 0) {
+ ubi_err("bad maximal pool size: %i", fm->max_pool_size);
+ goto fail_bad;
+ }
+
+ if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE ||
+ fm->max_wl_pool_size < 0) {
+ ubi_err("bad maximal WL pool size: %i", fm->max_wl_pool_size);
+ goto fail_bad;
+ }
+
+ /* read EC values from free list */
+ for (i = 0; i < be32_to_cpu(fmhdr->free_peb_count); i++) {
+ fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmec);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
+ be32_to_cpu(fmec->ec), 0);
+ }
+
+ /* read EC values from used list */
+ for (i = 0; i < be32_to_cpu(fmhdr->used_peb_count); i++) {
+ fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmec);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
+ be32_to_cpu(fmec->ec), 0);
+ }
+
+ /* read EC values from scrub list */
+ for (i = 0; i < be32_to_cpu(fmhdr->scrub_peb_count); i++) {
+ fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmec);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
+ be32_to_cpu(fmec->ec), 1);
+ }
+
+ /* read EC values from erase list */
+ for (i = 0; i < be32_to_cpu(fmhdr->erase_peb_count); i++) {
+ fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmec);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
+ be32_to_cpu(fmec->ec), 1);
+ }
+
+ ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
+ ai->bad_peb_count = be32_to_cpu(fmhdr->bad_peb_count);
+
+ /* Iterate over all volumes and read their EBA table */
+ for (i = 0; i < be32_to_cpu(fmhdr->vol_count); i++) {
+ fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmvhdr);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) {
+ ubi_err("bad fastmap vol header magic: 0x%x, " \
+ "expected: 0x%x",
+ be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC);
+ goto fail_bad;
+ }
+
+ av = add_vol(ai, be32_to_cpu(fmvhdr->vol_id),
+ be32_to_cpu(fmvhdr->used_ebs),
+ be32_to_cpu(fmvhdr->data_pad),
+ fmvhdr->vol_type,
+ be32_to_cpu(fmvhdr->last_eb_bytes));
+
+ if (!av)
+ goto fail_bad;
+
+ ai->vols_found++;
+ if (ai->highest_vol_id < be32_to_cpu(fmvhdr->vol_id))
+ ai->highest_vol_id = be32_to_cpu(fmvhdr->vol_id);
+
+ fm_eba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fm_eba);
+ fm_pos += (sizeof(__be32) * be32_to_cpu(fm_eba->reserved_pebs));
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) {
+ ubi_err("bad fastmap EBA header magic: 0x%x, " \
+ "expected: 0x%x",
+ be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC);
+ goto fail_bad;
+ }
+
+ for (j = 0; j < be32_to_cpu(fm_eba->reserved_pebs); j++) {
+ int pnum = be32_to_cpu(fm_eba->pnum[j]);
+
+ if ((int)be32_to_cpu(fm_eba->pnum[j]) < 0)
+ continue;
+
+ aeb = NULL;
+ list_for_each_entry(tmp_aeb, &used, u.list) {
+ if (tmp_aeb->pnum == pnum)
+ aeb = tmp_aeb;
+ }
+
+ /*
+ * This can happen if a PEB is already in an EBA known
+ * by this fastmap but the PEB itself is not in the used
+ * list. In this case the PEB can be within the fastmap
+ * pool, or it was in the protection queue while the
+ * fastmap was being written.
+ */
+ if (!aeb) {
+ aeb = kmem_cache_alloc(ai->aeb_slab_cache,
+ GFP_KERNEL);
+ if (!aeb) {
+ ret = -ENOMEM;
+
+ goto fail;
+ }
+
+ aeb->lnum = j;
+ aeb->pnum = be32_to_cpu(fm_eba->pnum[j]);
+ aeb->ec = -1;
+ aeb->scrub = aeb->copy_flag = aeb->sqnum = 0;
+ list_add_tail(&aeb->u.list, &eba_orphans);
+ continue;
+ }
+
+ aeb->lnum = j;
+
+ if (av->highest_lnum <= aeb->lnum)
+ av->highest_lnum = aeb->lnum;
+
+ assign_aeb_to_av(ai, aeb, av);
+
+ dbg_bld("inserting PEB:%i (LEB %i) to vol %i",
+ aeb->pnum, aeb->lnum, av->vol_id);
+ }
+
+ ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+ if (!ech) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &eba_orphans,
+ u.list) {
+ int err;
+
+ if (ubi_io_is_bad(ubi, tmp_aeb->pnum)) {
+ ubi_err("bad PEB in fastmap EBA orphan list");
+ ret = UBI_BAD_FASTMAP;
+ kfree(ech);
+ goto fail;
+ }
+
+ err = ubi_io_read_ec_hdr(ubi, tmp_aeb->pnum, ech, 0);
+ if (err && err != UBI_IO_BITFLIPS) {
+ ubi_err("unable to read EC header! PEB:%i " \
+ "err:%i", tmp_aeb->pnum, err);
+ ret = err > 0 ? UBI_BAD_FASTMAP : err;
+ kfree(ech);
+
+ goto fail;
+ } else if (err == UBI_IO_BITFLIPS)
+ tmp_aeb->scrub = 1;
+
+ tmp_aeb->ec = be64_to_cpu(ech->ec);
+ assign_aeb_to_av(ai, tmp_aeb, av);
+ }
+
+ kfree(ech);
+ }
+
+ ret = scan_pool(ubi, ai, fmpl1->pebs, pool_size, &max_sqnum,
+ &eba_orphans, &free);
+ if (ret)
+ goto fail;
+
+ ret = scan_pool(ubi, ai, fmpl2->pebs, wl_pool_size, &max_sqnum,
+ &eba_orphans, &free);
+ if (ret)
+ goto fail;
+
+ if (max_sqnum > ai->max_sqnum)
+ ai->max_sqnum = max_sqnum;
+
+ list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) {
+ list_del(&tmp_aeb->u.list);
+ list_add_tail(&tmp_aeb->u.list, &ai->free);
+ }
+
+ /*
+ * If fastmap is leaking PEBs (must not happen), raise a
+ * fat warning and fall back to scanning mode.
+ * We do this here because in ubi_wl_init() it's too late
+ * and we cannot fall back to scanning.
+ */
+ if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count -
+ ai->bad_peb_count - fm->used_blocks))
+ goto fail_bad;
+
+ return 0;
+
+fail_bad:
+ ret = UBI_BAD_FASTMAP;
+fail:
+ return ret;
+}
+
+/**
+ * ubi_scan_fastmap - scan the fastmap.
+ * @ubi: UBI device object
+ * @ai: UBI attach info to be filled
+ * @fm_anchor: The fastmap starts at this PEB
+ *
+ * Returns 0 on success, UBI_NO_FASTMAP if no fastmap was found,
+ * UBI_BAD_FASTMAP if one was found but is not usable.
+ * < 0 indicates an internal error.
+ */
+int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
+ int fm_anchor)
+{
+ struct ubi_fm_sb *fmsb, *fmsb2;
+ struct ubi_vid_hdr *vh;
+ struct ubi_ec_hdr *ech;
+ struct ubi_fastmap_layout *fm;
+ int i, used_blocks, pnum, ret = 0;
+ size_t fm_size;
+ __be32 crc, tmp_crc;
+ unsigned long long sqnum = 0;
+
+ mutex_lock(&ubi->fm_mutex);
+ memset(ubi->fm_buf, 0, ubi->fm_size);
+
+ fmsb = kmalloc(sizeof(*fmsb), GFP_KERNEL);
+ if (!fmsb) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ fm = kzalloc(sizeof(*fm), GFP_KERNEL);
+ if (!fm) {
+ ret = -ENOMEM;
+ kfree(fmsb);
+ goto out;
+ }
+
+ ret = ubi_io_read(ubi, fmsb, fm_anchor, ubi->leb_start, sizeof(*fmsb));
+ if (ret && ret != UBI_IO_BITFLIPS)
+ goto free_fm_sb;
+ else if (ret == UBI_IO_BITFLIPS)
+ fm->to_be_tortured[0] = 1;
+
+ if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) {
+ ubi_err("bad super block magic: 0x%x, expected: 0x%x",
+ be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC);
+ ret = UBI_BAD_FASTMAP;
+ goto free_fm_sb;
+ }
+
+ if (fmsb->version != UBI_FM_FMT_VERSION) {
+ ubi_err("bad fastmap version: %i, expected: %i",
+ fmsb->version, UBI_FM_FMT_VERSION);
+ ret = UBI_BAD_FASTMAP;
+ goto free_fm_sb;
+ }
+
+ used_blocks = be32_to_cpu(fmsb->used_blocks);
+ if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) {
+ ubi_err("number of fastmap blocks is invalid: %i", used_blocks);
+ ret = UBI_BAD_FASTMAP;
+ goto free_fm_sb;
+ }
+
+ fm_size = ubi->leb_size * used_blocks;
+ if (fm_size != ubi->fm_size) {
+ ubi_err("bad fastmap size: %zi, expected: %zi", fm_size,
+ ubi->fm_size);
+ ret = UBI_BAD_FASTMAP;
+ goto free_fm_sb;
+ }
+
+ ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+ if (!ech) {
+ ret = -ENOMEM;
+ goto free_fm_sb;
+ }
+
+ vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
+ if (!vh) {
+ ret = -ENOMEM;
+ goto free_hdr;
+ }
+
+ for (i = 0; i < used_blocks; i++) {
+ pnum = be32_to_cpu(fmsb->block_loc[i]);
+
+ if (ubi_io_is_bad(ubi, pnum)) {
+ ret = UBI_BAD_FASTMAP;
+ goto free_hdr;
+ }
+
+ ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
+ if (ret && ret != UBI_IO_BITFLIPS) {
+ ubi_err("unable to read fastmap block# %i EC (PEB: %i)",
+ i, pnum);
+ if (ret > 0)
+ ret = UBI_BAD_FASTMAP;
+ goto free_hdr;
+ } else if (ret == UBI_IO_BITFLIPS)
+ fm->to_be_tortured[i] = 1;
+
+ if (!ubi->image_seq)
+ ubi->image_seq = be32_to_cpu(ech->image_seq);
+
+ if (be32_to_cpu(ech->image_seq) != ubi->image_seq) {
+ ret = UBI_BAD_FASTMAP;
+ goto free_hdr;
+ }
+
+ ret = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
+ if (ret && ret != UBI_IO_BITFLIPS) {
+ ubi_err("unable to read fastmap block# %i (PEB: %i)",
+ i, pnum);
+ goto free_hdr;
+ }
+
+ if (i == 0) {
+ if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) {
+ ubi_err("bad fastmap anchor vol_id: 0x%x," \
+ " expected: 0x%x",
+ be32_to_cpu(vh->vol_id),
+ UBI_FM_SB_VOLUME_ID);
+ ret = UBI_BAD_FASTMAP;
+ goto free_hdr;
+ }
+ } else {
+ if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) {
+ ubi_err("bad fastmap data vol_id: 0x%x," \
+ " expected: 0x%x",
+ be32_to_cpu(vh->vol_id),
+ UBI_FM_DATA_VOLUME_ID);
+ ret = UBI_BAD_FASTMAP;
+ goto free_hdr;
+ }
+ }
+
+ if (sqnum < be64_to_cpu(vh->sqnum))
+ sqnum = be64_to_cpu(vh->sqnum);
+
+ ret = ubi_io_read(ubi, ubi->fm_buf + (ubi->leb_size * i), pnum,
+ ubi->leb_start, ubi->leb_size);
+ if (ret && ret != UBI_IO_BITFLIPS) {
+ ubi_err("unable to read fastmap block# %i (PEB: %i, " \
+ "err: %i)", i, pnum, ret);
+ goto free_hdr;
+ }
+ }
+
+ kfree(fmsb);
+ fmsb = NULL;
+
+ fmsb2 = (struct ubi_fm_sb *)(ubi->fm_buf);
+ tmp_crc = be32_to_cpu(fmsb2->data_crc);
+ fmsb2->data_crc = 0;
+ crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size);
+ if (crc != tmp_crc) {
+ ubi_err("fastmap data CRC is invalid");
+ ubi_err("CRC should be: 0x%x, calc: 0x%x", tmp_crc, crc);
+ ret = UBI_BAD_FASTMAP;
+ goto free_hdr;
+ }
+
+ fmsb2->sqnum = sqnum;
+
+ fm->used_blocks = used_blocks;
+
+ ret = ubi_attach_fastmap(ubi, ai, fm);
+ if (ret) {
+ if (ret > 0)
+ ret = UBI_BAD_FASTMAP;
+ goto free_hdr;
+ }
+
+ for (i = 0; i < used_blocks; i++) {
+ struct ubi_wl_entry *e;
+
+ e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
+ if (!e) {
+ while (i--)
+ kmem_cache_free(ubi_wl_entry_slab, fm->e[i]);
+
+ ret = -ENOMEM;
+ goto free_hdr;
+ }
+
+ e->pnum = be32_to_cpu(fmsb2->block_loc[i]);
+ e->ec = be32_to_cpu(fmsb2->block_ec[i]);
+ fm->e[i] = e;
+ }
+
+ ubi->fm = fm;
+ ubi->fm_pool.max_size = ubi->fm->max_pool_size;
+ ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size;
+ ubi_msg("attached by fastmap");
+ ubi_msg("fastmap pool size: %d", ubi->fm_pool.max_size);
+ ubi_msg("fastmap WL pool size: %d", ubi->fm_wl_pool.max_size);
+ ubi->fm_disabled = 0;
+
+ ubi_free_vid_hdr(ubi, vh);
+ kfree(ech);
+out:
+ mutex_unlock(&ubi->fm_mutex);
+ if (ret == UBI_BAD_FASTMAP)
+ ubi_err("Attach by fastmap failed, doing a full scan!");
+ return ret;
+
+free_hdr:
+ ubi_free_vid_hdr(ubi, vh);
+ kfree(ech);
+free_fm_sb:
+ kfree(fmsb);
+ kfree(fm);
+ goto out;
+}
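The superblock CRC handling above is the usual self-describing checksum pattern: the CRC field inside the buffer is zeroed, the CRC is computed over the whole buffer, and only then is the field filled in; the reader repeats the same two steps before comparing. A userspace sketch of the pattern; the crc32 below is a plain software implementation standing in for the kernel's crc32(UBI_CRC32_INIT, ...):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct blob {                 /* hypothetical on-flash record */
        uint32_t data_crc;
        uint8_t payload[60];
    };

    static uint32_t crc32_sw(const void *buf, size_t len)
    {
        const uint8_t *p = buf;
        uint32_t crc = 0xffffffff;
        size_t i;
        int b;

        for (i = 0; i < len; i++) {
            crc ^= p[i];
            for (b = 0; b < 8; b++)
                crc = (crc >> 1) ^ (0xedb88320 & -(crc & 1));
        }
        return ~crc;
    }

    int main(void)
    {
        struct blob b;
        uint32_t stored;

        memset(&b, 0xab, sizeof(b));
        b.data_crc = 0;                       /* writer zeroes... */
        b.data_crc = crc32_sw(&b, sizeof(b)); /* ...then fills in */

        stored = b.data_crc;                  /* reader: same steps */
        b.data_crc = 0;
        printf("crc %s\n",
               stored == crc32_sw(&b, sizeof(b)) ? "ok" : "bad");
        return 0;
    }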
+
+/**
+ * ubi_write_fastmap - writes a fastmap.
+ * @ubi: UBI device object
+ * @new_fm: the to be written fastmap
+ *
+ * Returns 0 on success, < 0 indicates an internal error.
+ */
+static int ubi_write_fastmap(struct ubi_device *ubi,
+ struct ubi_fastmap_layout *new_fm)
+{
+ size_t fm_pos = 0;
+ void *fm_raw;
+ struct ubi_fm_sb *fmsb;
+ struct ubi_fm_hdr *fmh;
+ struct ubi_fm_scan_pool *fmpl1, *fmpl2;
+ struct ubi_fm_ec *fec;
+ struct ubi_fm_volhdr *fvh;
+ struct ubi_fm_eba *feba;
+ struct rb_node *node;
+ struct ubi_wl_entry *wl_e;
+ struct ubi_volume *vol;
+ struct ubi_vid_hdr *avhdr, *dvhdr;
+ struct ubi_work *ubi_wrk;
+ int ret, i, j, free_peb_count, used_peb_count, vol_count;
+ int scrub_peb_count, erase_peb_count;
+
+ fm_raw = ubi->fm_buf;
+ memset(ubi->fm_buf, 0, ubi->fm_size);
+
+ avhdr = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
+ if (!avhdr) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ dvhdr = new_fm_vhdr(ubi, UBI_FM_DATA_VOLUME_ID);
+ if (!dvhdr) {
+ ret = -ENOMEM;
+ goto out_kfree;
+ }
+
+ spin_lock(&ubi->volumes_lock);
+ spin_lock(&ubi->wl_lock);
+
+ fmsb = (struct ubi_fm_sb *)fm_raw;
+ fm_pos += sizeof(*fmsb);
+ ubi_assert(fm_pos <= ubi->fm_size);
+
+ fmh = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmh);
+ ubi_assert(fm_pos <= ubi->fm_size);
+
+ fmsb->magic = cpu_to_be32(UBI_FM_SB_MAGIC);
+ fmsb->version = UBI_FM_FMT_VERSION;
+ fmsb->used_blocks = cpu_to_be32(new_fm->used_blocks);
+ /* the max sqnum will be filled in while *reading* the fastmap */
+ fmsb->sqnum = 0;
+
+ fmh->magic = cpu_to_be32(UBI_FM_HDR_MAGIC);
+ free_peb_count = 0;
+ used_peb_count = 0;
+ scrub_peb_count = 0;
+ erase_peb_count = 0;
+ vol_count = 0;
+
+ fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmpl1);
+ fmpl1->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
+ fmpl1->size = cpu_to_be16(ubi->fm_pool.size);
+ fmpl1->max_size = cpu_to_be16(ubi->fm_pool.max_size);
+
+ for (i = 0; i < ubi->fm_pool.size; i++)
+ fmpl1->pebs[i] = cpu_to_be32(ubi->fm_pool.pebs[i]);
+
+ fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmpl2);
+ fmpl2->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
+ fmpl2->size = cpu_to_be16(ubi->fm_wl_pool.size);
+ fmpl2->max_size = cpu_to_be16(ubi->fm_wl_pool.max_size);
+
+ for (i = 0; i < ubi->fm_wl_pool.size; i++)
+ fmpl2->pebs[i] = cpu_to_be32(ubi->fm_wl_pool.pebs[i]);
+
+ for (node = rb_first(&ubi->free); node; node = rb_next(node)) {
+ wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
+ fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+
+ fec->pnum = cpu_to_be32(wl_e->pnum);
+ fec->ec = cpu_to_be32(wl_e->ec);
+
+ free_peb_count++;
+ fm_pos += sizeof(*fec);
+ ubi_assert(fm_pos <= ubi->fm_size);
+ }
+ fmh->free_peb_count = cpu_to_be32(free_peb_count);
+
+ for (node = rb_first(&ubi->used); node; node = rb_next(node)) {
+ wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
+ fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+
+ fec->pnum = cpu_to_be32(wl_e->pnum);
+ fec->ec = cpu_to_be32(wl_e->ec);
+
+ used_peb_count++;
+ fm_pos += sizeof(*fec);
+ ubi_assert(fm_pos <= ubi->fm_size);
+ }
+ fmh->used_peb_count = cpu_to_be32(used_peb_count);
+
+ for (node = rb_first(&ubi->scrub); node; node = rb_next(node)) {
+ wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
+ fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+
+ fec->pnum = cpu_to_be32(wl_e->pnum);
+ fec->ec = cpu_to_be32(wl_e->ec);
+
+ scrub_peb_count++;
+ fm_pos += sizeof(*fec);
+ ubi_assert(fm_pos <= ubi->fm_size);
+ }
+ fmh->scrub_peb_count = cpu_to_be32(scrub_peb_count);
+
+ list_for_each_entry(ubi_wrk, &ubi->works, list) {
+ if (ubi_is_erase_work(ubi_wrk)) {
+ wl_e = ubi_wrk->e;
+ ubi_assert(wl_e);
+
+ fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+
+ fec->pnum = cpu_to_be32(wl_e->pnum);
+ fec->ec = cpu_to_be32(wl_e->ec);
+
+ erase_peb_count++;
+ fm_pos += sizeof(*fec);
+ ubi_assert(fm_pos <= ubi->fm_size);
+ }
+ }
+ fmh->erase_peb_count = cpu_to_be32(erase_peb_count);
+
+ for (i = 0; i < UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT; i++) {
+ vol = ubi->volumes[i];
+
+ if (!vol)
+ continue;
+
+ vol_count++;
+
+ fvh = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fvh);
+ ubi_assert(fm_pos <= ubi->fm_size);
+
+ fvh->magic = cpu_to_be32(UBI_FM_VHDR_MAGIC);
+ fvh->vol_id = cpu_to_be32(vol->vol_id);
+ fvh->vol_type = vol->vol_type;
+ fvh->used_ebs = cpu_to_be32(vol->used_ebs);
+ fvh->data_pad = cpu_to_be32(vol->data_pad);
+ fvh->last_eb_bytes = cpu_to_be32(vol->last_eb_bytes);
+
+ ubi_assert(vol->vol_type == UBI_DYNAMIC_VOLUME ||
+ vol->vol_type == UBI_STATIC_VOLUME);
+
+ feba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*feba) + (sizeof(__be32) * vol->reserved_pebs);
+ ubi_assert(fm_pos <= ubi->fm_size);
+
+ for (j = 0; j < vol->reserved_pebs; j++)
+ feba->pnum[j] = cpu_to_be32(vol->eba_tbl[j]);
+
+ feba->reserved_pebs = cpu_to_be32(j);
+ feba->magic = cpu_to_be32(UBI_FM_EBA_MAGIC);
+ }
+ fmh->vol_count = cpu_to_be32(vol_count);
+ fmh->bad_peb_count = cpu_to_be32(ubi->bad_peb_count);
+
+ avhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+ avhdr->lnum = 0;
+
+ spin_unlock(&ubi->wl_lock);
+ spin_unlock(&ubi->volumes_lock);
+
+ dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum);
+ ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avhdr);
+ if (ret) {
+ ubi_err("unable to write vid_hdr to fastmap SB!");
+ goto out_kfree;
+ }
+
+ for (i = 0; i < new_fm->used_blocks; i++) {
+ fmsb->block_loc[i] = cpu_to_be32(new_fm->e[i]->pnum);
+ fmsb->block_ec[i] = cpu_to_be32(new_fm->e[i]->ec);
+ }
+
+ fmsb->data_crc = 0;
+ fmsb->data_crc = cpu_to_be32(crc32(UBI_CRC32_INIT, fm_raw,
+ ubi->fm_size));
+
+ for (i = 1; i < new_fm->used_blocks; i++) {
+ dvhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+ dvhdr->lnum = cpu_to_be32(i);
+ dbg_bld("writing fastmap data to PEB %i sqnum %llu",
+ new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum));
+ ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvhdr);
+ if (ret) {
+ ubi_err("unable to write vid_hdr to PEB %i!",
+ new_fm->e[i]->pnum);
+ goto out_kfree;
+ }
+ }
+
+ for (i = 0; i < new_fm->used_blocks; i++) {
+ ret = ubi_io_write(ubi, fm_raw + (i * ubi->leb_size),
+ new_fm->e[i]->pnum, ubi->leb_start, ubi->leb_size);
+ if (ret) {
+ ubi_err("unable to write fastmap to PEB %i!",
+ new_fm->e[i]->pnum);
+ goto out_kfree;
+ }
+ }
+
+ ubi_assert(new_fm);
+ ubi->fm = new_fm;
+
+ dbg_bld("fastmap written!");
+
+out_kfree:
+ ubi_free_vid_hdr(ubi, avhdr);
+ ubi_free_vid_hdr(ubi, dvhdr);
+out:
+ return ret;
+}
+
+/**
+ * erase_block - Manually erase a PEB.
+ * @ubi: UBI device object
+ * @pnum: PEB to be erased
+ *
+ * Returns the new EC value on success, < 0 indicates an internal error.
+ */
+static int erase_block(struct ubi_device *ubi, int pnum)
+{
+ int ret;
+ struct ubi_ec_hdr *ec_hdr;
+ long long ec;
+
+ ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+ if (!ec_hdr)
+ return -ENOMEM;
+
+ ret = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
+ if (ret < 0)
+ goto out;
+ else if (ret && ret != UBI_IO_BITFLIPS) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = ubi_io_sync_erase(ubi, pnum, 0);
+ if (ret < 0)
+ goto out;
+
+ ec = be64_to_cpu(ec_hdr->ec);
+ ec += ret;
+ if (ec > UBI_MAX_ERASECOUNTER) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ec_hdr->ec = cpu_to_be64(ec);
+ ret = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
+ if (ret < 0)
+ goto out;
+
+ ret = ec;
+out:
+ kfree(ec_hdr);
+ return ret;
+}
+
+/**
+ * invalidate_fastmap - destroys a fastmap.
+ * @ubi: UBI device object
+ * @fm: the fastmap to be destroyed
+ *
+ * Returns 0 on success, < 0 indicates an internal error.
+ */
+static int invalidate_fastmap(struct ubi_device *ubi,
+ struct ubi_fastmap_layout *fm)
+{
+ int ret, i;
+ struct ubi_vid_hdr *vh;
+
+ ret = erase_block(ubi, fm->e[0]->pnum);
+ if (ret < 0)
+ return ret;
+
+ vh = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
+ if (!vh)
+ return -ENOMEM;
+
+ /* deleting the current fastmap SB is not enough, an old SB may exist,
+ * so create a (corrupted) SB such that fastmap will find it and fall
+ * back to scanning mode in any case */
+ vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+ ret = ubi_io_write_vid_hdr(ubi, fm->e[0]->pnum, vh);
+
+ for (i = 0; i < fm->used_blocks; i++)
+ ubi_wl_put_fm_peb(ubi, fm->e[i], i, fm->to_be_tortured[i]);
+
+ return ret;
+}
+
+/**
+ * ubi_update_fastmap - write a new fastmap; called by UBI if a volume
+ * changes or a fastmap pool becomes full.
+ * @ubi: UBI device object
+ *
+ * Returns 0 on success, < 0 indicates an internal error.
+ */
+int ubi_update_fastmap(struct ubi_device *ubi)
+{
+ int ret, i;
+ struct ubi_fastmap_layout *new_fm, *old_fm;
+ struct ubi_wl_entry *tmp_e;
+
+ mutex_lock(&ubi->fm_mutex);
+
+ ubi_refill_pools(ubi);
+
+ if (ubi->ro_mode || ubi->fm_disabled) {
+ mutex_unlock(&ubi->fm_mutex);
+ return 0;
+ }
+
+ ret = ubi_ensure_anchor_pebs(ubi);
+ if (ret) {
+ mutex_unlock(&ubi->fm_mutex);
+ return ret;
+ }
+
+ new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
+ if (!new_fm) {
+ mutex_unlock(&ubi->fm_mutex);
+ return -ENOMEM;
+ }
+
+ new_fm->used_blocks = ubi->fm_size / ubi->leb_size;
+
+ for (i = 0; i < new_fm->used_blocks; i++) {
+ new_fm->e[i] = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
+ if (!new_fm->e[i]) {
+ while (i--)
+ kfree(new_fm->e[i]);
+
+ kfree(new_fm);
+ mutex_unlock(&ubi->fm_mutex);
+ return -ENOMEM;
+ }
+ }
+
+ old_fm = ubi->fm;
+ ubi->fm = NULL;
+
+ if (new_fm->used_blocks > UBI_FM_MAX_BLOCKS) {
+ ubi_err("fastmap too large");
+ ret = -ENOSPC;
+ goto err;
+ }
+
+ for (i = 1; i < new_fm->used_blocks; i++) {
+ spin_lock(&ubi->wl_lock);
+ tmp_e = ubi_wl_get_fm_peb(ubi, 0);
+ spin_unlock(&ubi->wl_lock);
+
+ if (!tmp_e && !old_fm) {
+ int j;
+ ubi_err("could not get any free erase block");
+
+ for (j = 1; j < i; j++)
+ ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0);
+
+ ret = -ENOSPC;
+ goto err;
+ } else if (!tmp_e && old_fm) {
+ ret = erase_block(ubi, old_fm->e[i]->pnum);
+ if (ret < 0) {
+ int j;
+
+ for (j = 1; j < i; j++)
+ ubi_wl_put_fm_peb(ubi, new_fm->e[j],
+ j, 0);
+
+ ubi_err("could not erase old fastmap PEB");
+ goto err;
+ }
+
+ new_fm->e[i]->pnum = old_fm->e[i]->pnum;
+ new_fm->e[i]->ec = old_fm->e[i]->ec;
+ } else {
+ new_fm->e[i]->pnum = tmp_e->pnum;
+ new_fm->e[i]->ec = tmp_e->ec;
+
+ if (old_fm)
+ ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
+ old_fm->to_be_tortured[i]);
+ }
+ }
+
+ spin_lock(&ubi->wl_lock);
+ tmp_e = ubi_wl_get_fm_peb(ubi, 1);
+ spin_unlock(&ubi->wl_lock);
+
+ if (old_fm) {
+ /* no fresh anchor PEB was found, reuse the old one */
+ if (!tmp_e) {
+ ret = erase_block(ubi, old_fm->e[0]->pnum);
+ if (ret < 0) {
+ int i;
+ ubi_err("could not erase old anchor PEB");
+
+ for (i = 1; i < new_fm->used_blocks; i++)
+ ubi_wl_put_fm_peb(ubi, new_fm->e[i],
+ i, 0);
+ goto err;
+ }
+
+ new_fm->e[0]->pnum = old_fm->e[0]->pnum;
+ new_fm->e[0]->ec = ret;
+ } else {
+ /* we've got a new anchor PEB, return the old one */
+ ubi_wl_put_fm_peb(ubi, old_fm->e[0], 0,
+ old_fm->to_be_tortured[0]);
+
+ new_fm->e[0]->pnum = tmp_e->pnum;
+ new_fm->e[0]->ec = tmp_e->ec;
+ }
+ } else {
+ if (!tmp_e) {
+ int i;
+ ubi_err("could not find any anchor PEB");
+
+ for (i = 1; i < new_fm->used_blocks; i++)
+ ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0);
+
+ ret = -ENOSPC;
+ goto err;
+ }
+
+ new_fm->e[0]->pnum = tmp_e->pnum;
+ new_fm->e[0]->ec = tmp_e->ec;
+ }
+
+ down_write(&ubi->work_sem);
+ down_write(&ubi->fm_sem);
+ ret = ubi_write_fastmap(ubi, new_fm);
+ up_write(&ubi->fm_sem);
+ up_write(&ubi->work_sem);
+
+ if (ret)
+ goto err;
+
+out_unlock:
+ mutex_unlock(&ubi->fm_mutex);
+ kfree(old_fm);
+ return ret;
+
+err:
+ kfree(new_fm);
+
+ ubi_warn("Unable to write new fastmap, err=%i", ret);
+
+ ret = 0;
+ if (old_fm) {
+ ret = invalidate_fastmap(ubi, old_fm);
+ if (ret < 0)
+ ubi_err("Unable to invalidiate current fastmap!");
+ else if (ret)
+ ret = 0;
+ }
+ goto out_unlock;
+}
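
The layout serialized above implies a worst-case size, which ubi_calc_fm_size() (declared in ubi.h below) has to bound before @fm_buf is allocated. A minimal sketch of that accounting, assuming only the on-flash structures from ubi-media.h below; the name and exact arithmetic are illustrative, not the patch's implementation:

static size_t fm_size_sketch(struct ubi_device *ubi)
{
	/* super block, header and the two scan pools are fixed cost */
	size_t size = sizeof(struct ubi_fm_sb) +
		      sizeof(struct ubi_fm_hdr) +
		      2 * sizeof(struct ubi_fm_scan_pool);

	/* worst case: every PEB gets an EC record and an EBA slot */
	size += ubi->peb_count * (sizeof(struct ubi_fm_ec) + sizeof(__be32));

	/* one volume header plus one EBA header per possible volume */
	size += (UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT) *
		(sizeof(struct ubi_fm_volhdr) + sizeof(struct ubi_fm_eba));

	return ALIGN(size, ubi->leb_size); /* fm_buf is written LEB-wise */
}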
diff --git a/drivers/mtd/ubi/ubi-media.h b/drivers/mtd/ubi/ubi-media.h
index 468ffbc..ac2b24d 100644
--- a/drivers/mtd/ubi/ubi-media.h
+++ b/drivers/mtd/ubi/ubi-media.h
@@ -375,4 +375,141 @@ struct ubi_vtbl_record {
__be32 crc;
} __packed;
+/* UBI fastmap on-flash data structures */
+
+#define UBI_FM_SB_VOLUME_ID (UBI_LAYOUT_VOLUME_ID + 1)
+#define UBI_FM_DATA_VOLUME_ID (UBI_LAYOUT_VOLUME_ID + 2)
+
+/* fastmap on-flash data structure format version */
+#define UBI_FM_FMT_VERSION 1
+
+#define UBI_FM_SB_MAGIC 0x7B11D69F
+#define UBI_FM_HDR_MAGIC 0xD4B82EF7
+#define UBI_FM_VHDR_MAGIC 0xFA370ED1
+#define UBI_FM_POOL_MAGIC 0x67AF4D08
+#define UBI_FM_EBA_MAGIC 0xf0c040a8
+
+/* A fastmap super block can be located between PEB 0 and
+ * UBI_FM_MAX_START */
+#define UBI_FM_MAX_START 64
+
+/* A fastmap can use up to UBI_FM_MAX_BLOCKS PEBs */
+#define UBI_FM_MAX_BLOCKS 32
+
+/* 5% of the total number of PEBs have to be scanned while attaching
+ * from a fastmap.
+ * But the size of this pool is limited to be between UBI_FM_MIN_POOL_SIZE and
+ * UBI_FM_MAX_POOL_SIZE */
+#define UBI_FM_MIN_POOL_SIZE 8
+#define UBI_FM_MAX_POOL_SIZE 256
+
+#define UBI_FM_WL_POOL_SIZE 25
+
+/**
+ * struct ubi_fm_sb - UBI fastmap super block
+ * @magic: fastmap super block magic number (%UBI_FM_SB_MAGIC)
+ * @version: format version of this fastmap
+ * @data_crc: CRC over the fastmap data
+ * @used_blocks: number of PEBs used by this fastmap
+ * @block_loc: an array containing the location of all PEBs of the fastmap
+ * @block_ec: the erase counter of each used PEB
+ * @sqnum: highest sequence number value at the time the fastmap was created
+ *
+ */
+struct ubi_fm_sb {
+ __be32 magic;
+ __u8 version;
+ __u8 padding1[3];
+ __be32 data_crc;
+ __be32 used_blocks;
+ __be32 block_loc[UBI_FM_MAX_BLOCKS];
+ __be32 block_ec[UBI_FM_MAX_BLOCKS];
+ __be64 sqnum;
+ __u8 padding2[32];
+} __packed;
+
+/**
+ * struct ubi_fm_hdr - header of the fastmap data set
+ * @magic: fastmap header magic number (%UBI_FM_HDR_MAGIC)
+ * @free_peb_count: number of free PEBs known by this fastmap
+ * @used_peb_count: number of used PEBs known by this fastmap
+ * @scrub_peb_count: number of PEBs to be scrubbed known by this fastmap
+ * @bad_peb_count: number of bad PEBs known by this fastmap
+ * @erase_peb_count: number of PEBs which still have to be erased
+ * @vol_count: number of UBI volumes known by this fastmap
+ */
+struct ubi_fm_hdr {
+ __be32 magic;
+ __be32 free_peb_count;
+ __be32 used_peb_count;
+ __be32 scrub_peb_count;
+ __be32 bad_peb_count;
+ __be32 erase_peb_count;
+ __be32 vol_count;
+ __u8 padding[4];
+} __packed;
+
+/* struct ubi_fm_hdr is followed by two struct ubi_fm_scan_pool */
+
+/**
+ * struct ubi_fm_scan_pool - Fastmap pool PEBs to be scanned while attaching
+ * @magic: pool magic number (%UBI_FM_POOL_MAGIC)
+ * @size: current pool size
+ * @max_size: maximal pool size
+ * @pebs: an array containing the location of all PEBs in this pool
+ */
+struct ubi_fm_scan_pool {
+ __be32 magic;
+ __be16 size;
+ __be16 max_size;
+ __be32 pebs[UBI_FM_MAX_POOL_SIZE];
+ __be32 padding[4];
+} __packed;
+
+/* ubi_fm_scan_pool is followed by nfree+nused struct ubi_fm_ec records */
+
+/**
+ * struct ubi_fm_ec - stores the erase counter of a PEB
+ * @pnum: PEB number
+ * @ec: ec of this PEB
+ */
+struct ubi_fm_ec {
+ __be32 pnum;
+ __be32 ec;
+} __packed;
+
+/**
+ * struct ubi_fm_volhdr - Fastmap volume header
+ * It identifies the start of an EBA table.
+ * @magic: Fastmap volume header magic number (%UBI_FM_VHDR_MAGIC)
+ * @vol_id: volume id of the fastmapped volume
+ * @vol_type: type of the fastmapped volume
+ * @data_pad: data_pad value of the fastmapped volume
+ * @used_ebs: number of used LEBs within this volume
+ * @last_eb_bytes: number of bytes used in the last LEB
+ */
+struct ubi_fm_volhdr {
+ __be32 magic;
+ __be32 vol_id;
+ __u8 vol_type;
+ __u8 padding1[3];
+ __be32 data_pad;
+ __be32 used_ebs;
+ __be32 last_eb_bytes;
+ __u8 padding2[8];
+} __packed;
+
+/* struct ubi_fm_volhdr is followed by one struct ubi_fm_eba record */
+
+/**
+ * struct ubi_fm_eba - denotes an association between a PEB and a LEB
+ * @magic: EBA table magic number
+ * @reserved_pebs: number of table entries
+ * @pnum: PEB number of LEB (LEB is the index)
+ */
+struct ubi_fm_eba {
+ __be32 magic;
+ __be32 reserved_pebs;
+ __be32 pnum[0];
+} __packed;
#endif /* !__UBI_MEDIA_H__ */
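
Note how @data_crc interacts with the write path in fastmap.c above: the CRC is computed over the whole @fm_size buffer with the field itself zeroed, so any reader must mirror that. A sketch of the check, under the assumption that fm_raw holds the bytes read back from flash (function name illustrative):

static int fm_crc_check_sketch(struct ubi_device *ubi, void *fm_raw)
{
	struct ubi_fm_sb *fmsb = fm_raw;
	u32 stored = be32_to_cpu(fmsb->data_crc);
	u32 calc;

	/* the CRC stored on flash covers data_crc as zero */
	fmsb->data_crc = 0;
	calc = crc32(UBI_CRC32_INIT, fm_raw, ubi->fm_size);
	fmsb->data_crc = cpu_to_be32(stored);

	return calc == stored ? 0 : UBI_BAD_FASTMAP;
}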
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 383ee43..7d57469 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -133,6 +133,17 @@ enum {
MOVE_RETRY,
};
+/*
+ * Return codes of the fastmap sub-system
+ *
+ * UBI_NO_FASTMAP: No fastmap super block was found
+ * UBI_BAD_FASTMAP: A fastmap was found but it's unusable
+ */
+enum {
+ UBI_NO_FASTMAP = 1,
+ UBI_BAD_FASTMAP,
+};
+
/**
* struct ubi_wl_entry - wear-leveling entry.
* @u.rb: link in the corresponding (free/used) RB-tree
@@ -199,6 +210,41 @@ struct ubi_rename_entry {
struct ubi_volume_desc;
/**
+ * struct ubi_fastmap_layout - in-memory fastmap data structure.
+ * @e: PEBs used by the current fastmap
+ * @to_be_tortured: if non-zero, torture this PEB when it is put back
+ * @used_blocks: number of used PEBs
+ * @max_pool_size: maximal size of the user pool
+ * @max_wl_pool_size: maximal size of the pool used by the WL sub-system
+ */
+struct ubi_fastmap_layout {
+ struct ubi_wl_entry *e[UBI_FM_MAX_BLOCKS];
+ int to_be_tortured[UBI_FM_MAX_BLOCKS];
+ int used_blocks;
+ int max_pool_size;
+ int max_wl_pool_size;
+};
+
+/**
+ * struct ubi_fm_pool - in-memory fastmap pool
+ * @pebs: PEBs in this pool
+ * @used: number of used PEBs
+ * @size: total number of PEBs in this pool
+ * @max_size: maximal size of the pool
+ *
+ * A pool is filled with up to @max_size PEBs.
+ * If all PEBs within the pool are used, a new fastmap is written
+ * to the flash and the pool is refilled with empty PEBs.
+ *
+ */
+struct ubi_fm_pool {
+ int pebs[UBI_FM_MAX_POOL_SIZE];
+ int used;
+ int size;
+ int max_size;
+};
+
+/**
* struct ubi_volume - UBI volume description data structure.
* @dev: device object to make use of the the Linux device model
* @cdev: character device object to create character device
@@ -333,9 +379,21 @@ struct ubi_wl_entry;
* @ltree: the lock tree
* @alc_mutex: serializes "atomic LEB change" operations
*
+ * @fm_disabled: non-zero if fastmap is disabled (default)
+ * @fm: in-memory data structure of the currently used fastmap
+ * @fm_pool: in-memory data structure of the fastmap pool
+ * @fm_wl_pool: in-memory data structure of the fastmap pool used by the WL
+ * sub-system
+ * @fm_mutex: serializes ubi_update_fastmap() and protects @fm_buf
+ * @fm_buf: vmalloc()'d buffer which holds the raw fastmap
+ * @fm_size: fastmap size in bytes
+ * @fm_sem: allows ubi_update_fastmap() to block EBA table changes
+ * @fm_work: fastmap work queue
+ *
* @used: RB-tree of used physical eraseblocks
* @erroneous: RB-tree of erroneous used physical eraseblocks
* @free: RB-tree of free physical eraseblocks
+ * @free_count: number of elements in @free
* @scrub: RB-tree of physical eraseblocks which need scrubbing
* @pq: protection queue (contain physical eraseblocks which are temporarily
* protected from the wear-leveling worker)
@@ -426,10 +484,22 @@ struct ubi_device {
struct rb_root ltree;
struct mutex alc_mutex;
+ /* Fastmap stuff */
+ int fm_disabled;
+ struct ubi_fastmap_layout *fm;
+ struct ubi_fm_pool fm_pool;
+ struct ubi_fm_pool fm_wl_pool;
+ struct rw_semaphore fm_sem;
+ struct mutex fm_mutex;
+ void *fm_buf;
+ size_t fm_size;
+ struct work_struct fm_work;
+
/* Wear-leveling sub-system's stuff */
struct rb_root used;
struct rb_root erroneous;
struct rb_root free;
+ int free_count;
struct rb_root scrub;
struct list_head pq[UBI_PROT_QUEUE_LEN];
int pq_head;
@@ -596,6 +666,32 @@ struct ubi_attach_info {
struct kmem_cache *aeb_slab_cache;
};
+/**
+ * struct ubi_work - UBI work description data structure.
+ * @list: a link in the list of pending works
+ * @func: worker function
+ * @e: physical eraseblock to erase
+ * @vol_id: the volume ID on which this erasure is being performed
+ * @lnum: the logical eraseblock number
+ * @torture: if the physical eraseblock has to be tortured
+ * @anchor: produce an anchor PEB to be used by fastmap
+ *
+ * The @func pointer points to the worker function. If the @cancel argument is
+ * not zero, the worker has to free the resources and exit immediately. The
+ * worker has to return zero in case of success and a negative error code in
+ * case of failure.
+ */
+struct ubi_work {
+ struct list_head list;
+ int (*func)(struct ubi_device *ubi, struct ubi_work *wrk, int cancel);
+ /* The below fields are only relevant to erasure works */
+ struct ubi_wl_entry *e;
+ int vol_id;
+ int lnum;
+ int torture;
+ int anchor;
+};
+
#include "debug.h"
extern struct kmem_cache *ubi_wl_entry_slab;
@@ -606,7 +702,7 @@ extern struct class *ubi_class;
extern struct mutex ubi_devices_mutex;
extern struct blocking_notifier_head ubi_notifiers;
-/* scan.c */
+/* attach.c */
int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum,
int ec, const struct ubi_vid_hdr *vid_hdr, int bitflips);
struct ubi_ainf_volume *ubi_find_av(const struct ubi_attach_info *ai,
@@ -614,7 +710,7 @@ struct ubi_ainf_volume *ubi_find_av(const struct ubi_attach_info *ai,
void ubi_remove_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av);
struct ubi_ainf_peb *ubi_early_get_peb(struct ubi_device *ubi,
struct ubi_attach_info *ai);
-int ubi_attach(struct ubi_device *ubi);
+int ubi_attach(struct ubi_device *ubi, int force_scan);
void ubi_destroy_ai(struct ubi_attach_info *ai);
/* vtbl.c */
@@ -664,6 +760,9 @@ int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
struct ubi_vid_hdr *vid_hdr);
int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai);
+unsigned long long ubi_next_sqnum(struct ubi_device *ubi);
+int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap,
+ struct ubi_attach_info *ai_scan);
/* wl.c */
int ubi_wl_get_peb(struct ubi_device *ubi);
@@ -674,6 +773,12 @@ int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum);
int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai);
void ubi_wl_close(struct ubi_device *ubi);
int ubi_thread(void *u);
+struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor);
+int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *used_e,
+ int lnum, int torture);
+int ubi_is_erase_work(struct ubi_work *wrk);
+void ubi_refill_pools(struct ubi_device *ubi);
+int ubi_ensure_anchor_pebs(struct ubi_device *ubi);
/* io.c */
int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
@@ -711,6 +816,15 @@ void ubi_free_internal_volumes(struct ubi_device *ubi);
void ubi_do_get_device_info(struct ubi_device *ubi, struct ubi_device_info *di);
void ubi_do_get_volume_info(struct ubi_device *ubi, struct ubi_volume *vol,
struct ubi_volume_info *vi);
+/* scan.c */
+int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
+ int pnum, const struct ubi_vid_hdr *vid_hdr);
+
+/* fastmap.c */
+size_t ubi_calc_fm_size(struct ubi_device *ubi);
+int ubi_update_fastmap(struct ubi_device *ubi);
+int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
+ int fm_anchor);
/*
* ubi_rb_for_each_entry - walk an RB-tree.
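
The kerneldoc of struct ubi_fm_pool above describes a consume-until-empty contract: @used advances towards @size, and exhaustion forces a new fastmap plus a refill. A sketch of the consumer side (illustrative only; the real consumers are ubi_wl_get_peb() and get_peb_for_wl() in wl.c below, which additionally take @wl_lock and the protection queue into account):

static int pool_get_sketch(struct ubi_device *ubi, struct ubi_fm_pool *pool)
{
	if (!pool->size || pool->used == pool->size)
		return -ENOSPC; /* caller triggers ubi_update_fastmap() */

	return pool->pebs[pool->used++];
}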
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 032fc57..da7b449 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -135,36 +135,48 @@
*/
#define WL_MAX_FAILURES 32
-/**
- * struct ubi_work - UBI work description data structure.
- * @list: a link in the list of pending works
- * @func: worker function
- * @e: physical eraseblock to erase
- * @vol_id: the volume ID on which this erasure is being performed
- * @lnum: the logical eraseblock number
- * @torture: if the physical eraseblock has to be tortured
- *
- * The @func pointer points to the worker function. If the @cancel argument is
- * not zero, the worker has to free the resources and exit immediately. The
- * worker has to return zero in case of success and a negative error code in
- * case of failure.
- */
-struct ubi_work {
- struct list_head list;
- int (*func)(struct ubi_device *ubi, struct ubi_work *wrk, int cancel);
- /* The below fields are only relevant to erasure works */
- struct ubi_wl_entry *e;
- int vol_id;
- int lnum;
- int torture;
-};
-
static int self_check_ec(struct ubi_device *ubi, int pnum, int ec);
static int self_check_in_wl_tree(const struct ubi_device *ubi,
struct ubi_wl_entry *e, struct rb_root *root);
static int self_check_in_pq(const struct ubi_device *ubi,
struct ubi_wl_entry *e);
+#ifdef CONFIG_MTD_UBI_FASTMAP
+/**
+ * update_fastmap_work_fn - calls ubi_update_fastmap from a work queue
+ * @wrk: the work description object
+ */
+static void update_fastmap_work_fn(struct work_struct *wrk)
+{
+ struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work);
+ ubi_update_fastmap(ubi);
+}
+
+/**
+ * ubi_is_fm_block - returns 1 if a PEB is currently used in a fastmap.
+ * @ubi: UBI device description object
+ * @pnum: the PEB to be checked
+ */
+static int ubi_is_fm_block(struct ubi_device *ubi, int pnum)
+{
+ int i;
+
+ if (!ubi->fm)
+ return 0;
+
+ for (i = 0; i < ubi->fm->used_blocks; i++)
+ if (ubi->fm->e[i]->pnum == pnum)
+ return 1;
+
+ return 0;
+}
+#else
+static int ubi_is_fm_block(struct ubi_device *ubi, int pnum)
+{
+ return 0;
+}
+#endif
+
/**
* wl_tree_add - add a wear-leveling entry to a WL RB-tree.
* @e: the wear-leveling entry to add
@@ -261,18 +273,16 @@ static int produce_free_peb(struct ubi_device *ubi)
{
int err;
- spin_lock(&ubi->wl_lock);
while (!ubi->free.rb_node) {
spin_unlock(&ubi->wl_lock);
dbg_wl("do one work synchronously");
err = do_work(ubi);
- if (err)
- return err;
spin_lock(&ubi->wl_lock);
+ if (err)
+ return err;
}
- spin_unlock(&ubi->wl_lock);
return 0;
}
@@ -339,16 +349,18 @@ static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
/**
* find_wl_entry - find wear-leveling entry closest to certain erase counter.
+ * @ubi: UBI device description object
* @root: the RB-tree where to look for
* @diff: maximum possible difference from the smallest erase counter
*
* This function looks for a wear leveling entry with erase counter closest to
* min + @diff, where min is the smallest erase counter.
*/
-static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int diff)
+static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi,
+ struct rb_root *root, int diff)
{
struct rb_node *p;
- struct ubi_wl_entry *e;
+ struct ubi_wl_entry *e, *prev_e = NULL;
int max;
e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
@@ -363,35 +375,143 @@ static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int diff)
p = p->rb_left;
else {
p = p->rb_right;
+ prev_e = e;
e = e1;
}
}
+ /* If no fastmap has been written and this WL entry can be used
+ * as anchor PEB, hold it back and return the second best WL entry
+ * such that fastmap can use the anchor PEB later. */
+ if (prev_e && !ubi->fm_disabled &&
+ !ubi->fm && e->pnum < UBI_FM_MAX_START)
+ return prev_e;
+
return e;
}
/**
- * ubi_wl_get_peb - get a physical eraseblock.
+ * find_mean_wl_entry - find wear-leveling entry with medium erase counter.
+ * @ubi: UBI device description object
+ * @root: the RB-tree where to look for
+ *
+ * This function looks for a wear leveling entry with a medium erase counter,
+ * which is less than the lowest erase counter plus
+ * %WL_FREE_MAX_DIFF/2.
+ */
+static struct ubi_wl_entry *find_mean_wl_entry(struct ubi_device *ubi,
+ struct rb_root *root)
+{
+ struct ubi_wl_entry *e, *first, *last;
+
+ first = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
+ last = rb_entry(rb_last(root), struct ubi_wl_entry, u.rb);
+
+ if (last->ec - first->ec < WL_FREE_MAX_DIFF) {
+ e = rb_entry(root->rb_node, struct ubi_wl_entry, u.rb);
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ /* If no fastmap has been written and this WL entry can be used
+ * as anchor PEB, hold it back and return the second best
+ * WL entry such that fastmap can use the anchor PEB later. */
+ if (e && !ubi->fm_disabled && !ubi->fm &&
+ e->pnum < UBI_FM_MAX_START)
+ e = rb_entry(rb_next(root->rb_node),
+ struct ubi_wl_entry, u.rb);
+#endif
+ } else
+ e = find_wl_entry(ubi, root, WL_FREE_MAX_DIFF/2);
+
+ return e;
+}
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+/**
+ * find_anchor_wl_entry - find a wear-leveling entry to be used as anchor PEB.
+ * @root: the RB-tree where to look for
+ */
+static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root)
+{
+ struct rb_node *p;
+ struct ubi_wl_entry *e, *victim = NULL;
+ int max_ec = UBI_MAX_ERASECOUNTER;
+
+ ubi_rb_for_each_entry(p, e, root, u.rb) {
+ if (e->pnum < UBI_FM_MAX_START && e->ec < max_ec) {
+ victim = e;
+ max_ec = e->ec;
+ }
+ }
+
+ return victim;
+}
+
+static int anchor_pebs_available(struct rb_root *root)
+{
+ struct rb_node *p;
+ struct ubi_wl_entry *e;
+
+ ubi_rb_for_each_entry(p, e, root, u.rb)
+ if (e->pnum < UBI_FM_MAX_START)
+ return 1;
+
+ return 0;
+}
+
+/**
+ * ubi_wl_get_fm_peb - find a physical erase block with a given maximal number.
+ * @ubi: UBI device description object
+ * @anchor: This PEB will be used as anchor PEB by fastmap
+ *
+ * The function returns a free physical erase block (one below
+ * %UBI_FM_MAX_START if @anchor is set) and removes it from the wl subsystem.
+ * Must be called with wl_lock held!
+ */
+struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
+{
+ struct ubi_wl_entry *e = NULL;
+
+ if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1))
+ goto out;
+
+ if (anchor)
+ e = find_anchor_wl_entry(&ubi->free);
+ else
+ e = find_mean_wl_entry(ubi, &ubi->free);
+
+ if (!e)
+ goto out;
+
+ self_check_in_wl_tree(ubi, e, &ubi->free);
+
+ /* remove it from the free list,
+ * the wl subsystem no longer knows this erase block */
+ rb_erase(&e->u.rb, &ubi->free);
+ ubi->free_count--;
+out:
+ return e;
+}
+#endif
+
+/**
+ * __wl_get_peb - get a physical eraseblock.
* @ubi: UBI device description object
*
* This function returns a physical eraseblock in case of success and a
* negative error code in case of failure. Might sleep.
*/
-int ubi_wl_get_peb(struct ubi_device *ubi)
+static int __wl_get_peb(struct ubi_device *ubi)
{
int err;
- struct ubi_wl_entry *e, *first, *last;
+ struct ubi_wl_entry *e;
retry:
- spin_lock(&ubi->wl_lock);
if (!ubi->free.rb_node) {
if (ubi->works_count == 0) {
- ubi_assert(list_empty(&ubi->works));
ubi_err("no free eraseblocks");
- spin_unlock(&ubi->wl_lock);
+ ubi_assert(list_empty(&ubi->works));
return -ENOSPC;
}
- spin_unlock(&ubi->wl_lock);
err = produce_free_peb(ubi);
if (err < 0)
@@ -399,13 +519,11 @@ retry:
goto retry;
}
- first = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, u.rb);
- last = rb_entry(rb_last(&ubi->free), struct ubi_wl_entry, u.rb);
-
- if (last->ec - first->ec < WL_FREE_MAX_DIFF)
- e = rb_entry(ubi->free.rb_node, struct ubi_wl_entry, u.rb);
- else
- e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF/2);
+ e = find_mean_wl_entry(ubi, &ubi->free);
+ if (!e) {
+ ubi_err("no free eraseblocks");
+ return -ENOSPC;
+ }
self_check_in_wl_tree(ubi, e, &ubi->free);
@@ -414,10 +532,14 @@ retry:
* be protected from being moved for some time.
*/
rb_erase(&e->u.rb, &ubi->free);
+ ubi->free_count--;
dbg_wl("PEB %d EC %d", e->pnum, e->ec);
+#ifndef CONFIG_MTD_UBI_FASTMAP
+ /* We have to enqueue e only if fastmap is disabled;
+ * if fastmap is enabled, prot_queue_add() will be called by
+ * ubi_wl_get_peb() after removing e from the pool. */
prot_queue_add(ubi, e);
- spin_unlock(&ubi->wl_lock);
-
+#endif
err = ubi_self_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset,
ubi->peb_size - ubi->vid_hdr_aloffset);
if (err) {
@@ -428,6 +550,150 @@ retry:
return e->pnum;
}
+#ifdef CONFIG_MTD_UBI_FASTMAP
+/**
+ * return_unused_pool_pebs - returns unused PEBs to the free tree.
+ * @ubi: UBI device description object
+ * @pool: fastmap pool description object
+ */
+static void return_unused_pool_pebs(struct ubi_device *ubi,
+ struct ubi_fm_pool *pool)
+{
+ int i;
+ struct ubi_wl_entry *e;
+
+ for (i = pool->used; i < pool->size; i++) {
+ e = ubi->lookuptbl[pool->pebs[i]];
+ wl_tree_add(e, &ubi->free);
+ ubi->free_count++;
+ }
+}
+
+/**
+ * refill_wl_pool - refills the fastmap pool used by the
+ * WL sub-system.
+ * @ubi: UBI device description object
+ */
+static void refill_wl_pool(struct ubi_device *ubi)
+{
+ struct ubi_wl_entry *e;
+ struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
+
+ return_unused_pool_pebs(ubi, pool);
+
+ for (pool->size = 0; pool->size < pool->max_size; pool->size++) {
+ if (!ubi->free.rb_node ||
+ (ubi->free_count - ubi->beb_rsvd_pebs < 5))
+ break;
+
+ e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
+ self_check_in_wl_tree(ubi, e, &ubi->free);
+ rb_erase(&e->u.rb, &ubi->free);
+ ubi->free_count--;
+
+ pool->pebs[pool->size] = e->pnum;
+ }
+ pool->used = 0;
+}
+
+/**
+ * refill_wl_user_pool - refills the fastmap pool used by ubi_wl_get_peb.
+ * @ubi: UBI device description object
+ */
+static void refill_wl_user_pool(struct ubi_device *ubi)
+{
+ struct ubi_fm_pool *pool = &ubi->fm_pool;
+
+ return_unused_pool_pebs(ubi, pool);
+
+ for (pool->size = 0; pool->size < pool->max_size; pool->size++) {
+ if (!ubi->free.rb_node ||
+ (ubi->free_count - ubi->beb_rsvd_pebs < 1))
+ break;
+
+ pool->pebs[pool->size] = __wl_get_peb(ubi);
+ if (pool->pebs[pool->size] < 0)
+ break;
+ }
+ pool->used = 0;
+}
+
+/**
+ * ubi_refill_pools - refills all fastmap PEB pools.
+ * @ubi: UBI device description object
+ */
+void ubi_refill_pools(struct ubi_device *ubi)
+{
+ spin_lock(&ubi->wl_lock);
+ refill_wl_pool(ubi);
+ refill_wl_user_pool(ubi);
+ spin_unlock(&ubi->wl_lock);
+}
+
+/* ubi_wl_get_peb - works exactly like __wl_get_peb but keeps track of
+ * the fastmap pool.
+ */
+int ubi_wl_get_peb(struct ubi_device *ubi)
+{
+ int ret;
+ struct ubi_fm_pool *pool = &ubi->fm_pool;
+ struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
+
+ if (!pool->size || !wl_pool->size || pool->used == pool->size ||
+ wl_pool->used == wl_pool->size)
+ ubi_update_fastmap(ubi);
+
+ /* we did not get a single free PEB */
+ if (!pool->size)
+ ret = -ENOSPC;
+ else {
+ spin_lock(&ubi->wl_lock);
+ ret = pool->pebs[pool->used++];
+ prot_queue_add(ubi, ubi->lookuptbl[ret]);
+ spin_unlock(&ubi->wl_lock);
+ }
+
+ return ret;
+}
+
+/* get_peb_for_wl - returns a PEB to be used internally by the WL sub-system.
+ *
+ * @ubi: UBI device description object
+ */
+static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
+{
+ struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
+ int pnum;
+
+ if (pool->used == pool->size || !pool->size) {
+ /* We cannot update the fastmap here because this
+ * function is called in atomic context.
+ * Let's fail here and refill/update it as soon as possible. */
+ schedule_work(&ubi->fm_work);
+ return NULL;
+ } else {
+ pnum = pool->pebs[pool->used++];
+ return ubi->lookuptbl[pnum];
+ }
+}
+#else
+static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
+{
+ return find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
+}
+
+int ubi_wl_get_peb(struct ubi_device *ubi)
+{
+ int peb;
+
+ spin_lock(&ubi->wl_lock);
+ peb = __wl_get_peb(ubi);
+ spin_unlock(&ubi->wl_lock);
+
+ return peb;
+}
+#endif
+
/**
* prot_queue_del - remove a physical eraseblock from the protection queue.
* @ubi: UBI device description object
@@ -558,14 +824,14 @@ repeat:
}
/**
- * schedule_ubi_work - schedule a work.
+ * __schedule_ubi_work - schedule a work.
* @ubi: UBI device description object
* @wrk: the work to schedule
*
* This function adds a work defined by @wrk to the tail of the pending works
- * list.
+ * list. Can only be used if @ubi->work_sem is already held in read mode!
*/
-static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
+static void __schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
{
spin_lock(&ubi->wl_lock);
list_add_tail(&wrk->list, &ubi->works);
@@ -576,9 +842,35 @@ static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
spin_unlock(&ubi->wl_lock);
}
+/**
+ * schedule_ubi_work - schedule a work.
+ * @ubi: UBI device description object
+ * @wrk: the work to schedule
+ *
+ * This function adds a work defined by @wrk to the tail of the pending works
+ * list.
+ */
+static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
+{
+ down_read(&ubi->work_sem);
+ __schedule_ubi_work(ubi, wrk);
+ up_read(&ubi->work_sem);
+}
+
static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
int cancel);
+#ifdef CONFIG_MTD_UBI_FASTMAP
+/**
+ * ubi_is_erase_work - checks whether a work is erase work.
+ * @wrk: The work object to be checked
+ */
+int ubi_is_erase_work(struct ubi_work *wrk)
+{
+ return wrk->func == erase_worker;
+}
+#endif
+
/**
* schedule_erase - schedule an erase work.
* @ubi: UBI device description object
@@ -595,6 +887,9 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
{
struct ubi_work *wl_wrk;
+ ubi_assert(e);
+ ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
+
dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
e->pnum, e->ec, torture);
@@ -613,6 +908,79 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
}
/**
+ * do_sync_erase - run the erase worker synchronously.
+ * @ubi: UBI device description object
+ * @e: the WL entry of the physical eraseblock to erase
+ * @vol_id: the volume ID that last used this PEB
+ * @lnum: the last used logical eraseblock number for the PEB
+ * @torture: if the physical eraseblock has to be tortured
+ *
+ */
+static int do_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
+ int vol_id, int lnum, int torture)
+{
+ struct ubi_work *wl_wrk;
+
+ dbg_wl("sync erase of PEB %i", e->pnum);
+
+ wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
+ if (!wl_wrk)
+ return -ENOMEM;
+
+ wl_wrk->e = e;
+ wl_wrk->vol_id = vol_id;
+ wl_wrk->lnum = lnum;
+ wl_wrk->torture = torture;
+
+ return erase_worker(ubi, wl_wrk, 0);
+}
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+/**
+ * ubi_wl_put_fm_peb - returns a PEB used in a fastmap to the wear-leveling
+ * sub-system.
+ * see: ubi_wl_put_peb()
+ *
+ * @ubi: UBI device description object
+ * @fm_e: physical eraseblock to return
+ * @lnum: the last used logical eraseblock number for the PEB
+ * @torture: if this physical eraseblock has to be tortured
+ */
+int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e,
+ int lnum, int torture)
+{
+ struct ubi_wl_entry *e;
+ int vol_id, pnum = fm_e->pnum;
+
+ dbg_wl("PEB %d", pnum);
+
+ ubi_assert(pnum >= 0);
+ ubi_assert(pnum < ubi->peb_count);
+
+ spin_lock(&ubi->wl_lock);
+ e = ubi->lookuptbl[pnum];
+
+ /* This can happen if we recovered from a fastmap the very
+ * first time and are now writing a new one. In this case the wl system
+ * has never seen any PEB used by the original fastmap.
+ */
+ if (!e) {
+ e = fm_e;
+ ubi_assert(e->ec >= 0);
+ ubi->lookuptbl[pnum] = e;
+ } else {
+ e->ec = fm_e->ec;
+ kfree(fm_e);
+ }
+
+ spin_unlock(&ubi->wl_lock);
+
+ vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID;
+ return schedule_erase(ubi, e, vol_id, lnum, torture);
+}
+#endif
+
+/**
* wear_leveling_worker - wear-leveling worker function.
* @ubi: UBI device description object
* @wrk: the work object
@@ -627,6 +995,9 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
{
int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
int vol_id = -1, uninitialized_var(lnum);
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ int anchor = wrk->anchor;
+#endif
struct ubi_wl_entry *e1, *e2;
struct ubi_vid_hdr *vid_hdr;
@@ -660,14 +1031,35 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
goto out_cancel;
}
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ /* Check whether we need to produce an anchor PEB */
+ if (!anchor)
+ anchor = !anchor_pebs_available(&ubi->free);
+
+ if (anchor) {
+ e1 = find_anchor_wl_entry(&ubi->used);
+ if (!e1)
+ goto out_cancel;
+ e2 = get_peb_for_wl(ubi);
+ if (!e2)
+ goto out_cancel;
+
+ self_check_in_wl_tree(ubi, e1, &ubi->used);
+ rb_erase(&e1->u.rb, &ubi->used);
+ dbg_wl("anchor-move PEB %d to PEB %d", e1->pnum, e2->pnum);
+ } else if (!ubi->scrub.rb_node) {
+#else
if (!ubi->scrub.rb_node) {
+#endif
/*
* Now pick the least worn-out used physical eraseblock and a
* highly worn-out free physical eraseblock. If the erase
* counters differ much enough, start wear-leveling.
*/
e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
- e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
+ e2 = get_peb_for_wl(ubi);
+ if (!e2)
+ goto out_cancel;
if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
dbg_wl("no WL needed: min used EC %d, max free EC %d",
@@ -682,14 +1074,15 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
/* Perform scrubbing */
scrubbing = 1;
e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
- e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
+ e2 = get_peb_for_wl(ubi);
+ if (!e2)
+ goto out_cancel;
+
self_check_in_wl_tree(ubi, e1, &ubi->scrub);
rb_erase(&e1->u.rb, &ubi->scrub);
dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
}
- self_check_in_wl_tree(ubi, e2, &ubi->free);
- rb_erase(&e2->u.rb, &ubi->free);
ubi->move_from = e1;
ubi->move_to = e2;
spin_unlock(&ubi->wl_lock);
@@ -806,7 +1199,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
ubi->move_to_put = ubi->wl_scheduled = 0;
spin_unlock(&ubi->wl_lock);
- err = schedule_erase(ubi, e1, vol_id, lnum, 0);
+ err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
if (err) {
kmem_cache_free(ubi_wl_entry_slab, e1);
if (e2)
@@ -821,7 +1214,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
*/
dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase",
e2->pnum, vol_id, lnum);
- err = schedule_erase(ubi, e2, vol_id, lnum, 0);
+ err = do_sync_erase(ubi, e2, vol_id, lnum, 0);
if (err) {
kmem_cache_free(ubi_wl_entry_slab, e2);
goto out_ro;
@@ -860,7 +1253,7 @@ out_not_moved:
spin_unlock(&ubi->wl_lock);
ubi_free_vid_hdr(ubi, vid_hdr);
- err = schedule_erase(ubi, e2, vol_id, lnum, torture);
+ err = do_sync_erase(ubi, e2, vol_id, lnum, torture);
if (err) {
kmem_cache_free(ubi_wl_entry_slab, e2);
goto out_ro;
@@ -901,12 +1294,13 @@ out_cancel:
/**
* ensure_wear_leveling - schedule wear-leveling if it is needed.
* @ubi: UBI device description object
+ * @nested: set to non-zero if this function is called from UBI worker
*
* This function checks if it is time to start wear-leveling and schedules it
* if yes. This function returns zero in case of success and a negative error
* code in case of failure.
*/
-static int ensure_wear_leveling(struct ubi_device *ubi)
+static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
{
int err = 0;
struct ubi_wl_entry *e1;
@@ -934,7 +1328,7 @@ static int ensure_wear_leveling(struct ubi_device *ubi)
* %UBI_WL_THRESHOLD.
*/
e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
- e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
+ e2 = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
goto out_unlock;
@@ -951,8 +1345,12 @@ static int ensure_wear_leveling(struct ubi_device *ubi)
goto out_cancel;
}
+ wrk->anchor = 0;
wrk->func = &wear_leveling_worker;
- schedule_ubi_work(ubi, wrk);
+ if (nested)
+ __schedule_ubi_work(ubi, wrk);
+ else
+ schedule_ubi_work(ubi, wrk);
return err;
out_cancel:
@@ -963,6 +1361,38 @@ out_unlock:
return err;
}
+#ifdef CONFIG_MTD_UBI_FASTMAP
+/**
+ * ubi_ensure_anchor_pebs - schedule wear-leveling to produce an anchor PEB.
+ * @ubi: UBI device description object
+ */
+int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
+{
+ struct ubi_work *wrk;
+
+ spin_lock(&ubi->wl_lock);
+ if (ubi->wl_scheduled) {
+ spin_unlock(&ubi->wl_lock);
+ return 0;
+ }
+ ubi->wl_scheduled = 1;
+ spin_unlock(&ubi->wl_lock);
+
+ wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
+ if (!wrk) {
+ spin_lock(&ubi->wl_lock);
+ ubi->wl_scheduled = 0;
+ spin_unlock(&ubi->wl_lock);
+ return -ENOMEM;
+ }
+
+ wrk->anchor = 1;
+ wrk->func = &wear_leveling_worker;
+ schedule_ubi_work(ubi, wrk);
+ return 0;
+}
+#endif
+
/**
* erase_worker - physical eraseblock erase worker function.
* @ubi: UBI device description object
@@ -993,6 +1423,8 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
dbg_wl("erase PEB %d EC %d LEB %d:%d",
pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum);
+ ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
+
err = sync_erase(ubi, e, wl_wrk->torture);
if (!err) {
/* Fine, we've erased it successfully */
@@ -1000,6 +1432,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
spin_lock(&ubi->wl_lock);
wl_tree_add(e, &ubi->free);
+ ubi->free_count++;
spin_unlock(&ubi->wl_lock);
/*
@@ -1009,7 +1442,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
serve_prot_queue(ubi);
/* And take care about wear-leveling */
- err = ensure_wear_leveling(ubi);
+ err = ensure_wear_leveling(ubi, 1);
return err;
}
@@ -1247,7 +1680,7 @@ retry:
* Technically scrubbing is the same as wear-leveling, so it is done
* by the WL worker.
*/
- return ensure_wear_leveling(ubi);
+ return ensure_wear_leveling(ubi, 0);
}
/**
@@ -1428,7 +1861,7 @@ static void cancel_pending(struct ubi_device *ubi)
*/
int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
- int err, i;
+ int err, i, reserved_pebs, found_pebs = 0;
struct rb_node *rb1, *rb2;
struct ubi_ainf_volume *av;
struct ubi_ainf_peb *aeb, *tmp;
@@ -1440,6 +1873,9 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
init_rwsem(&ubi->work_sem);
ubi->max_ec = ai->max_ec;
INIT_LIST_HEAD(&ubi->works);
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ INIT_WORK(&ubi->fm_work, update_fastmap_work_fn);
+#endif
sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);
@@ -1461,13 +1897,17 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
e->pnum = aeb->pnum;
e->ec = aeb->ec;
+ ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
ubi->lookuptbl[e->pnum] = e;
if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0)) {
kmem_cache_free(ubi_wl_entry_slab, e);
goto out_free;
}
+
+ found_pebs++;
}
+ ubi->free_count = 0;
list_for_each_entry(aeb, &ai->free, u.list) {
cond_resched();
@@ -1478,8 +1918,14 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
e->pnum = aeb->pnum;
e->ec = aeb->ec;
ubi_assert(e->ec >= 0);
+ ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
+
wl_tree_add(e, &ubi->free);
+ ubi->free_count++;
+
ubi->lookuptbl[e->pnum] = e;
+
+ found_pebs++;
}
ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
@@ -1493,6 +1939,7 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
e->pnum = aeb->pnum;
e->ec = aeb->ec;
ubi->lookuptbl[e->pnum] = e;
+
if (!aeb->scrub) {
dbg_wl("add PEB %d EC %d to the used tree",
e->pnum, e->ec);
@@ -1502,22 +1949,38 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
e->pnum, e->ec);
wl_tree_add(e, &ubi->scrub);
}
+
+ found_pebs++;
}
}
- if (ubi->avail_pebs < WL_RESERVED_PEBS) {
+ dbg_wl("found %i PEBs", found_pebs);
+
+ if (ubi->fm)
+ ubi_assert(ubi->good_peb_count ==
+ found_pebs + ubi->fm->used_blocks);
+ else
+ ubi_assert(ubi->good_peb_count == found_pebs);
+
+ reserved_pebs = WL_RESERVED_PEBS;
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ /* Reserve enough LEBs to store two fastmaps. */
+ reserved_pebs += (ubi->fm_size / ubi->leb_size) * 2;
+#endif
+
+ if (ubi->avail_pebs < reserved_pebs) {
ubi_err("no enough physical eraseblocks (%d, need %d)",
- ubi->avail_pebs, WL_RESERVED_PEBS);
+ ubi->avail_pebs, reserved_pebs);
if (ubi->corr_peb_count)
ubi_err("%d PEBs are corrupted and not used",
ubi->corr_peb_count);
goto out_free;
}
- ubi->avail_pebs -= WL_RESERVED_PEBS;
- ubi->rsvd_pebs += WL_RESERVED_PEBS;
+ ubi->avail_pebs -= reserved_pebs;
+ ubi->rsvd_pebs += reserved_pebs;
/* Schedule wear-leveling if needed */
- err = ensure_wear_leveling(ubi);
+ err = ensure_wear_leveling(ubi, 0);
if (err)
goto out_free;
@@ -1596,7 +2059,7 @@ static int self_check_ec(struct ubi_device *ubi, int pnum, int ec)
}
read_ec = be64_to_cpu(ec_hdr->ec);
- if (ec != read_ec) {
+ if (ec != read_ec && read_ec - ec > 1) {
ubi_err("self-check failed for PEB %d", pnum);
ubi_err("read EC is %lld, should be %d", read_ec, ec);
dump_stack();
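
Much of the wear-leveling rework above exists to keep an anchor PEB below UBI_FM_MAX_START free, because an attaching kernel only scans that window for the fastmap super block. A sketch of the attach-time probe such a design implies; is_fm_sb_here() is a hypothetical helper standing in for reading and checking the VID header:

static int find_fm_anchor_sketch(struct ubi_device *ubi)
{
	int pnum;

	for (pnum = 0; pnum < UBI_FM_MAX_START; pnum++)
		if (is_fm_sb_here(ubi, pnum)) /* hypothetical VID hdr check */
			return pnum;

	return UBI_NO_FASTMAP; /* fall back to scanning the whole device */
}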
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index 64d0d9c..3491d43 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -1845,6 +1845,7 @@ static int __devinit amd8111e_probe_one(struct pci_dev *pdev,
if((pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM))==0){
printk(KERN_ERR "amd8111e: No Power Management capability, "
"exiting.\n");
+ err = -ENODEV;
goto err_free_reg;
}
@@ -1852,6 +1853,7 @@ static int __devinit amd8111e_probe_one(struct pci_dev *pdev,
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) < 0) {
printk(KERN_ERR "amd8111e: DMA not supported,"
"exiting.\n");
+ err = -ENODEV;
goto err_free_reg;
}
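
Both hunks above fix the same bug class, which recurs in the au1000 and dmfe changes below: a probe error path that jumps to a shared unwind label without setting err first makes the function return the stale 0, so the core treats a failed probe as successful. A condensed sketch of the pattern (all names illustrative):

static int probe_sketch(struct pci_dev *pdev)
{
	int err = 0;
	void *ring;

	ring = kmalloc(4096, GFP_KERNEL);
	if (!ring) {
		err = -ENOMEM; /* without this, err_free_reg returns 0 */
		goto err_free_reg;
	}

	/* ... more setup, each failure branch setting err ... */
	return 0; /* on success the driver keeps ring */

err_free_reg:
	pci_release_regions(pdev);
	return err;
}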
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index 397596b..f195acf 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -1174,8 +1174,10 @@ static int __devinit au1000_probe(struct platform_device *pdev)
snprintf(aup->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
pdev->name, aup->mac_id);
aup->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
- if (aup->mii_bus->irq == NULL)
+ if (aup->mii_bus->irq == NULL) {
+ err = -ENOMEM;
goto err_out;
+ }
for (i = 0; i < PHY_MAX_ADDR; ++i)
aup->mii_bus->irq[i] = PHY_POLL;
@@ -1190,7 +1192,8 @@ static int __devinit au1000_probe(struct platform_device *pdev)
goto err_mdiobus_reg;
}
- if (au1000_mii_probe(dev) != 0)
+ err = au1000_mii_probe(dev);
+ if (err != 0)
goto err_out;
pDBfree = NULL;
@@ -1205,6 +1208,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
}
aup->pDBfree = pDBfree;
+ err = -ENODEV;
for (i = 0; i < NUM_RX_DMA; i++) {
pDB = au1000_GetFreeDB(aup);
if (!pDB)
@@ -1213,6 +1217,8 @@ static int __devinit au1000_probe(struct platform_device *pdev)
aup->rx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
aup->rx_db_inuse[i] = pDB;
}
+
+ err = -ENODEV;
for (i = 0; i < NUM_TX_DMA; i++) {
pDB = au1000_GetFreeDB(aup);
if (!pDB)
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.h b/drivers/net/ethernet/broadcom/bcm63xx_enet.h
index 0e3048b..133d585 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.h
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.h
@@ -10,6 +10,7 @@
#include <bcm63xx_regs.h>
#include <bcm63xx_irq.h>
#include <bcm63xx_io.h>
+#include <bcm63xx_iudma.h>
/* default number of descriptor */
#define BCMENET_DEF_RX_DESC 64
@@ -31,35 +32,6 @@
#define BCMENET_MAX_MTU 2046
/*
- * rx/tx dma descriptor
- */
-struct bcm_enet_desc {
- u32 len_stat;
- u32 address;
-};
-
-#define DMADESC_LENGTH_SHIFT 16
-#define DMADESC_LENGTH_MASK (0xfff << DMADESC_LENGTH_SHIFT)
-#define DMADESC_OWNER_MASK (1 << 15)
-#define DMADESC_EOP_MASK (1 << 14)
-#define DMADESC_SOP_MASK (1 << 13)
-#define DMADESC_ESOP_MASK (DMADESC_EOP_MASK | DMADESC_SOP_MASK)
-#define DMADESC_WRAP_MASK (1 << 12)
-
-#define DMADESC_UNDER_MASK (1 << 9)
-#define DMADESC_APPEND_CRC (1 << 8)
-#define DMADESC_OVSIZE_MASK (1 << 4)
-#define DMADESC_RXER_MASK (1 << 2)
-#define DMADESC_CRC_MASK (1 << 1)
-#define DMADESC_OV_MASK (1 << 0)
-#define DMADESC_ERR_MASK (DMADESC_UNDER_MASK | \
- DMADESC_OVSIZE_MASK | \
- DMADESC_RXER_MASK | \
- DMADESC_CRC_MASK | \
- DMADESC_OV_MASK)
-
-
-/*
* MIB Counters register definitions
*/
#define ETH_MIB_TX_GD_OCTETS 0
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 2b4b4f5..16814b3 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -375,7 +375,6 @@ struct xgmac_priv {
unsigned int tx_tail;
void __iomem *base;
- struct sk_buff_head rx_recycle;
unsigned int dma_buf_sz;
dma_addr_t dma_rx_phy;
dma_addr_t dma_tx_phy;
@@ -672,9 +671,7 @@ static void xgmac_rx_refill(struct xgmac_priv *priv)
p = priv->dma_rx + entry;
if (priv->rx_skbuff[entry] == NULL) {
- skb = __skb_dequeue(&priv->rx_recycle);
- if (skb == NULL)
- skb = netdev_alloc_skb(priv->dev, priv->dma_buf_sz);
+ skb = netdev_alloc_skb(priv->dev, priv->dma_buf_sz);
if (unlikely(skb == NULL))
break;
@@ -887,17 +884,7 @@ static void xgmac_tx_complete(struct xgmac_priv *priv)
desc_get_buf_len(p), DMA_TO_DEVICE);
}
- /*
- * If there's room in the queue (limit it to size)
- * we add this skb back into the pool,
- * if it's the right size.
- */
- if ((skb_queue_len(&priv->rx_recycle) <
- DMA_RX_RING_SZ) &&
- skb_recycle_check(skb, priv->dma_buf_sz))
- __skb_queue_head(&priv->rx_recycle, skb);
- else
- dev_kfree_skb(skb);
+ dev_kfree_skb(skb);
}
if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) >
@@ -1016,7 +1003,6 @@ static int xgmac_open(struct net_device *dev)
dev->dev_addr);
}
- skb_queue_head_init(&priv->rx_recycle);
memset(&priv->xstats, 0, sizeof(struct xgmac_extra_stats));
/* Initialize the XGMAC and descriptors */
@@ -1053,7 +1039,6 @@ static int xgmac_stop(struct net_device *dev)
napi_disable(&priv->napi);
writel(0, priv->base + XGMAC_DMA_INTR_ENA);
- skb_queue_purge(&priv->rx_recycle);
/* Disable the MAC core */
xgmac_mac_disable(priv->base);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 31752b2..a4da893 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -696,6 +696,7 @@ int t4_seeprom_wp(struct adapter *adapter, bool enable);
int get_vpd_params(struct adapter *adapter, struct vpd_params *p);
int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
unsigned int t4_flash_cfg_addr(struct adapter *adapter);
+int t4_load_cfg(struct adapter *adapter, const u8 *cfg_data, unsigned int size);
int t4_check_fw_version(struct adapter *adapter);
int t4_prep_adapter(struct adapter *adapter);
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 6b9f6bb..604f4f8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -443,7 +443,10 @@ int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
module_param(dbfifo_int_thresh, int, 0644);
MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");
-int dbfifo_drain_delay = 1000; /* usecs to sleep while draining the dbfifo */
+/*
+ * usecs to sleep while draining the dbfifo
+ */
+static int dbfifo_drain_delay = 1000;
module_param(dbfifo_drain_delay, int, 0644);
MODULE_PARM_DESC(dbfifo_drain_delay,
"usecs to sleep while draining the dbfifo");
@@ -636,7 +639,7 @@ static void name_msix_vecs(struct adapter *adap)
static int request_msix_queue_irqs(struct adapter *adap)
{
struct sge *s = &adap->sge;
- int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi = 2;
+ int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi_index = 2;
err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
adap->msix_info[1].desc, &s->fw_evtq);
@@ -644,56 +647,60 @@ static int request_msix_queue_irqs(struct adapter *adap)
return err;
for_each_ethrxq(s, ethqidx) {
- err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
- adap->msix_info[msi].desc,
+ err = request_irq(adap->msix_info[msi_index].vec,
+ t4_sge_intr_msix, 0,
+ adap->msix_info[msi_index].desc,
&s->ethrxq[ethqidx].rspq);
if (err)
goto unwind;
- msi++;
+ msi_index++;
}
for_each_ofldrxq(s, ofldqidx) {
- err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
- adap->msix_info[msi].desc,
+ err = request_irq(adap->msix_info[msi_index].vec,
+ t4_sge_intr_msix, 0,
+ adap->msix_info[msi_index].desc,
&s->ofldrxq[ofldqidx].rspq);
if (err)
goto unwind;
- msi++;
+ msi_index++;
}
for_each_rdmarxq(s, rdmaqidx) {
- err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
- adap->msix_info[msi].desc,
+ err = request_irq(adap->msix_info[msi_index].vec,
+ t4_sge_intr_msix, 0,
+ adap->msix_info[msi_index].desc,
&s->rdmarxq[rdmaqidx].rspq);
if (err)
goto unwind;
- msi++;
+ msi_index++;
}
return 0;
unwind:
while (--rdmaqidx >= 0)
- free_irq(adap->msix_info[--msi].vec,
+ free_irq(adap->msix_info[--msi_index].vec,
&s->rdmarxq[rdmaqidx].rspq);
while (--ofldqidx >= 0)
- free_irq(adap->msix_info[--msi].vec,
+ free_irq(adap->msix_info[--msi_index].vec,
&s->ofldrxq[ofldqidx].rspq);
while (--ethqidx >= 0)
- free_irq(adap->msix_info[--msi].vec, &s->ethrxq[ethqidx].rspq);
+ free_irq(adap->msix_info[--msi_index].vec,
+ &s->ethrxq[ethqidx].rspq);
free_irq(adap->msix_info[1].vec, &s->fw_evtq);
return err;
}
static void free_msix_queue_irqs(struct adapter *adap)
{
- int i, msi = 2;
+ int i, msi_index = 2;
struct sge *s = &adap->sge;
free_irq(adap->msix_info[1].vec, &s->fw_evtq);
for_each_ethrxq(s, i)
- free_irq(adap->msix_info[msi++].vec, &s->ethrxq[i].rspq);
+ free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
for_each_ofldrxq(s, i)
- free_irq(adap->msix_info[msi++].vec, &s->ofldrxq[i].rspq);
+ free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq);
for_each_rdmarxq(s, i)
- free_irq(adap->msix_info[msi++].vec, &s->rdmarxq[i].rspq);
+ free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
}
/**
@@ -2535,9 +2542,8 @@ static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
ret = t4_mem_win_read_len(adap, addr, (__be32 *)&indices, 8);
if (!ret) {
- indices = be64_to_cpu(indices);
- *cidx = (indices >> 25) & 0xffff;
- *pidx = (indices >> 9) & 0xffff;
+ *cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
+ *pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
}
return ret;
}
@@ -3634,10 +3640,10 @@ static int adap_init0_no_config(struct adapter *adapter, int reset)
* field selections will fit in the 36-bit budget.
*/
if (tp_vlan_pri_map != TP_VLAN_PRI_MAP_DEFAULT) {
- int i, bits = 0;
+ int j, bits = 0;
- for (i = TP_VLAN_PRI_MAP_FIRST; i <= TP_VLAN_PRI_MAP_LAST; i++)
- switch (tp_vlan_pri_map & (1 << i)) {
+ for (j = TP_VLAN_PRI_MAP_FIRST; j <= TP_VLAN_PRI_MAP_LAST; j++)
+ switch (tp_vlan_pri_map & (1 << j)) {
case 0:
/* compressed filter field not enabled */
break;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 137a244..32eec15 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -380,9 +380,11 @@ static int t4_mem_win_rw(struct adapter *adap, u32 addr, __be32 *data, int dir)
/* Collecting data 4 bytes at a time upto MEMWIN0_APERTURE */
for (i = 0; i < MEMWIN0_APERTURE; i = i+0x4) {
if (dir)
- *data++ = t4_read_reg(adap, (MEMWIN0_BASE + i));
+ *data++ = (__force __be32) t4_read_reg(adap,
+ (MEMWIN0_BASE + i));
else
- t4_write_reg(adap, (MEMWIN0_BASE + i), *data++);
+ t4_write_reg(adap, (MEMWIN0_BASE + i),
+ (__force u32) *data++);
}
return 0;
@@ -417,7 +419,7 @@ static int t4_memory_rw(struct adapter *adap, int mtype, u32 addr, u32 len,
if ((addr & 0x3) || (len & 0x3))
return -EINVAL;
- data = vmalloc(MEMWIN0_APERTURE/sizeof(__be32));
+ data = vmalloc(MEMWIN0_APERTURE);
if (!data)
return -ENOMEM;
@@ -744,7 +746,7 @@ static int t4_read_flash(struct adapter *adapter, unsigned int addr,
if (ret)
return ret;
if (byte_oriented)
- *data = htonl(*data);
+ *data = (__force __u32) (htonl(*data));
}
return 0;
}
@@ -992,7 +994,7 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
int ret, addr;
unsigned int i;
u8 first_page[SF_PAGE_SIZE];
- const u32 *p = (const u32 *)fw_data;
+ const __be32 *p = (const __be32 *)fw_data;
const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
unsigned int fw_img_start = adap->params.sf_fw_start;
@@ -2315,7 +2317,8 @@ int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len)
t4_read_reg(adap, PCIE_MEM_ACCESS_OFFSET);
for (i = 0; i < len; i += 4)
- *data++ = t4_read_reg(adap, (MEMWIN0_BASE + off + i));
+ *data++ = (__force __be32) t4_read_reg(adap,
+ (MEMWIN0_BASE + off + i));
return 0;
}
diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c
index 4d6fe60..d23755e 100644
--- a/drivers/net/ethernet/dec/tulip/dmfe.c
+++ b/drivers/net/ethernet/dec/tulip/dmfe.c
@@ -446,13 +446,17 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
/* Allocate Tx/Rx descriptor memory */
db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) *
DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr);
- if (!db->desc_pool_ptr)
+ if (!db->desc_pool_ptr) {
+ err = -ENOMEM;
goto err_out_res;
+ }
db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC *
TX_DESC_CNT + 4, &db->buf_pool_dma_ptr);
- if (!db->buf_pool_ptr)
+ if (!db->buf_pool_ptr) {
+ err = -ENOMEM;
goto err_out_free_desc;
+ }
db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr;
db->first_tx_desc_dma = db->desc_pool_dma_ptr;
@@ -462,8 +466,10 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
db->chip_id = ent->driver_data;
/* IO type range. */
db->ioaddr = pci_iomap(pdev, 0, 0);
- if (!db->ioaddr)
+ if (!db->ioaddr) {
+ err = -ENOMEM;
goto err_out_free_buf;
+ }
db->chip_revision = pdev->revision;
db->wol_mode = 0;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index eb3f2cb..d1b6cc5 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -2129,8 +2129,11 @@ void be_detect_error(struct be_adapter *adapter)
ue_hi = (ue_hi & ~ue_hi_mask);
}
- if (ue_lo || ue_hi ||
- sliport_status & SLIPORT_STATUS_ERR_MASK) {
+ /* On certain platforms BE hardware can indicate spurious UEs.
+ * A real UE will make the hardware stop working on its own, so
+ * hw_error is not set on UE indications alone.
+ */
+ if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
adapter->hw_error = true;
dev_err(&adapter->pdev->dev,
"Error detected in the card\n");
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index a1b52ec..1d03dcd 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1765,7 +1765,6 @@ static void free_skb_resources(struct gfar_private *priv)
sizeof(struct rxbd8) * priv->total_rx_ring_size,
priv->tx_queue[0]->tx_bd_base,
priv->tx_queue[0]->tx_bd_dma_base);
- skb_queue_purge(&priv->rx_recycle);
}
void gfar_start(struct net_device *dev)
@@ -1943,8 +1942,6 @@ static int gfar_enet_open(struct net_device *dev)
enable_napi(priv);
- skb_queue_head_init(&priv->rx_recycle);
-
/* Initialize a bunch of registers */
init_registers(dev);
@@ -2533,16 +2530,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
bytes_sent += skb->len;
- /* If there's room in the queue (limit it to rx_buffer_size)
- * we add this skb back into the pool, if it's the right size
- */
- if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size &&
- skb_recycle_check(skb, priv->rx_buffer_size +
- RXBUF_ALIGNMENT)) {
- gfar_align_skb(skb);
- skb_queue_head(&priv->rx_recycle, skb);
- } else
- dev_kfree_skb_any(skb);
+ dev_kfree_skb_any(skb);
tx_queue->tx_skbuff[skb_dirtytx] = NULL;
@@ -2608,7 +2596,7 @@ static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
- struct sk_buff *skb = NULL;
+ struct sk_buff *skb;
skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
if (!skb)
@@ -2621,14 +2609,7 @@ static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
struct sk_buff *gfar_new_skb(struct net_device *dev)
{
- struct gfar_private *priv = netdev_priv(dev);
- struct sk_buff *skb = NULL;
-
- skb = skb_dequeue(&priv->rx_recycle);
- if (!skb)
- skb = gfar_alloc_skb(dev);
-
- return skb;
+ return gfar_alloc_skb(dev);
}
static inline void count_errors(unsigned short status, struct net_device *dev)
@@ -2787,7 +2768,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
if (unlikely(!newskb))
newskb = skb;
else if (skb)
- skb_queue_head(&priv->rx_recycle, skb);
+ dev_kfree_skb(skb);
} else {
/* Increment the number of packets */
rx_queue->stats.rx_packets++;
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 4141ef2..22eabc1 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -1080,8 +1080,6 @@ struct gfar_private {
u32 cur_filer_idx;
- struct sk_buff_head rx_recycle;
-
/* RX queue filer rule set*/
struct ethtool_rx_list rx_list;
struct mutex rx_queue_access;
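
The gianfar hunks above, like the ucc_geth, mv643xx_eth and stmmac hunks below, remove the driver-private rx_recycle queues ahead of skb_recycle_check() being dropped from the core: TX completion now frees the skb unconditionally and RX refill always allocates a fresh one. A minimal sketch of the resulting pattern, with illustrative function names:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void tx_complete(struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);		/* no recycle-queue test anymore */
}

static struct sk_buff *rx_refill(struct net_device *dev, int bufsize)
{
	return netdev_alloc_skb_ip_align(dev, bufsize);
}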
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 1642884..0a70bb5 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -209,14 +209,12 @@ static struct list_head *dequeue(struct list_head *lh)
static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth,
u8 __iomem *bd)
{
- struct sk_buff *skb = NULL;
+ struct sk_buff *skb;
- skb = __skb_dequeue(&ugeth->rx_recycle);
+ skb = netdev_alloc_skb(ugeth->ndev,
+ ugeth->ug_info->uf_info.max_rx_buf_length +
+ UCC_GETH_RX_DATA_BUF_ALIGNMENT);
if (!skb)
- skb = netdev_alloc_skb(ugeth->ndev,
- ugeth->ug_info->uf_info.max_rx_buf_length +
- UCC_GETH_RX_DATA_BUF_ALIGNMENT);
- if (skb == NULL)
return NULL;
/* We need the data buffer to be aligned properly. We will reserve
@@ -2020,8 +2018,6 @@ static void ucc_geth_memclean(struct ucc_geth_private *ugeth)
iounmap(ugeth->ug_regs);
ugeth->ug_regs = NULL;
}
-
- skb_queue_purge(&ugeth->rx_recycle);
}
static void ucc_geth_set_multi(struct net_device *dev)
@@ -2230,8 +2226,6 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
return -ENOMEM;
}
- skb_queue_head_init(&ugeth->rx_recycle);
-
return 0;
}
@@ -3274,12 +3268,7 @@ static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit
if (netif_msg_rx_err(ugeth))
ugeth_err("%s, %d: ERROR!!! skb - 0x%08x",
__func__, __LINE__, (u32) skb);
- if (skb) {
- skb->data = skb->head + NET_SKB_PAD;
- skb->len = 0;
- skb_reset_tail_pointer(skb);
- __skb_queue_head(&ugeth->rx_recycle, skb);
- }
+ dev_kfree_skb(skb);
ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = NULL;
dev->stats.rx_dropped++;
@@ -3349,13 +3338,7 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ)
dev->stats.tx_packets++;
- if (skb_queue_len(&ugeth->rx_recycle) < RX_BD_RING_LEN &&
- skb_recycle_check(skb,
- ugeth->ug_info->uf_info.max_rx_buf_length +
- UCC_GETH_RX_DATA_BUF_ALIGNMENT))
- __skb_queue_head(&ugeth->rx_recycle, skb);
- else
- dev_kfree_skb(skb);
+ dev_kfree_skb(skb);
ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL;
ugeth->skb_dirtytx[txQ] =
diff --git a/drivers/net/ethernet/freescale/ucc_geth.h b/drivers/net/ethernet/freescale/ucc_geth.h
index f71b3e7..75f3371 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.h
+++ b/drivers/net/ethernet/freescale/ucc_geth.h
@@ -1214,8 +1214,6 @@ struct ucc_geth_private {
/* index of the first skb which hasn't been transmitted yet. */
u16 skb_dirtytx[NUM_TX_QUEUES];
- struct sk_buff_head rx_recycle;
-
struct ugeth_mii_info *mii_info;
struct phy_device *phydev;
phy_interface_t phy_interface;
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index ed5b409..d37bfd9 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -412,6 +412,8 @@ enum e1e_registers {
#define E1000_DEV_ID_PCH2_LV_V 0x1503
#define E1000_DEV_ID_PCH_LPT_I217_LM 0x153A
#define E1000_DEV_ID_PCH_LPT_I217_V 0x153B
+#define E1000_DEV_ID_PCH_LPTLP_I218_LM 0x155A
+#define E1000_DEV_ID_PCH_LPTLP_I218_V 0x1559
#define E1000_REVISION_4 4
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index fb659dd..de57a2b 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -6558,6 +6558,8 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_LM), board_pch_lpt },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_V), board_pch_lpt },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_LM), board_pch_lpt },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_V), board_pch_lpt },
{ 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
};
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 5bd2676..30efc9f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -410,7 +410,7 @@ static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring)
#define IXGBE_TX_CTXTDESC(R, i) \
(&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i]))
-#define IXGBE_MAX_JUMBO_FRAME_SIZE 16128
+#define IXGBE_MAX_JUMBO_FRAME_SIZE 9728 /* Maximum Supported Size 9.5KB */
#ifdef IXGBE_FCOE
/* Use 3K as the baby jumbo frame size for FCoE */
#define IXGBE_FCOE_JUMBO_FRAME_SIZE 3072
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 383b4e1..4a9c9c2 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -175,7 +175,7 @@ struct ixgbevf_q_vector {
#define IXGBEVF_TX_CTXTDESC(R, i) \
(&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i]))
-#define IXGBE_MAX_JUMBO_FRAME_SIZE 16128
+#define IXGBE_MAX_JUMBO_FRAME_SIZE 9728 /* Maximum Supported Size 9.5KB */
#define OTHER_VECTOR 1
#define NON_Q_VECTORS (OTHER_VECTOR)
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 0ee9bd4..de1ad50 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1747,6 +1747,7 @@ err_tx_ring_allocation:
**/
static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
{
+ struct net_device *netdev = adapter->netdev;
int err = 0;
int vector, v_budget;
@@ -1775,6 +1776,12 @@ static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
ixgbevf_acquire_msix_vectors(adapter, v_budget);
+ err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
+ if (err)
+ goto out;
+
+ err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
+
out:
return err;
}
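
The ixgbevf fix above makes the driver report its negotiated ring counts to the stack, so queue selection never targets a ring with no MSI-X vector behind it. A minimal sketch of the call pair, wrapped in a hypothetical helper:

#include <linux/netdevice.h>

static int sync_queue_counts(struct net_device *netdev,
			     unsigned int ntx, unsigned int nrx)
{
	int err;

	err = netif_set_real_num_tx_queues(netdev, ntx);
	if (err)
		return err;

	return netif_set_real_num_rx_queues(netdev, nrx);
}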
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 087b9e0..84c1326 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -412,7 +412,6 @@ struct mv643xx_eth_private {
u8 work_rx_refill;
int skb_size;
- struct sk_buff_head rx_recycle;
/*
* RX state.
@@ -673,9 +672,7 @@ static int rxq_refill(struct rx_queue *rxq, int budget)
struct rx_desc *rx_desc;
int size;
- skb = __skb_dequeue(&mp->rx_recycle);
- if (skb == NULL)
- skb = netdev_alloc_skb(mp->dev, mp->skb_size);
+ skb = netdev_alloc_skb(mp->dev, mp->skb_size);
if (skb == NULL) {
mp->oom = 1;
@@ -989,14 +986,7 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
desc->byte_cnt, DMA_TO_DEVICE);
}
- if (skb != NULL) {
- if (skb_queue_len(&mp->rx_recycle) <
- mp->rx_ring_size &&
- skb_recycle_check(skb, mp->skb_size))
- __skb_queue_head(&mp->rx_recycle, skb);
- else
- dev_kfree_skb(skb);
- }
+ dev_kfree_skb(skb);
}
__netif_tx_unlock(nq);
@@ -2349,8 +2339,6 @@ static int mv643xx_eth_open(struct net_device *dev)
napi_enable(&mp->napi);
- skb_queue_head_init(&mp->rx_recycle);
-
mp->int_mask = INT_EXT;
for (i = 0; i < mp->rxq_count; i++) {
@@ -2445,8 +2433,6 @@ static int mv643xx_eth_stop(struct net_device *dev)
mib_counters_update(mp);
del_timer_sync(&mp->mib_counters_timer);
- skb_queue_purge(&mp->rx_recycle);
-
for (i = 0; i < mp->rxq_count; i++)
rxq_deinit(mp->rxq + i);
for (i = 0; i < mp->txq_count; i++)
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 5a30bf8..9b9c2ac 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -3189,7 +3189,7 @@ static int skge_poll(struct napi_struct *napi, int to_do)
if (work_done < to_do) {
unsigned long flags;
- napi_gro_flush(napi);
+ napi_gro_flush(napi, false);
spin_lock_irqsave(&hw->hw_lock, flags);
__napi_complete(napi);
hw->intr_mask |= napimask[skge->port];
@@ -3945,8 +3945,10 @@ static int __devinit skge_probe(struct pci_dev *pdev,
skge_board_name(hw), hw->chip_rev);
dev = skge_devinit(hw, 0, using_dac);
- if (!dev)
+ if (!dev) {
+ err = -ENOMEM;
goto err_out_led_off;
+ }
/* Some motherboards are broken and has zero in ROM. */
if (!is_valid_ether_addr(dev->dev_addr))
@@ -4153,6 +4155,13 @@ static struct dmi_system_id skge_32bit_dma_boards[] = {
DMI_MATCH(DMI_BOARD_NAME, "nForce"),
},
},
+ {
+ .ident = "ASUS P5NSLI",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "P5NSLI")
+ },
+ },
{}
};
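
The skge hunk above adds one more board to a DMI quirk table that forces 32-bit DMA. A minimal sketch of how such a table is matched, with a purely illustrative entry:

#include <linux/dmi.h>

static const struct dmi_system_id broken_64bit_dma[] = {
	{
		.ident = "Example Board",	/* illustrative entry */
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "Example Vendor"),
			DMI_MATCH(DMI_BOARD_NAME, "Example Board"),
		},
	},
	{ }
};

static bool must_limit_dma(void)
{
	/* dmi_check_system() returns the number of matching entries */
	return dmi_check_system(broken_64bit_dma) != 0;
}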
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 2b0748d..78946fe 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -4924,6 +4924,7 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
if (~reg == 0) {
dev_err(&pdev->dev, "PCI configuration read error\n");
+ err = -EIO;
goto err_out;
}
@@ -4993,8 +4994,10 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
hw->st_size = hw->ports * roundup_pow_of_two(3*RX_MAX_PENDING + TX_MAX_PENDING);
hw->st_le = pci_alloc_consistent(pdev, hw->st_size * sizeof(struct sky2_status_le),
&hw->st_dma);
- if (!hw->st_le)
+ if (!hw->st_le) {
+ err = -ENOMEM;
goto err_out_reset;
+ }
dev_info(&pdev->dev, "Yukon-2 %s chip revision %d\n",
sky2_name(hw->chip_id, buf1, sizeof(buf1)), hw->chip_rev);
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index 5b61d12..dbaaa99 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -947,8 +947,8 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev,
i = register_netdev(dev);
if (i)
goto err_register_netdev;
-
- if (NATSEMI_CREATE_FILE(pdev, dspcfg_workaround))
+ i = NATSEMI_CREATE_FILE(pdev, dspcfg_workaround);
+ if (i)
goto err_create_file;
if (netif_msg_drv(np)) {
diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c
index e01c0a0..7dfe883 100644
--- a/drivers/net/ethernet/natsemi/xtsonic.c
+++ b/drivers/net/ethernet/natsemi/xtsonic.c
@@ -205,6 +205,7 @@ static int __init sonic_probe1(struct net_device *dev)
if (lp->descriptors == NULL) {
printk(KERN_ERR "%s: couldn't alloc DMA memory for "
" descriptors.\n", dev_name(lp->device));
+ err = -ENOMEM;
goto out;
}
diff --git a/drivers/net/ethernet/octeon/octeon_mgmt.c b/drivers/net/ethernet/octeon/octeon_mgmt.c
index a688a2d..f97719c 100644
--- a/drivers/net/ethernet/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/octeon/octeon_mgmt.c
@@ -3,13 +3,14 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2009 Cavium Networks
+ * Copyright (C) 2009-2012 Cavium, Inc
*/
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/capability.h>
+#include <linux/net_tstamp.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>
@@ -33,8 +34,7 @@
#define OCTEON_MGMT_NAPI_WEIGHT 16
-/*
- * Ring sizes that are powers of two allow for more efficient modulo
+/* Ring sizes that are powers of two allow for more efficient modulo
 * operations.
*/
#define OCTEON_MGMT_RX_RING_SIZE 512
@@ -93,6 +93,7 @@ union mgmt_port_ring_entry {
#define AGL_GMX_RX_ADR_CAM4 0x1a0
#define AGL_GMX_RX_ADR_CAM5 0x1a8
+#define AGL_GMX_TX_CLK 0x208
#define AGL_GMX_TX_STATS_CTL 0x268
#define AGL_GMX_TX_CTL 0x270
#define AGL_GMX_TX_STAT0 0x280
@@ -110,8 +111,10 @@ struct octeon_mgmt {
struct net_device *netdev;
u64 mix;
u64 agl;
+ u64 agl_prt_ctl;
int port;
int irq;
+ bool has_rx_tstamp;
u64 *tx_ring;
dma_addr_t tx_ring_handle;
unsigned int tx_next;
@@ -131,6 +134,7 @@ struct octeon_mgmt {
spinlock_t lock;
unsigned int last_duplex;
unsigned int last_link;
+ unsigned int last_speed;
struct device *dev;
struct napi_struct napi;
struct tasklet_struct tx_clean_tasklet;
@@ -140,6 +144,8 @@ struct octeon_mgmt {
resource_size_t mix_size;
resource_size_t agl_phys;
resource_size_t agl_size;
+ resource_size_t agl_prt_ctl_phys;
+ resource_size_t agl_prt_ctl_size;
};
static void octeon_mgmt_set_rx_irq(struct octeon_mgmt *p, int enable)
@@ -166,22 +172,22 @@ static void octeon_mgmt_set_tx_irq(struct octeon_mgmt *p, int enable)
spin_unlock_irqrestore(&p->lock, flags);
}
-static inline void octeon_mgmt_enable_rx_irq(struct octeon_mgmt *p)
+static void octeon_mgmt_enable_rx_irq(struct octeon_mgmt *p)
{
octeon_mgmt_set_rx_irq(p, 1);
}
-static inline void octeon_mgmt_disable_rx_irq(struct octeon_mgmt *p)
+static void octeon_mgmt_disable_rx_irq(struct octeon_mgmt *p)
{
octeon_mgmt_set_rx_irq(p, 0);
}
-static inline void octeon_mgmt_enable_tx_irq(struct octeon_mgmt *p)
+static void octeon_mgmt_enable_tx_irq(struct octeon_mgmt *p)
{
octeon_mgmt_set_tx_irq(p, 1);
}
-static inline void octeon_mgmt_disable_tx_irq(struct octeon_mgmt *p)
+static void octeon_mgmt_disable_tx_irq(struct octeon_mgmt *p)
{
octeon_mgmt_set_tx_irq(p, 0);
}
@@ -233,6 +239,28 @@ static void octeon_mgmt_rx_fill_ring(struct net_device *netdev)
}
}
+static ktime_t ptp_to_ktime(u64 ptptime)
+{
+ ktime_t ktimebase;
+ u64 ptpbase;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ /* Fill the icache with the code */
+ ktime_get_real();
+ /* Flush all pending operations */
+ mb();
+ /* Read the time and PTP clock as close together as
+ * possible. It is important that this sequence take the same
+ * amount of time to reduce jitter.
+ */
+ ktimebase = ktime_get_real();
+ ptpbase = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_HI);
+ local_irq_restore(flags);
+
+ return ktime_sub_ns(ktimebase, ptpbase - ptptime);
+}
+
static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
{
union cvmx_mixx_orcnt mix_orcnt;
@@ -272,6 +300,20 @@ static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
dma_unmap_single(p->dev, re.s.addr, re.s.len,
DMA_TO_DEVICE);
+
+ /* Read the hardware TX timestamp if one was recorded */
+ if (unlikely(re.s.tstamp)) {
+ struct skb_shared_hwtstamps ts;
+ /* Read the timestamp */
+ u64 ns = cvmx_read_csr(CVMX_MIXX_TSTAMP(p->port));
+ /* Remove the timestamp from the FIFO */
+ cvmx_write_csr(CVMX_MIXX_TSCTL(p->port), 0);
+ /* Tell the kernel about the timestamp */
+ ts.syststamp = ptp_to_ktime(ns);
+ ts.hwtstamp = ns_to_ktime(ns);
+ skb_tstamp_tx(skb, &ts);
+ }
+
dev_kfree_skb_any(skb);
cleaned++;
@@ -372,14 +414,23 @@ static int octeon_mgmt_receive_one(struct octeon_mgmt *p)
/* A good packet, send it up. */
skb_put(skb, re.s.len);
good:
+ /* Process the RX timestamp if it was recorded */
+ if (p->has_rx_tstamp) {
+ /* The first 8 bytes are the timestamp */
+ u64 ns = *(u64 *)skb->data;
+ struct skb_shared_hwtstamps *ts;
+ ts = skb_hwtstamps(skb);
+ ts->hwtstamp = ns_to_ktime(ns);
+ ts->syststamp = ptp_to_ktime(ns);
+ __skb_pull(skb, 8);
+ }
skb->protocol = eth_type_trans(skb, netdev);
netdev->stats.rx_packets++;
netdev->stats.rx_bytes += skb->len;
netif_receive_skb(skb);
rc = 0;
} else if (re.s.code == RING_ENTRY_CODE_MORE) {
- /*
- * Packet split across skbs. This can happen if we
+ /* Packet split across skbs. This can happen if we
* increase the MTU. Buffers that are already in the
* rx ring can then end up being too small. As the rx
* ring is refilled, buffers sized for the new MTU
@@ -409,8 +460,7 @@ good:
} else {
/* Some other error, discard it. */
dev_kfree_skb_any(skb);
- /*
- * Error statistics are accumulated in
+ /* Error statistics are accumulated in
* octeon_mgmt_update_rx_stats.
*/
}
@@ -488,7 +538,7 @@ static void octeon_mgmt_reset_hw(struct octeon_mgmt *p)
mix_ctl.s.reset = 1;
cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
cvmx_read_csr(p->mix + MIX_CTL);
- cvmx_wait(64);
+ octeon_io_clk_delay(64);
mix_bist.u64 = cvmx_read_csr(p->mix + MIX_BIST);
if (mix_bist.u64)
@@ -537,8 +587,7 @@ static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
cam_mode = 0;
available_cam_entries = 8;
} else {
- /*
- * One CAM entry for the primary address, leaves seven
+ /* One CAM entry for the primary address, leaves seven
* for the secondary addresses.
*/
available_cam_entries = 7 - netdev->uc.count;
@@ -595,12 +644,10 @@ static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
static int octeon_mgmt_set_mac_address(struct net_device *netdev, void *addr)
{
- struct sockaddr *sa = addr;
+ int r = eth_mac_addr(netdev, addr);
- if (!is_valid_ether_addr(sa->sa_data))
- return -EADDRNOTAVAIL;
-
- memcpy(netdev->dev_addr, sa->sa_data, ETH_ALEN);
+ if (r)
+ return r;
octeon_mgmt_set_rx_filtering(netdev);
@@ -612,8 +659,7 @@ static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu)
struct octeon_mgmt *p = netdev_priv(netdev);
int size_without_fcs = new_mtu + OCTEON_MGMT_RX_HEADROOM;
- /*
- * Limit the MTU to make sure the ethernet packets are between
+ /* Limit the MTU to make sure the ethernet packets are between
* 64 bytes and 16383 bytes.
*/
if (size_without_fcs < 64 || size_without_fcs > 16383) {
@@ -656,53 +702,258 @@ static irqreturn_t octeon_mgmt_interrupt(int cpl, void *dev_id)
return IRQ_HANDLED;
}
+static int octeon_mgmt_ioctl_hwtstamp(struct net_device *netdev,
+ struct ifreq *rq, int cmd)
+{
+ struct octeon_mgmt *p = netdev_priv(netdev);
+ struct hwtstamp_config config;
+ union cvmx_mio_ptp_clock_cfg ptp;
+ union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
+ bool have_hw_timestamps = false;
+
+ if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ if (config.flags) /* reserved for future extensions */
+ return -EINVAL;
+
+ /* Check whether the hardware supports timestamps */
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
+ /* Get the current state of the PTP clock */
+ ptp.u64 = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_CFG);
+ if (!ptp.s.ext_clk_en) {
+ /* The clock has not been configured to use an
+ * external source. Program it to use the main clock
+ * reference.
+ */
+ u64 clock_comp = (NSEC_PER_SEC << 32) / octeon_get_io_clock_rate();
+ if (!ptp.s.ptp_en)
+ cvmx_write_csr(CVMX_MIO_PTP_CLOCK_COMP, clock_comp);
+ pr_info("PTP Clock: Using sclk reference at %lld Hz\n",
+ (NSEC_PER_SEC << 32) / clock_comp);
+ } else {
+ /* The clock is already programmed to use a GPIO */
+ u64 clock_comp = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_COMP);
+ pr_info("PTP Clock: Using GPIO %d at %lld Hz\n",
+ ptp.s.ext_clk_in,
+ (NSEC_PER_SEC << 32) / clock_comp);
+ }
+
+ /* Enable the clock if it wasn't done already */
+ if (!ptp.s.ptp_en) {
+ ptp.s.ptp_en = 1;
+ cvmx_write_csr(CVMX_MIO_PTP_CLOCK_CFG, ptp.u64);
+ }
+ have_hw_timestamps = true;
+ }
+
+ if (!have_hw_timestamps)
+ return -EINVAL;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ case HWTSTAMP_TX_ON:
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ p->has_rx_tstamp = false;
+ rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL);
+ rxx_frm_ctl.s.ptp_mode = 0;
+ cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ p->has_rx_tstamp = have_hw_timestamps;
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ if (p->has_rx_tstamp) {
+ rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL);
+ rxx_frm_ctl.s.ptp_mode = 1;
+ cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
+ }
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ if (copy_to_user(rq->ifr_data, &config, sizeof(config)))
+ return -EFAULT;
+
+ return 0;
+}
+
static int octeon_mgmt_ioctl(struct net_device *netdev,
struct ifreq *rq, int cmd)
{
struct octeon_mgmt *p = netdev_priv(netdev);
- if (!netif_running(netdev))
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return octeon_mgmt_ioctl_hwtstamp(netdev, rq, cmd);
+ default:
+ if (p->phydev)
+ return phy_mii_ioctl(p->phydev, rq, cmd);
return -EINVAL;
+ }
+}
- if (!p->phydev)
- return -EINVAL;
+static void octeon_mgmt_disable_link(struct octeon_mgmt *p)
+{
+ union cvmx_agl_gmx_prtx_cfg prtx_cfg;
- return phy_mii_ioctl(p->phydev, rq, cmd);
+ /* Disable GMX before we make any changes. */
+ prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
+ prtx_cfg.s.en = 0;
+ prtx_cfg.s.tx_en = 0;
+ prtx_cfg.s.rx_en = 0;
+ cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
+
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
+ int i;
+ for (i = 0; i < 10; i++) {
+ prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
+ if (prtx_cfg.s.tx_idle == 1 || prtx_cfg.s.rx_idle == 1)
+ break;
+ mdelay(1);
+ }
+ }
+}
+
+static void octeon_mgmt_enable_link(struct octeon_mgmt *p)
+{
+ union cvmx_agl_gmx_prtx_cfg prtx_cfg;
+
+ /* Restore the GMX enable state only if link is set */
+ prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
+ prtx_cfg.s.tx_en = 1;
+ prtx_cfg.s.rx_en = 1;
+ prtx_cfg.s.en = 1;
+ cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
+}
+
+static void octeon_mgmt_update_link(struct octeon_mgmt *p)
+{
+ union cvmx_agl_gmx_prtx_cfg prtx_cfg;
+
+ prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
+
+ if (!p->phydev->link)
+ prtx_cfg.s.duplex = 1;
+ else
+ prtx_cfg.s.duplex = p->phydev->duplex;
+
+ switch (p->phydev->speed) {
+ case 10:
+ prtx_cfg.s.speed = 0;
+ prtx_cfg.s.slottime = 0;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
+ prtx_cfg.s.burst = 1;
+ prtx_cfg.s.speed_msb = 1;
+ }
+ break;
+ case 100:
+ prtx_cfg.s.speed = 0;
+ prtx_cfg.s.slottime = 0;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
+ prtx_cfg.s.burst = 1;
+ prtx_cfg.s.speed_msb = 0;
+ }
+ break;
+ case 1000:
+ /* 1000 Mbps is only supported on 6XXX chips */
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
+ prtx_cfg.s.speed = 1;
+ prtx_cfg.s.speed_msb = 0;
+ /* Only matters for half-duplex */
+ prtx_cfg.s.slottime = 1;
+ prtx_cfg.s.burst = p->phydev->duplex;
+ }
+ break;
+ case 0: /* No link */
+ default:
+ break;
+ }
+
+ /* Write the new GMX setting with the port still disabled. */
+ cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
+
+ /* Read GMX CFG again to make sure the config is completed. */
+ prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
+
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
+ union cvmx_agl_gmx_txx_clk agl_clk;
+ union cvmx_agl_prtx_ctl prtx_ctl;
+
+ prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
+ agl_clk.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_CLK);
+ /* MII (both speeds) and RGMII 1000 speed. */
+ agl_clk.s.clk_cnt = 1;
+ if (prtx_ctl.s.mode == 0) { /* RGMII mode */
+ if (p->phydev->speed == 10)
+ agl_clk.s.clk_cnt = 50;
+ else if (p->phydev->speed == 100)
+ agl_clk.s.clk_cnt = 5;
+ }
+ cvmx_write_csr(p->agl + AGL_GMX_TX_CLK, agl_clk.u64);
+ }
}
static void octeon_mgmt_adjust_link(struct net_device *netdev)
{
struct octeon_mgmt *p = netdev_priv(netdev);
- union cvmx_agl_gmx_prtx_cfg prtx_cfg;
unsigned long flags;
int link_changed = 0;
+ if (!p->phydev)
+ return;
+
spin_lock_irqsave(&p->lock, flags);
- if (p->phydev->link) {
- if (!p->last_link)
- link_changed = 1;
- if (p->last_duplex != p->phydev->duplex) {
- p->last_duplex = p->phydev->duplex;
- prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
- prtx_cfg.s.duplex = p->phydev->duplex;
- cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
- }
- } else {
- if (p->last_link)
- link_changed = -1;
+
+ if (!p->phydev->link && p->last_link)
+ link_changed = -1;
+
+ if (p->phydev->link
+ && (p->last_duplex != p->phydev->duplex
+ || p->last_link != p->phydev->link
+ || p->last_speed != p->phydev->speed)) {
+ octeon_mgmt_disable_link(p);
+ link_changed = 1;
+ octeon_mgmt_update_link(p);
+ octeon_mgmt_enable_link(p);
}
+
p->last_link = p->phydev->link;
+ p->last_speed = p->phydev->speed;
+ p->last_duplex = p->phydev->duplex;
+
spin_unlock_irqrestore(&p->lock, flags);
if (link_changed != 0) {
if (link_changed > 0) {
- netif_carrier_on(netdev);
pr_info("%s: Link is up - %d/%s\n", netdev->name,
p->phydev->speed,
DUPLEX_FULL == p->phydev->duplex ?
"Full" : "Half");
} else {
- netif_carrier_off(netdev);
pr_info("%s: Link is down\n", netdev->name);
}
}
@@ -723,9 +974,7 @@ static int octeon_mgmt_init_phy(struct net_device *netdev)
PHY_INTERFACE_MODE_MII);
if (!p->phydev)
- return -1;
-
- phy_start_aneg(p->phydev);
+ return -ENODEV;
return 0;
}
@@ -733,12 +982,10 @@ static int octeon_mgmt_init_phy(struct net_device *netdev)
static int octeon_mgmt_open(struct net_device *netdev)
{
struct octeon_mgmt *p = netdev_priv(netdev);
- int port = p->port;
union cvmx_mixx_ctl mix_ctl;
union cvmx_agl_gmx_inf_mode agl_gmx_inf_mode;
union cvmx_mixx_oring1 oring1;
union cvmx_mixx_iring1 iring1;
- union cvmx_agl_gmx_prtx_cfg prtx_cfg;
union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
union cvmx_mixx_irhwm mix_irhwm;
union cvmx_mixx_orhwm mix_orhwm;
@@ -785,9 +1032,30 @@ static int octeon_mgmt_open(struct net_device *netdev)
} while (mix_ctl.s.reset);
}
- agl_gmx_inf_mode.u64 = 0;
- agl_gmx_inf_mode.s.en = 1;
- cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);
+ if (OCTEON_IS_MODEL(OCTEON_CN5XXX)) {
+ agl_gmx_inf_mode.u64 = 0;
+ agl_gmx_inf_mode.s.en = 1;
+ cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);
+ }
+ if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
+ || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
+ /* Force compensation values, as they are not
+ * determined properly by HW
+ */
+ union cvmx_agl_gmx_drv_ctl drv_ctl;
+
+ drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL);
+ if (p->port) {
+ drv_ctl.s.byp_en1 = 1;
+ drv_ctl.s.nctl1 = 6;
+ drv_ctl.s.pctl1 = 6;
+ } else {
+ drv_ctl.s.byp_en = 1;
+ drv_ctl.s.nctl = 6;
+ drv_ctl.s.pctl = 6;
+ }
+ cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64);
+ }
oring1.u64 = 0;
oring1.s.obase = p->tx_ring_handle >> 3;
@@ -799,18 +1067,12 @@ static int octeon_mgmt_open(struct net_device *netdev)
iring1.s.isize = OCTEON_MGMT_RX_RING_SIZE;
cvmx_write_csr(p->mix + MIX_IRING1, iring1.u64);
- /* Disable packet I/O. */
- prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
- prtx_cfg.s.en = 0;
- cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
-
memcpy(sa.sa_data, netdev->dev_addr, ETH_ALEN);
octeon_mgmt_set_mac_address(netdev, &sa);
octeon_mgmt_change_mtu(netdev, netdev->mtu);
- /*
- * Enable the port HW. Packets are not allowed until
+ /* Enable the port HW. Packets are not allowed until
* cvmx_mgmt_port_enable() is called.
*/
mix_ctl.u64 = 0;
@@ -819,27 +1081,70 @@ static int octeon_mgmt_open(struct net_device *netdev)
mix_ctl.s.nbtarb = 0; /* Arbitration mode */
/* MII CB-request FIFO programmable high watermark */
mix_ctl.s.mrq_hwm = 1;
+#ifdef __LITTLE_ENDIAN
+ mix_ctl.s.lendian = 1;
+#endif
cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
- if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
- || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
- /*
- * Force compensation values, as they are not
- * determined properly by HW
- */
- union cvmx_agl_gmx_drv_ctl drv_ctl;
+ /* Read the PHY to find the mode of the interface. */
+ if (octeon_mgmt_init_phy(netdev)) {
+ dev_err(p->dev, "Cannot initialize PHY on MIX%d.\n", p->port);
+ goto err_noirq;
+ }
- drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL);
- if (port) {
- drv_ctl.s.byp_en1 = 1;
- drv_ctl.s.nctl1 = 6;
- drv_ctl.s.pctl1 = 6;
- } else {
- drv_ctl.s.byp_en = 1;
- drv_ctl.s.nctl = 6;
- drv_ctl.s.pctl = 6;
+ /* Set the mode of the interface, RGMII/MII. */
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && p->phydev) {
+ union cvmx_agl_prtx_ctl agl_prtx_ctl;
+ int rgmii_mode = (p->phydev->supported &
+ (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)) != 0;
+
+ agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
+ agl_prtx_ctl.s.mode = rgmii_mode ? 0 : 1;
+ cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);
+
+ /* MII clock counts are based on the 125 MHz
+ * reference, which has an 8 ns period, so our delays
+ * need to be multiplied by this factor.
+ */
+#define NS_PER_PHY_CLK 8
+
+ /* Take the DLL and clock tree out of reset */
+ agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
+ agl_prtx_ctl.s.clkrst = 0;
+ if (rgmii_mode) {
+ agl_prtx_ctl.s.dllrst = 0;
+ agl_prtx_ctl.s.clktx_byp = 0;
}
- cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64);
+ cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);
+ cvmx_read_csr(p->agl_prt_ctl); /* Force write out before wait */
+
+ /* Wait for the DLL to lock. External 125 MHz
+ * reference clock must be stable at this point.
+ */
+ ndelay(256 * NS_PER_PHY_CLK);
+
+ /* Enable the interface */
+ agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
+ agl_prtx_ctl.s.enable = 1;
+ cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);
+
+ /* Read the value back to force the previous write */
+ agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
+
+ /* Enable the compensation controller */
+ agl_prtx_ctl.s.comp = 1;
+ agl_prtx_ctl.s.drv_byp = 0;
+ cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);
+ /* Force write out before wait. */
+ cvmx_read_csr(p->agl_prt_ctl);
+
+ /* Wait for the compensation state to lock. */
+ ndelay(1040 * NS_PER_PHY_CLK);
+
+ /* Some Ethernet switches cannot handle standard
+ * Interframe Gap, increase to 16 bytes.
+ */
+ cvmx_write_csr(CVMX_AGL_GMX_TX_IFG, 0x88);
}
octeon_mgmt_rx_fill_ring(netdev);
@@ -870,7 +1175,7 @@ static int octeon_mgmt_open(struct net_device *netdev)
/* Interrupt when we have 1 or more packets to clean. */
mix_orhwm.u64 = 0;
- mix_orhwm.s.orhwm = 1;
+ mix_orhwm.s.orhwm = 0;
cvmx_write_csr(p->mix + MIX_ORHWM, mix_orhwm.u64);
/* Enable receive and transmit interrupts */
@@ -879,13 +1184,12 @@ static int octeon_mgmt_open(struct net_device *netdev)
mix_intena.s.othena = 1;
cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
-
/* Enable packet I/O. */
rxx_frm_ctl.u64 = 0;
+ rxx_frm_ctl.s.ptp_mode = p->has_rx_tstamp ? 1 : 0;
rxx_frm_ctl.s.pre_align = 1;
- /*
- * When set, disables the length check for non-min sized pkts
+ /* When set, disables the length check for non-min sized pkts
* with padding in the client data.
*/
rxx_frm_ctl.s.pad_len = 1;
@@ -903,33 +1207,26 @@ static int octeon_mgmt_open(struct net_device *netdev)
rxx_frm_ctl.s.ctl_drp = 1;
/* Strip off the preamble */
rxx_frm_ctl.s.pre_strp = 1;
- /*
- * This port is configured to send PREAMBLE+SFD to begin every
+ /* This port is configured to send PREAMBLE+SFD to begin every
* frame. GMX checks that the PREAMBLE is sent correctly.
*/
rxx_frm_ctl.s.pre_chk = 1;
cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
- /* Enable the AGL block */
- agl_gmx_inf_mode.u64 = 0;
- agl_gmx_inf_mode.s.en = 1;
- cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);
-
- /* Configure the port duplex and enables */
- prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
- prtx_cfg.s.tx_en = 1;
- prtx_cfg.s.rx_en = 1;
- prtx_cfg.s.en = 1;
- p->last_duplex = 1;
- prtx_cfg.s.duplex = p->last_duplex;
- cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
+ /* Configure the port duplex, speed and enables */
+ octeon_mgmt_disable_link(p);
+ if (p->phydev)
+ octeon_mgmt_update_link(p);
+ octeon_mgmt_enable_link(p);
p->last_link = 0;
- netif_carrier_off(netdev);
-
- if (octeon_mgmt_init_phy(netdev)) {
- dev_err(p->dev, "Cannot initialize PHY.\n");
- goto err_noirq;
+ p->last_speed = 0;
+ /* The PHY is not present in the simulator. The carrier is
+ * enabled while initializing the PHY for the simulator; leave it enabled.
+ */
+ if (p->phydev) {
+ netif_carrier_off(netdev);
+ phy_start_aneg(p->phydev);
}
netif_wake_queue(netdev);
@@ -959,6 +1256,7 @@ static int octeon_mgmt_stop(struct net_device *netdev)
if (p->phydev)
phy_disconnect(p->phydev);
+ p->phydev = NULL;
netif_carrier_off(netdev);
@@ -991,6 +1289,7 @@ static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
int rv = NETDEV_TX_BUSY;
re.d64 = 0;
+ re.s.tstamp = ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) != 0);
re.s.len = skb->len;
re.s.addr = dma_map_single(p->dev, skb->data,
skb->len,
@@ -1031,6 +1330,7 @@ static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
/* Ring the bell. */
cvmx_write_csr(p->mix + MIX_ORING2, 1);
+ netdev->trans_start = jiffies;
rv = NETDEV_TX_OK;
out:
octeon_mgmt_update_tx_stats(netdev);
@@ -1068,7 +1368,7 @@ static int octeon_mgmt_get_settings(struct net_device *netdev,
if (p->phydev)
return phy_ethtool_gset(p->phydev, cmd);
- return -EINVAL;
+ return -EOPNOTSUPP;
}
static int octeon_mgmt_set_settings(struct net_device *netdev,
@@ -1082,23 +1382,37 @@ static int octeon_mgmt_set_settings(struct net_device *netdev,
if (p->phydev)
return phy_ethtool_sset(p->phydev, cmd);
- return -EINVAL;
+ return -EOPNOTSUPP;
+}
+
+static int octeon_mgmt_nway_reset(struct net_device *dev)
+{
+ struct octeon_mgmt *p = netdev_priv(dev);
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (p->phydev)
+ return phy_start_aneg(p->phydev);
+
+ return -EOPNOTSUPP;
}
static const struct ethtool_ops octeon_mgmt_ethtool_ops = {
.get_drvinfo = octeon_mgmt_get_drvinfo,
- .get_link = ethtool_op_get_link,
.get_settings = octeon_mgmt_get_settings,
- .set_settings = octeon_mgmt_set_settings
+ .set_settings = octeon_mgmt_set_settings,
+ .nway_reset = octeon_mgmt_nway_reset,
+ .get_link = ethtool_op_get_link,
};
static const struct net_device_ops octeon_mgmt_ops = {
.ndo_open = octeon_mgmt_open,
.ndo_stop = octeon_mgmt_stop,
.ndo_start_xmit = octeon_mgmt_xmit,
- .ndo_set_rx_mode = octeon_mgmt_set_rx_filtering,
+ .ndo_set_rx_mode = octeon_mgmt_set_rx_filtering,
.ndo_set_mac_address = octeon_mgmt_set_mac_address,
- .ndo_do_ioctl = octeon_mgmt_ioctl,
+ .ndo_do_ioctl = octeon_mgmt_ioctl,
.ndo_change_mtu = octeon_mgmt_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = octeon_mgmt_poll_controller,
@@ -1113,6 +1427,7 @@ static int __devinit octeon_mgmt_probe(struct platform_device *pdev)
const u8 *mac;
struct resource *res_mix;
struct resource *res_agl;
+ struct resource *res_agl_prt_ctl;
int len;
int result;
@@ -1120,6 +1435,8 @@ static int __devinit octeon_mgmt_probe(struct platform_device *pdev)
if (netdev == NULL)
return -ENOMEM;
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
dev_set_drvdata(&pdev->dev, netdev);
p = netdev_priv(netdev);
netif_napi_add(netdev, &p->napi, octeon_mgmt_napi_poll,
@@ -1127,6 +1444,7 @@ static int __devinit octeon_mgmt_probe(struct platform_device *pdev)
p->netdev = netdev;
p->dev = &pdev->dev;
+ p->has_rx_tstamp = false;
data = of_get_property(pdev->dev.of_node, "cell-index", &len);
if (data && len == sizeof(*data)) {
@@ -1159,10 +1477,19 @@ static int __devinit octeon_mgmt_probe(struct platform_device *pdev)
goto err;
}
+ res_agl_prt_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 3);
+ if (res_agl_prt_ctl == NULL) {
+ dev_err(&pdev->dev, "no 'reg' resource\n");
+ result = -ENXIO;
+ goto err;
+ }
+
p->mix_phys = res_mix->start;
p->mix_size = resource_size(res_mix);
p->agl_phys = res_agl->start;
p->agl_size = resource_size(res_agl);
+ p->agl_prt_ctl_phys = res_agl_prt_ctl->start;
+ p->agl_prt_ctl_size = resource_size(res_agl_prt_ctl);
if (!devm_request_mem_region(&pdev->dev, p->mix_phys, p->mix_size,
@@ -1181,10 +1508,18 @@ static int __devinit octeon_mgmt_probe(struct platform_device *pdev)
goto err;
}
+ if (!devm_request_mem_region(&pdev->dev, p->agl_prt_ctl_phys,
+ p->agl_prt_ctl_size, res_agl_prt_ctl->name)) {
+ result = -ENXIO;
+ dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
+ res_agl_prt_ctl->name);
+ goto err;
+ }
p->mix = (u64)devm_ioremap(&pdev->dev, p->mix_phys, p->mix_size);
p->agl = (u64)devm_ioremap(&pdev->dev, p->agl_phys, p->agl_size);
-
+ p->agl_prt_ctl = (u64)devm_ioremap(&pdev->dev, p->agl_prt_ctl_phys,
+ p->agl_prt_ctl_size);
spin_lock_init(&p->lock);
skb_queue_head_init(&p->tx_list);
@@ -1199,14 +1534,19 @@ static int __devinit octeon_mgmt_probe(struct platform_device *pdev)
mac = of_get_mac_address(pdev->dev.of_node);
- if (mac)
- memcpy(netdev->dev_addr, mac, 6);
+ if (mac && is_valid_ether_addr(mac)) {
+ memcpy(netdev->dev_addr, mac, ETH_ALEN);
+ netdev->addr_assign_type &= ~NET_ADDR_RANDOM;
+ } else {
+ eth_hw_addr_random(netdev);
+ }
p->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
pdev->dev.coherent_dma_mask = DMA_BIT_MASK(64);
pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+ netif_carrier_off(netdev);
result = register_netdev(netdev);
if (result)
goto err;
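
Much of the octeon_mgmt diff above is SIOCSHWTSTAMP plumbing. A minimal sketch of the handler shape, assuming a hypothetical hw_enable_rx_ts() hook: copy the user config in, reject unsupported modes, report the coarser filter actually programmed, and copy the config back.

#include <linux/errno.h>
#include <linux/if.h>
#include <linux/net_tstamp.h>
#include <linux/uaccess.h>

static int sketch_hwtstamp(struct ifreq *rq, void (*hw_enable_rx_ts)(bool on))
{
	struct hwtstamp_config cfg;

	if (copy_from_user(&cfg, rq->ifr_data, sizeof(cfg)))
		return -EFAULT;
	if (cfg.flags)			/* reserved for future extensions */
		return -EINVAL;

	switch (cfg.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		hw_enable_rx_ts(false);
		break;
	default:
		/* this hardware stamps all packets or none */
		hw_enable_rx_ts(true);
		cfg.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	}

	return copy_to_user(rq->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}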
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
index 9730241..5296cc8 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
@@ -26,6 +26,9 @@ if PCH_GBE
config PCH_PTP
bool "PCH PTP clock support"
default n
+ depends on EXPERIMENTAL
+ select PPS
+ select PTP_1588_CLOCK
select PTP_1588_CLOCK_PCH
---help---
Say Y here if you want to use Precision Time Protocol (PTP) in the
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 473ce13..24ad17e 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -1601,7 +1601,8 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->netdev = netdev;
adapter->pdev = pdev;
- if (qlcnic_alloc_adapter_resources(adapter))
+ err = qlcnic_alloc_adapter_resources(adapter);
+ if (err)
goto err_out_free_netdev;
adapter->dev_rst_time = jiffies;
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 995d0cf..1c81825 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -563,7 +563,7 @@ rx_next:
if (cpr16(IntrStatus) & cp_rx_intr_mask)
goto rx_status_loop;
- napi_gro_flush(napi);
+ napi_gro_flush(napi, false);
spin_lock_irqsave(&cp->lock, flags);
__napi_complete(napi);
cpw16_f(IntrMask, cp_intr_mask);
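
The skge and 8139cp hunks both track the new napi_gro_flush() signature: the added bool selects whether only aged-out GRO packets are flushed (true) or everything (false), and drivers completing a poll pass false. A minimal sketch of the completion sequence, behind a hypothetical wrapper name:

#include <linux/netdevice.h>

static void poll_done(struct napi_struct *napi)
{
	/* flush every held GRO packet before re-enabling interrupts */
	napi_gro_flush(napi, false);
	__napi_complete(napi);
}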
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index bad8f2e..c8bfea0 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -2438,6 +2438,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (!rtsu) {
dev_err(&pdev->dev, "Not found TSU resource\n");
+ ret = -ENODEV;
goto out_release;
}
mdp->tsu_addr = ioremap(rtsu->start,
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index 5b3dd02..0767043f 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -640,8 +640,7 @@ static void efx_ptp_drop_time_expired_events(struct efx_nic *efx)
evt = list_entry(cursor, struct efx_ptp_event_rx,
link);
if (time_after(jiffies, evt->expiry)) {
- list_del(&evt->link);
- list_add(&evt->link, &ptp->evt_free_list);
+ list_move(&evt->link, &ptp->evt_free_list);
netif_warn(efx, hw, efx->net_dev,
"PTP rx event dropped\n");
}
@@ -684,8 +683,7 @@ static enum ptp_packet_state efx_ptp_match_rx(struct efx_nic *efx,
match->state = PTP_PACKET_STATE_MATCHED;
rc = PTP_PACKET_STATE_MATCHED;
- list_del(&evt->link);
- list_add(&evt->link, &ptp->evt_free_list);
+ list_move(&evt->link, &ptp->evt_free_list);
break;
}
}
@@ -820,8 +818,7 @@ static int efx_ptp_stop(struct efx_nic *efx)
/* Drop any pending receive events */
spin_lock_bh(&efx->ptp_data->evt_lock);
list_for_each_safe(cursor, next, &efx->ptp_data->evt_list) {
- list_del(cursor);
- list_add(cursor, &efx->ptp_data->evt_free_list);
+ list_move(cursor, &efx->ptp_data->evt_free_list);
}
spin_unlock_bh(&efx->ptp_data->evt_lock);
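
The three sfc hunks replace list_del()+list_add() pairs with list_move(), which is the same operation expressed once. A minimal sketch with hypothetical list names:

#include <linux/list.h>

static void release_first(struct list_head *busy, struct list_head *free_list)
{
	if (!list_empty(busy))
		list_move(busy->next, free_list);	/* del + add in one call */
}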
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index 203d9c6..fb9f6b3 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -478,8 +478,10 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev,
/* IO region. */
ioaddr = pci_iomap(pci_dev, 0, 0);
- if (!ioaddr)
+ if (!ioaddr) {
+ ret = -ENOMEM;
goto err_out_cleardev;
+ }
sis_priv = netdev_priv(net_dev);
sis_priv->ioaddr = ioaddr;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index e872e1d..7d51a65 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -50,7 +50,6 @@ struct stmmac_priv {
unsigned int dirty_rx;
struct sk_buff **rx_skbuff;
dma_addr_t *rx_skbuff_dma;
- struct sk_buff_head rx_recycle;
struct net_device *dev;
dma_addr_t dma_rx_phy;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 3be8833..c6cdbc4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -747,18 +747,7 @@ static void stmmac_tx(struct stmmac_priv *priv)
priv->hw->ring->clean_desc3(p);
if (likely(skb != NULL)) {
- /*
- * If there's room in the queue (limit it to size)
- * we add this skb back into the pool,
- * if it's the right size.
- */
- if ((skb_queue_len(&priv->rx_recycle) <
- priv->dma_rx_size) &&
- skb_recycle_check(skb, priv->dma_buf_sz))
- __skb_queue_head(&priv->rx_recycle, skb);
- else
- dev_kfree_skb(skb);
-
+ dev_kfree_skb(skb);
priv->tx_skbuff[entry] = NULL;
}
@@ -1169,7 +1158,6 @@ static int stmmac_open(struct net_device *dev)
priv->eee_enabled = stmmac_eee_init(priv);
napi_enable(&priv->napi);
- skb_queue_head_init(&priv->rx_recycle);
netif_start_queue(dev);
return 0;
@@ -1222,7 +1210,6 @@ static int stmmac_release(struct net_device *dev)
kfree(priv->tm);
#endif
napi_disable(&priv->napi);
- skb_queue_purge(&priv->rx_recycle);
/* Free the IRQ lines */
free_irq(dev->irq, dev);
@@ -1388,10 +1375,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
if (likely(priv->rx_skbuff[entry] == NULL)) {
struct sk_buff *skb;
- skb = __skb_dequeue(&priv->rx_recycle);
- if (skb == NULL)
- skb = netdev_alloc_skb_ip_align(priv->dev,
- bfsize);
+ skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
if (unlikely(skb == NULL))
break;
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 8419bf3..275b430 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -9788,6 +9788,7 @@ static int __devinit niu_pci_init_one(struct pci_dev *pdev,
if (!pci_is_pcie(pdev)) {
dev_err(&pdev->dev, "Cannot find PCI Express capability, aborting\n");
+ err = -ENODEV;
goto err_out_free_res;
}
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index 9ae12d0..6c8695e 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -2963,7 +2963,8 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
goto err_out_iounmap;
}
- if (gem_get_device_address(gp))
+ err = gem_get_device_address(gp);
+ if (err)
goto err_out_free_consistent;
dev->netdev_ops = &gem_netdev_ops;
diff --git a/drivers/net/irda/irtty-sir.c b/drivers/net/irda/irtty-sir.c
index 30087ca..6e4d4b6 100644
--- a/drivers/net/irda/irtty-sir.c
+++ b/drivers/net/irda/irtty-sir.c
@@ -459,8 +459,10 @@ static int irtty_open(struct tty_struct *tty)
/* allocate private device info block */
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
+ if (!priv) {
+ ret = -ENOMEM;
goto out_put;
+ }
priv->magic = IRTTY_MAGIC;
priv->tty = tty;
diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c
index 1a00b59..f07c340 100644
--- a/drivers/net/irda/mcs7780.c
+++ b/drivers/net/irda/mcs7780.c
@@ -920,8 +920,10 @@ static int mcs_probe(struct usb_interface *intf,
ndev->netdev_ops = &mcs_netdev_ops;
- if (!intf->cur_altsetting)
+ if (!intf->cur_altsetting) {
+ ret = -ENOMEM;
goto error2;
+ }
ret = mcs_find_endpoints(mcs, intf->cur_altsetting->endpoint,
intf->cur_altsetting->desc.bNumEndpoints);
diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c
index 002a442..858de05 100644
--- a/drivers/net/irda/pxaficp_ir.c
+++ b/drivers/net/irda/pxaficp_ir.c
@@ -846,8 +846,10 @@ static int pxa_irda_probe(struct platform_device *pdev)
goto err_mem_2;
dev = alloc_irdadev(sizeof(struct pxa_irda));
- if (!dev)
+ if (!dev) {
+ err = -ENOMEM;
goto err_mem_3;
+ }
SET_NETDEV_DEV(dev, &pdev->dev);
si = netdev_priv(dev);
diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c
index e250675..42fde9e 100644
--- a/drivers/net/irda/sa1100_ir.c
+++ b/drivers/net/irda/sa1100_ir.c
@@ -940,8 +940,10 @@ static int sa1100_irda_probe(struct platform_device *pdev)
goto err_mem_3;
dev = alloc_irdadev(sizeof(struct sa1100_irda));
- if (!dev)
+ if (!dev) {
+ err = -ENOMEM;
goto err_mem_4;
+ }
SET_NETDEV_DEV(dev, &pdev->dev);
diff --git a/drivers/net/irda/sh_irda.c b/drivers/net/irda/sh_irda.c
index eb315b8..4b746d9 100644
--- a/drivers/net/irda/sh_irda.c
+++ b/drivers/net/irda/sh_irda.c
@@ -808,8 +808,8 @@ static int __devinit sh_irda_probe(struct platform_device *pdev)
goto err_mem_4;
platform_set_drvdata(pdev, ndev);
-
- if (request_irq(irq, sh_irda_irq, IRQF_DISABLED, "sh_irda", self)) {
+ err = request_irq(irq, sh_irda_irq, IRQF_DISABLED, "sh_irda", self);
+ if (err) {
dev_warn(&pdev->dev, "Unable to attach sh_irda interrupt\n");
goto err_mem_4;
}
diff --git a/drivers/net/irda/sh_sir.c b/drivers/net/irda/sh_sir.c
index 7951094..624ac19 100644
--- a/drivers/net/irda/sh_sir.c
+++ b/drivers/net/irda/sh_sir.c
@@ -741,6 +741,7 @@ static int __devinit sh_sir_probe(struct platform_device *pdev)
self->clk = clk_get(&pdev->dev, clk_name);
if (IS_ERR(self->clk)) {
dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name);
+ err = -ENODEV;
goto err_mem_3;
}
@@ -760,8 +761,8 @@ static int __devinit sh_sir_probe(struct platform_device *pdev)
goto err_mem_4;
platform_set_drvdata(pdev, ndev);
-
- if (request_irq(irq, sh_sir_irq, IRQF_DISABLED, "sh_sir", self)) {
+ err = request_irq(irq, sh_sir_irq, IRQF_DISABLED, "sh_sir", self);
+ if (err) {
dev_warn(&pdev->dev, "Unable to attach sh_sir interrupt\n");
goto err_mem_4;
}
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 170eb41..c1ef300 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -26,6 +26,7 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/of_device.h>
+#include <linux/of_mdio.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 51de9ed..8be9bf0 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -28,7 +28,6 @@
#include <linux/igmp.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
-#include <linux/version.h>
#include <linux/hash.h>
#include <net/ip.h>
#include <net/icmp.h>
@@ -1084,13 +1083,13 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
if (nla_put_u32(skb, IFLA_VXLAN_ID, vxlan->vni))
goto nla_put_failure;
- if (vxlan->gaddr && nla_put_u32(skb, IFLA_VXLAN_GROUP, vxlan->gaddr))
+ if (vxlan->gaddr && nla_put_be32(skb, IFLA_VXLAN_GROUP, vxlan->gaddr))
goto nla_put_failure;
if (vxlan->link && nla_put_u32(skb, IFLA_VXLAN_LINK, vxlan->link))
goto nla_put_failure;
- if (vxlan->saddr && nla_put_u32(skb, IFLA_VXLAN_LOCAL, vxlan->saddr))
+ if (vxlan->saddr && nla_put_be32(skb, IFLA_VXLAN_LOCAL, vxlan->saddr))
goto nla_put_failure;
if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->ttl) ||
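
The vxlan fields gaddr and saddr are __be32, so nla_put_be32() is the sparse-clean helper here: neither function swaps bytes, but the _be32 variant records the on-wire byte order in the type. A minimal sketch with a hypothetical attribute number:

#include <net/netlink.h>

static int put_tunnel_addr(struct sk_buff *skb, __be32 addr)
{
	return nla_put_be32(skb, 1 /* hypothetical attr type */, addr);
}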
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index 1a62318..b627132 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -597,7 +597,7 @@ fst_q_work_item(u64 * queue, int card_index)
* bottom half for the card. Note the limitation of 64 cards.
* That ought to be enough
*/
- mask = 1 << card_index;
+ mask = (u64)1 << card_index;
*queue |= mask;
spin_unlock_irqrestore(&fst_work_q_lock, flags);
}
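
The farsync fix widens the shift operand: "1 << card_index" is an int expression, so for indexes of 31 and above the shift overflows before the result is ever widened to u64. A minimal sketch of the corrected form:

#include <linux/types.h>

static u64 card_mask(int card_index)
{
	/* cast first so the shift happens in 64 bits; BIT_ULL() also works */
	return (u64)1 << card_index;
}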
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
index f34b5b2..d93b2b6 100644
--- a/drivers/oprofile/buffer_sync.c
+++ b/drivers/oprofile/buffer_sync.c
@@ -216,7 +216,7 @@ static inline unsigned long fast_get_dcookie(struct path *path)
}
-/* Look up the dcookie for the task's first VM_EXECUTABLE mapping,
+/* Look up the dcookie for the task's mm->exe_file,
* which corresponds loosely to "application name". This is
* not strictly necessary but allows oprofile to associate
* shared-library samples with particular applications
@@ -224,21 +224,10 @@ static inline unsigned long fast_get_dcookie(struct path *path)
static unsigned long get_exec_dcookie(struct mm_struct *mm)
{
unsigned long cookie = NO_COOKIE;
- struct vm_area_struct *vma;
-
- if (!mm)
- goto out;
- for (vma = mm->mmap; vma; vma = vma->vm_next) {
- if (!vma->vm_file)
- continue;
- if (!(vma->vm_flags & VM_EXECUTABLE))
- continue;
- cookie = fast_get_dcookie(&vma->vm_file->f_path);
- break;
- }
+ if (mm && mm->exe_file)
+ cookie = fast_get_dcookie(&mm->exe_file->f_path);
-out:
return cookie;
}
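
The oprofile hunk drops the VMA walk because the executable is now tracked directly on the mm: with VM_EXECUTABLE on its way out, mm->exe_file is the authoritative handle. A minimal sketch of the lookup, with a hypothetical helper name:

#include <linux/fs.h>
#include <linux/mm_types.h>

static struct file *exec_file(struct mm_struct *mm)
{
	return mm ? mm->exe_file : NULL;	/* no per-VMA flag test needed */
}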
diff --git a/drivers/parport/Kconfig b/drivers/parport/Kconfig
index d92185a..4b6e4e7 100644
--- a/drivers/parport/Kconfig
+++ b/drivers/parport/Kconfig
@@ -36,7 +36,7 @@ if PARPORT
config PARPORT_PC
tristate "PC-style hardware"
depends on (!SPARC64 || PCI) && !SPARC32 && !M32R && !FRV && \
- (!M68K || ISA) && !MN10300 && !AVR32 && !BLACKFIN
+ (!M68K || ISA) && !MN10300 && !AVR32 && !BLACKFIN && !XTENSA
---help---
You should say Y here if you have a PC-style parallel port. All
IBM PC compatible computers and some Alphas have PC-style
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index 1ef6e1e..7bf914d 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -68,10 +68,21 @@ config PINCTRL_IMX6Q
help
Say Y here to enable the imx6q pinctrl driver
+config PINCTRL_LANTIQ
+ bool
+ depends on LANTIQ
+ select PINMUX
+ select PINCONF
+
config PINCTRL_PXA3xx
bool
select PINMUX
+config PINCTRL_FALCON
+ bool
+ depends on SOC_FALCON
+ depends on PINCTRL_LANTIQ
+
config PINCTRL_MMP2
bool "MMP2 pin controller driver"
depends on ARCH_MMP
@@ -175,8 +186,35 @@ config PINCTRL_EXYNOS4
bool "Pinctrl driver data for Exynos4 SoC"
select PINCTRL_SAMSUNG
+config PINCTRL_MVEBU
+ bool
+ depends on ARCH_MVEBU
+ select PINMUX
+ select PINCONF
+
+config PINCTRL_DOVE
+ bool
+ select PINCTRL_MVEBU
+
+config PINCTRL_KIRKWOOD
+ bool
+ select PINCTRL_MVEBU
+
+config PINCTRL_ARMADA_370
+ bool
+ select PINCTRL_MVEBU
+
+config PINCTRL_ARMADA_XP
+ bool
+ select PINCTRL_MVEBU
+
source "drivers/pinctrl/spear/Kconfig"
+config PINCTRL_XWAY
+ bool
+ depends on SOC_TYPE_XWAY
+ depends on PINCTRL_LANTIQ
+
endmenu
endif
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index 698527d..f395ba5 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_PINCTRL_IMX51) += pinctrl-imx51.o
obj-$(CONFIG_PINCTRL_IMX53) += pinctrl-imx53.o
obj-$(CONFIG_PINCTRL_IMX6Q) += pinctrl-imx6q.o
obj-$(CONFIG_PINCTRL_PXA3xx) += pinctrl-pxa3xx.o
+obj-$(CONFIG_PINCTRL_FALCON) += pinctrl-falcon.o
obj-$(CONFIG_PINCTRL_MMP2) += pinctrl-mmp2.o
obj-$(CONFIG_PINCTRL_MXS) += pinctrl-mxs.o
obj-$(CONFIG_PINCTRL_IMX23) += pinctrl-imx23.o
@@ -35,5 +36,12 @@ obj-$(CONFIG_PINCTRL_U300) += pinctrl-u300.o
obj-$(CONFIG_PINCTRL_COH901) += pinctrl-coh901.o
obj-$(CONFIG_PINCTRL_SAMSUNG) += pinctrl-samsung.o
obj-$(CONFIG_PINCTRL_EXYNOS4) += pinctrl-exynos.o
+obj-$(CONFIG_PINCTRL_MVEBU) += pinctrl-mvebu.o
+obj-$(CONFIG_PINCTRL_DOVE) += pinctrl-dove.o
+obj-$(CONFIG_PINCTRL_KIRKWOOD) += pinctrl-kirkwood.o
+obj-$(CONFIG_PINCTRL_ARMADA_370) += pinctrl-armada-370.o
+obj-$(CONFIG_PINCTRL_ARMADA_XP) += pinctrl-armada-xp.o
+obj-$(CONFIG_PINCTRL_XWAY) += pinctrl-xway.o
+obj-$(CONFIG_PINCTRL_LANTIQ) += pinctrl-lantiq.o
obj-$(CONFIG_PLAT_SPEAR) += spear/
diff --git a/drivers/pinctrl/pinctrl-armada-370.c b/drivers/pinctrl/pinctrl-armada-370.c
new file mode 100644
index 0000000..c907647
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-armada-370.c
@@ -0,0 +1,421 @@
+/*
+ * Marvell Armada 370 pinctrl driver based on mvebu pinctrl core
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-mvebu.h"
+
+static struct mvebu_mpp_mode mv88f6710_mpp_modes[] = {
+ MPP_MODE(0,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "uart0", "rxd")),
+ MPP_MODE(1,
+ MPP_FUNCTION(0x0, "gpo", NULL),
+ MPP_FUNCTION(0x1, "uart0", "txd")),
+ MPP_MODE(2,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "i2c0", "sck"),
+ MPP_FUNCTION(0x2, "uart0", "txd")),
+ MPP_MODE(3,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "i2c0", "sda"),
+ MPP_FUNCTION(0x2, "uart0", "rxd")),
+ MPP_MODE(4,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "cpu_pd", "vdd")),
+ MPP_MODE(5,
+ MPP_FUNCTION(0x0, "gpo", NULL),
+ MPP_FUNCTION(0x1, "ge0", "txclko"),
+ MPP_FUNCTION(0x2, "uart1", "txd"),
+ MPP_FUNCTION(0x4, "spi1", "clk"),
+ MPP_FUNCTION(0x5, "audio", "mclk")),
+ MPP_MODE(6,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "ge0", "txd0"),
+ MPP_FUNCTION(0x2, "sata0", "prsnt"),
+ MPP_FUNCTION(0x4, "tdm", "rst"),
+ MPP_FUNCTION(0x5, "audio", "sdo")),
+ MPP_MODE(7,
+ MPP_FUNCTION(0x0, "gpo", NULL),
+ MPP_FUNCTION(0x1, "ge0", "txd1"),
+ MPP_FUNCTION(0x4, "tdm", "tdx"),
+ MPP_FUNCTION(0x5, "audio", "lrclk")),
+ MPP_MODE(8,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "ge0", "txd2"),
+ MPP_FUNCTION(0x2, "uart0", "rts"),
+ MPP_FUNCTION(0x4, "tdm", "drx"),
+ MPP_FUNCTION(0x5, "audio", "bclk")),
+ MPP_MODE(9,
+ MPP_FUNCTION(0x0, "gpo", NULL),
+ MPP_FUNCTION(0x1, "ge0", "txd3"),
+ MPP_FUNCTION(0x2, "uart1", "txd"),
+ MPP_FUNCTION(0x3, "sd0", "clk"),
+ MPP_FUNCTION(0x5, "audio", "spdifo")),
+ MPP_MODE(10,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "ge0", "txctl"),
+ MPP_FUNCTION(0x2, "uart0", "cts"),
+ MPP_FUNCTION(0x4, "tdm", "fsync"),
+ MPP_FUNCTION(0x5, "audio", "sdi")),
+ MPP_MODE(11,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "ge0", "rxd0"),
+ MPP_FUNCTION(0x2, "uart1", "rxd"),
+ MPP_FUNCTION(0x3, "sd0", "cmd"),
+ MPP_FUNCTION(0x4, "spi0", "cs1"),
+ MPP_FUNCTION(0x5, "sata1", "prsnt"),
+ MPP_FUNCTION(0x6, "spi1", "cs1")),
+ MPP_MODE(12,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "ge0", "rxd1"),
+ MPP_FUNCTION(0x2, "i2c1", "sda"),
+ MPP_FUNCTION(0x3, "sd0", "d0"),
+ MPP_FUNCTION(0x4, "spi1", "cs0"),
+ MPP_FUNCTION(0x5, "audio", "spdifi")),
+ MPP_MODE(13,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "ge0", "rxd2"),
+ MPP_FUNCTION(0x2, "i2c1", "sck"),
+ MPP_FUNCTION(0x3, "sd0", "d1"),
+ MPP_FUNCTION(0x4, "tdm", "pclk"),
+ MPP_FUNCTION(0x5, "audio", "rmclk")),
+ MPP_MODE(14,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "ge0", "rxd3"),
+ MPP_FUNCTION(0x2, "pcie", "clkreq0"),
+ MPP_FUNCTION(0x3, "sd0", "d2"),
+ MPP_FUNCTION(0x4, "spi1", "mosi"),
+ MPP_FUNCTION(0x5, "spi0", "cs2")),
+ MPP_MODE(15,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "ge0", "rxctl"),
+ MPP_FUNCTION(0x2, "pcie", "clkreq1"),
+ MPP_FUNCTION(0x3, "sd0", "d3"),
+ MPP_FUNCTION(0x4, "spi1", "miso"),
+ MPP_FUNCTION(0x5, "spi0", "cs3")),
+ MPP_MODE(16,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "ge0", "rxclk"),
+ MPP_FUNCTION(0x2, "uart1", "rxd"),
+ MPP_FUNCTION(0x4, "tdm", "int"),
+ MPP_FUNCTION(0x5, "audio", "extclk")),
+ MPP_MODE(17,
+ MPP_FUNCTION(0x0, "gpo", NULL),
+ MPP_FUNCTION(0x1, "ge", "mdc")),
+ MPP_MODE(18,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "ge", "mdio")),
+ MPP_MODE(19,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "ge0", "txclk"),
+ MPP_FUNCTION(0x2, "ge1", "txclkout"),
+ MPP_FUNCTION(0x4, "tdm", "pclk")),
+ MPP_MODE(20,
+ MPP_FUNCTION(0x0, "gpo", NULL),
+ MPP_FUNCTION(0x1, "ge0", "txd4"),
+ MPP_FUNCTION(0x2, "ge1", "txd0")),
+ MPP_MODE(21,
+ MPP_FUNCTION(0x0, "gpo", NULL),
+ MPP_FUNCTION(0x1, "ge0", "txd5"),
+ MPP_FUNCTION(0x2, "ge1", "txd1"),
+ MPP_FUNCTION(0x4, "uart1", "txd")),
+ MPP_MODE(22,
+ MPP_FUNCTION(0x0, "gpo", NULL),
+ MPP_FUNCTION(0x1, "ge0", "txd6"),
+ MPP_FUNCTION(0x2, "ge1", "txd2"),
+ MPP_FUNCTION(0x4, "uart0", "rts")),
+ MPP_MODE(23,
+ MPP_FUNCTION(0x0, "gpo", NULL),
+ MPP_FUNCTION(0x1, "ge0", "txd7"),
+ MPP_FUNCTION(0x2, "ge1", "txd3"),
+ MPP_FUNCTION(0x4, "spi1", "mosi")),
+ MPP_MODE(24,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "ge0", "col"),
+ MPP_FUNCTION(0x2, "ge1", "txctl"),
+ MPP_FUNCTION(0x4, "spi1", "cs0")),
+ MPP_MODE(25,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "ge0", "rxerr"),
+ MPP_FUNCTION(0x2, "ge1", "rxd0"),
+ MPP_FUNCTION(0x4, "uart1", "rxd")),
+ MPP_MODE(26,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "ge0", "crs"),
+ MPP_FUNCTION(0x2, "ge1", "rxd1"),
+ MPP_FUNCTION(0x4, "spi1", "miso")),
+ MPP_MODE(27,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "ge0", "rxd4"),
+ MPP_FUNCTION(0x2, "ge1", "rxd2"),
+ MPP_FUNCTION(0x4, "uart0", "cts")),
+ MPP_MODE(28,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "ge0", "rxd5"),
+ MPP_FUNCTION(0x2, "ge1", "rxd3")),
+ MPP_MODE(29,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "ge0", "rxd6"),
+ MPP_FUNCTION(0x2, "ge1", "rxctl"),
+ MPP_FUNCTION(0x4, "i2c1", "sda")),
+ MPP_MODE(30,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "ge0", "rxd7"),
+ MPP_FUNCTION(0x2, "ge1", "rxclk"),
+ MPP_FUNCTION(0x4, "i2c1", "sck")),
+ MPP_MODE(31,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x3, "tclk", NULL),
+ MPP_FUNCTION(0x4, "ge0", "txerr")),
+ MPP_MODE(32,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "spi0", "cs0")),
+ MPP_MODE(33,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "dev", "bootcs"),
+ MPP_FUNCTION(0x2, "spi0", "cs0")),
+ MPP_MODE(34,
+ MPP_FUNCTION(0x0, "gpo", NULL),
+ MPP_FUNCTION(0x1, "dev", "wen0"),
+ MPP_FUNCTION(0x2, "spi0", "mosi")),
+ MPP_MODE(35,
+ MPP_FUNCTION(0x0, "gpo", NULL),
+ MPP_FUNCTION(0x1, "dev", "oen"),
+ MPP_FUNCTION(0x2, "spi0", "sck")),
+ MPP_MODE(36,
+ MPP_FUNCTION(0x0, "gpo", NULL),
+ MPP_FUNCTION(0x1, "dev", "a1"),
+ MPP_FUNCTION(0x2, "spi0", "miso")),
+ MPP_MODE(37,
+ MPP_FUNCTION(0x0, "gpo", NULL),
+ MPP_FUNCTION(0x1, "dev", "a0"),
+ MPP_FUNCTION(0x2, "sata0", "prsnt")),
+ MPP_MODE(38,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "dev", "ready"),
+ MPP_FUNCTION(0x2, "uart1", "cts"),
+ MPP_FUNCTION(0x3, "uart0", "cts")),
+ MPP_MODE(39,
+ MPP_FUNCTION(0x0, "gpo", NULL),
+ MPP_FUNCTION(0x1, "dev", "ad0"),
+ MPP_FUNCTION(0x2, "audio", "spdifo")),
+ MPP_MODE(40,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "dev", "ad1"),
+ MPP_FUNCTION(0x2, "uart1", "rts"),
+ MPP_FUNCTION(0x3, "uart0", "rts")),
+ MPP_MODE(41,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "dev", "ad2"),
+ MPP_FUNCTION(0x2, "uart1", "rxd")),
+ MPP_MODE(42,
+ MPP_FUNCTION(0x0, "gpo", NULL),
+ MPP_FUNCTION(0x1, "dev", "ad3"),
+ MPP_FUNCTION(0x2, "uart1", "txd")),
+ MPP_MODE(43,
+ MPP_FUNCTION(0x0, "gpo", NULL),
+ MPP_FUNCTION(0x1, "dev", "ad4"),
+ MPP_FUNCTION(0x2, "audio", "bclk")),
+ MPP_MODE(44,
+ MPP_FUNCTION(0x0, "gpo", NULL),
+ MPP_FUNCTION(0x1, "dev", "ad5"),
+ MPP_FUNCTION(0x2, "audio", "mclk")),
+ MPP_MODE(45,
+ MPP_FUNCTION(0x0, "gpo", NULL),
+ MPP_FUNCTION(0x1, "dev", "ad6"),
+ MPP_FUNCTION(0x2, "audio", "lrclk")),
+ MPP_MODE(46,
+ MPP_FUNCTION(0x0, "gpo", NULL),
+ MPP_FUNCTION(0x1, "dev", "ad7"),
+ MPP_FUNCTION(0x2, "audio", "sdo")),
+ MPP_MODE(47,
+ MPP_FUNCTION(0x0, "gpo", NULL),
+ MPP_FUNCTION(0x1, "dev", "ad8"),
+ MPP_FUNCTION(0x3, "sd0", "clk"),
+ MPP_FUNCTION(0x5, "audio", "spdifo")),
+ MPP_MODE(48,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "dev", "ad9"),
+ MPP_FUNCTION(0x2, "uart0", "rts"),
+ MPP_FUNCTION(0x3, "sd0", "cmd"),
+ MPP_FUNCTION(0x4, "sata1", "prsnt"),
+ MPP_FUNCTION(0x5, "spi0", "cs1")),
+ MPP_MODE(49,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "dev", "ad10"),
+ MPP_FUNCTION(0x2, "pcie", "clkreq1"),
+ MPP_FUNCTION(0x3, "sd0", "d0"),
+ MPP_FUNCTION(0x4, "spi1", "cs0"),
+ MPP_FUNCTION(0x5, "audio", "spdifi")),
+ MPP_MODE(50,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "dev", "ad11"),
+ MPP_FUNCTION(0x2, "uart0", "cts"),
+ MPP_FUNCTION(0x3, "sd0", "d1"),
+ MPP_FUNCTION(0x4, "spi1", "miso"),
+ MPP_FUNCTION(0x5, "audio", "rmclk")),
+ MPP_MODE(51,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "dev", "ad12"),
+ MPP_FUNCTION(0x2, "i2c1", "sda"),
+ MPP_FUNCTION(0x3, "sd0", "d2"),
+ MPP_FUNCTION(0x4, "spi1", "mosi")),
+ MPP_MODE(52,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "dev", "ad13"),
+ MPP_FUNCTION(0x2, "i2c1", "sck"),
+ MPP_FUNCTION(0x3, "sd0", "d3"),
+ MPP_FUNCTION(0x4, "spi1", "sck")),
+ MPP_MODE(53,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "dev", "ad14"),
+ MPP_FUNCTION(0x2, "sd0", "clk"),
+ MPP_FUNCTION(0x3, "tdm", "pclk"),
+ MPP_FUNCTION(0x4, "spi0", "cs2"),
+ MPP_FUNCTION(0x5, "pcie", "clkreq1")),
+ MPP_MODE(54,
+ MPP_FUNCTION(0x0, "gpo", NULL),
+ MPP_FUNCTION(0x1, "dev", "ad15"),
+ MPP_FUNCTION(0x3, "tdm", "dtx")),
+ MPP_MODE(55,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "dev", "cs1"),
+ MPP_FUNCTION(0x2, "uart1", "txd"),
+ MPP_FUNCTION(0x3, "tdm", "rst"),
+ MPP_FUNCTION(0x4, "sata1", "prsnt"),
+ MPP_FUNCTION(0x5, "sata0", "prsnt")),
+ MPP_MODE(56,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "dev", "cs2"),
+ MPP_FUNCTION(0x2, "uart1", "cts"),
+ MPP_FUNCTION(0x3, "uart0", "cts"),
+ MPP_FUNCTION(0x4, "spi0", "cs3"),
+ MPP_FUNCTION(0x5, "pcie", "clkreq0"),
+ MPP_FUNCTION(0x6, "spi1", "cs1")),
+ MPP_MODE(57,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "dev", "cs3"),
+ MPP_FUNCTION(0x2, "uart1", "rxd"),
+ MPP_FUNCTION(0x3, "tdm", "fsync"),
+ MPP_FUNCTION(0x4, "sata0", "prsnt"),
+ MPP_FUNCTION(0x5, "audio", "sdo")),
+ MPP_MODE(58,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "dev", "cs0"),
+ MPP_FUNCTION(0x2, "uart1", "rts"),
+ MPP_FUNCTION(0x3, "tdm", "int"),
+ MPP_FUNCTION(0x5, "audio", "extclk"),
+ MPP_FUNCTION(0x6, "uart0", "rts")),
+ MPP_MODE(59,
+ MPP_FUNCTION(0x0, "gpo", NULL),
+ MPP_FUNCTION(0x1, "dev", "ale0"),
+ MPP_FUNCTION(0x2, "uart1", "rts"),
+ MPP_FUNCTION(0x3, "uart0", "rts"),
+ MPP_FUNCTION(0x5, "audio", "bclk")),
+ MPP_MODE(60,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "dev", "ale1"),
+ MPP_FUNCTION(0x2, "uart1", "rxd"),
+ MPP_FUNCTION(0x3, "sata0", "prsnt"),
+ MPP_FUNCTION(0x4, "pcie", "rst-out"),
+ MPP_FUNCTION(0x5, "audio", "sdi")),
+ MPP_MODE(61,
+ MPP_FUNCTION(0x0, "gpo", NULL),
+ MPP_FUNCTION(0x1, "dev", "wen1"),
+ MPP_FUNCTION(0x2, "uart1", "txd"),
+ MPP_FUNCTION(0x5, "audio", "rclk")),
+ MPP_MODE(62,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "dev", "a2"),
+ MPP_FUNCTION(0x2, "uart1", "cts"),
+ MPP_FUNCTION(0x3, "tdm", "drx"),
+ MPP_FUNCTION(0x4, "pcie", "clkreq0"),
+ MPP_FUNCTION(0x5, "audio", "mclk"),
+ MPP_FUNCTION(0x6, "uart0", "cts")),
+ MPP_MODE(63,
+ MPP_FUNCTION(0x0, "gpo", NULL),
+ MPP_FUNCTION(0x1, "spi0", "sck"),
+ MPP_FUNCTION(0x2, "tclk", NULL)),
+ MPP_MODE(64,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "spi0", "miso"),
+ MPP_FUNCTION(0x2, "spi0-1", "cs1")),
+ MPP_MODE(65,
+ MPP_FUNCTION(0x0, "gpio", NULL),
+ MPP_FUNCTION(0x1, "spi0", "mosi"),
+ MPP_FUNCTION(0x2, "spi0-1", "cs2")),
+};
+
+static struct mvebu_pinctrl_soc_info armada_370_pinctrl_info;
+
+static struct of_device_id armada_370_pinctrl_of_match[] __devinitdata = {
+ { .compatible = "marvell,mv88f6710-pinctrl" },
+ { },
+};
+
+static struct mvebu_mpp_ctrl mv88f6710_mpp_controls[] = {
+ MPP_REG_CTRL(0, 65),
+};
+
+static struct pinctrl_gpio_range mv88f6710_mpp_gpio_ranges[] = {
+ MPP_GPIO_RANGE(0, 0, 0, 32),
+ MPP_GPIO_RANGE(1, 32, 32, 32),
+ MPP_GPIO_RANGE(2, 64, 64, 2),
+};
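+
+/*
+ * Note on the ranges above (macro semantics assumed from
+ * pinctrl-mvebu.h: id, pin base, gpio base, pin count): they map
+ * mpp0..mpp65 1:1 onto three GPIO banks, 32 + 32 + 2 pins, matching
+ * the single MPP_REG_CTRL(0, 65) control declared above.
+ */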
+
+static int __devinit armada_370_pinctrl_probe(struct platform_device *pdev)
+{
+ struct mvebu_pinctrl_soc_info *soc = &armada_370_pinctrl_info;
+
+ soc->variant = 0; /* no variants for Armada 370 */
+ soc->controls = mv88f6710_mpp_controls;
+ soc->ncontrols = ARRAY_SIZE(mv88f6710_mpp_controls);
+ soc->modes = mv88f6710_mpp_modes;
+ soc->nmodes = ARRAY_SIZE(mv88f6710_mpp_modes);
+ soc->gpioranges = mv88f6710_mpp_gpio_ranges;
+ soc->ngpioranges = ARRAY_SIZE(mv88f6710_mpp_gpio_ranges);
+
+ pdev->dev.platform_data = soc;
+
+ return mvebu_pinctrl_probe(pdev);
+}
+
+static int __devexit armada_370_pinctrl_remove(struct platform_device *pdev)
+{
+ return mvebu_pinctrl_remove(pdev);
+}
+
+static struct platform_driver armada_370_pinctrl_driver = {
+ .driver = {
+ .name = "armada-370-pinctrl",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(armada_370_pinctrl_of_match),
+ },
+ .probe = armada_370_pinctrl_probe,
+ .remove = __devexit_p(armada_370_pinctrl_remove),
+};
+
+module_platform_driver(armada_370_pinctrl_driver);
+
+MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
+MODULE_DESCRIPTION("Marvell Armada 370 pinctrl driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/pinctrl-armada-xp.c b/drivers/pinctrl/pinctrl-armada-xp.c
new file mode 100644
index 0000000..40bd52a
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-armada-xp.c
@@ -0,0 +1,468 @@
+/*
+ * Marvell Armada XP pinctrl driver based on mvebu pinctrl core
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This file supports the three variants of Armada XP SoCs that are
+ * available: mv78230, mv78260 and mv78460. From a pin muxing
+ * perspective, the mv78230 has 49 MPP pins. The mv78260 and mv78460
+ * both have 67 MPP pins (more GPIOs and address lines for the memory
+ * bus mainly). The only difference between the mv78260 and the
+ * mv78460 in terms of pin muxing is the addition of two functions on
+ * pins 43 and 56 to access the VDD of the CPU2 and 3 (mv78260 has two
+ * cores, mv78460 has four cores).
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/bitops.h>
+
+#include "pinctrl-mvebu.h"
+
+enum armada_xp_variant {
+ V_MV78230 = BIT(0),
+ V_MV78260 = BIT(1),
+ V_MV78460 = BIT(2),
+ V_MV78230_PLUS = (V_MV78230 | V_MV78260 | V_MV78460),
+ V_MV78260_PLUS = (V_MV78260 | V_MV78460),
+};
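+
+/*
+ * Illustrative reading of the variant masks above: every
+ * MPP_VAR_FUNCTION() entry below carries one of these masks, and the
+ * mvebu core is assumed to expose a function only when the mask
+ * intersects soc->variant. An entry tagged V_MV78260_PLUS
+ * (= V_MV78260 | V_MV78460) is therefore visible on the mv78260 and
+ * mv78460 but filtered out on the mv78230.
+ */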
+
+static struct mvebu_mpp_mode armada_xp_mpp_modes[] = {
+ MPP_MODE(0,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x1, "ge0", "txclko", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x4, "lcd", "d0", V_MV78230_PLUS)),
+ MPP_MODE(1,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x1, "ge0", "txd0", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x4, "lcd", "d1", V_MV78230_PLUS)),
+ MPP_MODE(2,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x1, "ge0", "txd1", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x4, "lcd", "d2", V_MV78230_PLUS)),
+ MPP_MODE(3,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x1, "ge0", "txd2", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x4, "lcd", "d3", V_MV78230_PLUS)),
+ MPP_MODE(4,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x1, "ge0", "txd3", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x4, "lcd", "d4", V_MV78230_PLUS)),
+ MPP_MODE(5,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x1, "ge0", "txctl", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x4, "lcd", "d5", V_MV78230_PLUS)),
+ MPP_MODE(6,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x1, "ge0", "rxd0", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x4, "lcd", "d6", V_MV78230_PLUS)),
+ MPP_MODE(7,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x1, "ge0", "rxd1", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x4, "lcd", "d7", V_MV78230_PLUS)),
+ MPP_MODE(8,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x1, "ge0", "rxd2", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x4, "lcd", "d8", V_MV78230_PLUS)),
+ MPP_MODE(9,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x1, "ge0", "rxd3", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x4, "lcd", "d9", V_MV78230_PLUS)),
+ MPP_MODE(10,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x1, "ge0", "rxctl", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x4, "lcd", "d10", V_MV78230_PLUS)),
+ MPP_MODE(11,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x1, "ge0", "rxclk", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x4, "lcd", "d11", V_MV78230_PLUS)),
+ MPP_MODE(12,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x1, "ge0", "txd4", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x2, "ge1", "clkout", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x4, "lcd", "d12", V_MV78230_PLUS)),
+ MPP_MODE(13,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x1, "ge0", "txd5", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x2, "ge1", "txd0", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x4, "lcd", "d13", V_MV78230_PLUS)),
+ MPP_MODE(14,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x1, "ge0", "txd6", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x2, "ge1", "txd1", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x4, "lcd", "d14", V_MV78230_PLUS)),
+ MPP_MODE(15,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x1, "ge0", "txd7", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x2, "ge1", "txd2", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x4, "lcd", "d15", V_MV78230_PLUS)),
+ MPP_MODE(16,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x1, "ge0", "txclk", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x2, "ge1", "txd3", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x4, "lcd", "d16", V_MV78230_PLUS)),
+ MPP_MODE(17,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x1, "ge0", "col", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x2, "ge1", "txctl", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x4, "lcd", "d17", V_MV78230_PLUS)),
+ MPP_MODE(18,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x1, "ge0", "rxerr", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x2, "ge1", "rxd0", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x3, "ptp", "trig", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x4, "lcd", "d18", V_MV78230_PLUS)),
+ MPP_MODE(19,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x1, "ge0", "crs", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x2, "ge1", "rxd1", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x3, "ptp", "evreq", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x4, "lcd", "d19", V_MV78230_PLUS)),
+ MPP_MODE(20,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x1, "ge0", "rxd4", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x2, "ge1", "rxd2", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x3, "ptp", "clk", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x4, "lcd", "d20", V_MV78230_PLUS)),
+ MPP_MODE(21,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x1, "ge0", "rxd5", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x2, "ge1", "rxd3", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x3, "mem", "bat", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x4, "lcd", "d21", V_MV78230_PLUS)),
+ MPP_MODE(22,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x1, "ge0", "rxd6", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x2, "ge1", "rxctl", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x3, "sata0", "prsnt", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x4, "lcd", "d22", V_MV78230_PLUS)),
+ MPP_MODE(23,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x1, "ge0", "rxd7", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x2, "ge1", "rxclk", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x3, "sata1", "prsnt", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x4, "lcd", "d23", V_MV78230_PLUS)),
+ MPP_MODE(24,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x1, "sata1", "prsnt", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x2, "nf", "bootcs-re", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x3, "tdm", "rst", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x4, "lcd", "hsync", V_MV78230_PLUS)),
+ MPP_MODE(25,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x1, "sata0", "prsnt", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x2, "nf", "bootcs-we", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x3, "tdm", "pclk", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x4, "lcd", "vsync", V_MV78230_PLUS)),
+ MPP_MODE(26,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x3, "tdm", "fsync", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x4, "lcd", "clk", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x5, "vdd", "cpu1-pd", V_MV78230_PLUS)),
+ MPP_MODE(27,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x1, "ptp", "trig", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x3, "tdm", "dtx", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x4, "lcd", "e", V_MV78230_PLUS)),
+ MPP_MODE(28,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x1, "ptp", "evreq", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x3, "tdm", "drx", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x4, "lcd", "pwm", V_MV78230_PLUS)),
+ MPP_MODE(29,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x1, "ptp", "clk", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x3, "tdm", "int0", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x4, "lcd", "ref-clk", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x5, "vdd", "cpu0-pd", V_MV78230_PLUS)),
+ MPP_MODE(30,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x1, "sd0", "clk", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x3, "tdm", "int1", V_MV78230_PLUS)),
+ MPP_MODE(31,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x1, "sd0", "cmd", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x3, "tdm", "int2", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x5, "vdd", "cpu0-pd", V_MV78230_PLUS)),
+ MPP_MODE(32,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x1, "sd0", "d0", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x3, "tdm", "int3", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x5, "vdd", "cpu1-pd", V_MV78230_PLUS)),
+ MPP_MODE(33,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x1, "sd0", "d1", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x3, "tdm", "int4", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x4, "mem", "bat", V_MV78230_PLUS)),
+ MPP_MODE(34,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x1, "sd0", "d2", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x2, "sata0", "prsnt", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x3, "tdm", "int5", V_MV78230_PLUS)),
+ MPP_MODE(35,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x1, "sd0", "d3", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x2, "sata1", "prsnt", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x3, "tdm", "int6", V_MV78230_PLUS)),
+ MPP_MODE(36,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x1, "spi", "mosi", V_MV78230_PLUS)),
+ MPP_MODE(37,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x1, "spi", "miso", V_MV78230_PLUS)),
+ MPP_MODE(38,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x1, "spi", "sck", V_MV78230_PLUS)),
+ MPP_MODE(39,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x1, "spi", "cs0", V_MV78230_PLUS)),
+ MPP_MODE(40,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x1, "spi", "cs1", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x2, "uart2", "cts", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x3, "vdd", "cpu1-pd", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x4, "lcd", "vga-hsync", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x5, "pcie", "clkreq0", V_MV78230_PLUS)),
+ MPP_MODE(41,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x1, "spi", "cs2", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x2, "uart2", "rts", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x3, "sata1", "prsnt", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x4, "lcd", "vga-vsync", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x5, "pcie", "clkreq1", V_MV78230_PLUS)),
+ MPP_MODE(42,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x1, "uart2", "rxd", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x2, "uart0", "cts", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x3, "tdm", "int7", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x4, "tdm-1", "timer", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x5, "vdd", "cpu0-pd", V_MV78230_PLUS)),
+ MPP_MODE(43,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x1, "uart2", "txd", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x2, "uart0", "rts", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x3, "spi", "cs3", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x4, "pcie", "rstout", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x5, "vdd", "cpu2-3-pd", V_MV78460)),
+ MPP_MODE(44,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x1, "uart2", "cts", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x2, "uart3", "rxd", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x3, "spi", "cs4", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x4, "mem", "bat", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x5, "pcie", "clkreq2", V_MV78230_PLUS)),
+ MPP_MODE(45,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x1, "uart2", "rts", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x2, "uart3", "txd", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x3, "spi", "cs5", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x4, "sata1", "prsnt", V_MV78230_PLUS)),
+ MPP_MODE(46,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x1, "uart3", "rts", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x2, "uart1", "rts", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x3, "spi", "cs6", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x4, "sata0", "prsnt", V_MV78230_PLUS)),
+ MPP_MODE(47,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x1, "uart3", "cts", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x2, "uart1", "cts", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x3, "spi", "cs7", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x4, "ref", "clkout", V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x5, "pcie", "clkreq3", V_MV78230_PLUS)),
+ MPP_MODE(48,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x1, "tclk", NULL, V_MV78230_PLUS),
+ MPP_VAR_FUNCTION(0x2, "dev", "burst/last", V_MV78230_PLUS)),
+ MPP_MODE(49,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78260_PLUS),
+ MPP_VAR_FUNCTION(0x1, "dev", "we3", V_MV78260_PLUS)),
+ MPP_MODE(50,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78260_PLUS),
+ MPP_VAR_FUNCTION(0x1, "dev", "we2", V_MV78260_PLUS)),
+ MPP_MODE(51,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78260_PLUS),
+ MPP_VAR_FUNCTION(0x1, "dev", "ad16", V_MV78260_PLUS)),
+ MPP_MODE(52,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78260_PLUS),
+ MPP_VAR_FUNCTION(0x1, "dev", "ad17", V_MV78260_PLUS)),
+ MPP_MODE(53,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78260_PLUS),
+ MPP_VAR_FUNCTION(0x1, "dev", "ad18", V_MV78260_PLUS)),
+ MPP_MODE(54,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78260_PLUS),
+ MPP_VAR_FUNCTION(0x1, "dev", "ad19", V_MV78260_PLUS)),
+ MPP_MODE(55,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78260_PLUS),
+ MPP_VAR_FUNCTION(0x1, "dev", "ad20", V_MV78260_PLUS),
+ MPP_VAR_FUNCTION(0x2, "vdd", "cpu0-pd", V_MV78260_PLUS)),
+ MPP_MODE(56,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78260_PLUS),
+ MPP_VAR_FUNCTION(0x1, "dev", "ad21", V_MV78260_PLUS),
+ MPP_VAR_FUNCTION(0x2, "vdd", "cpu1-pd", V_MV78260_PLUS)),
+ MPP_MODE(57,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78260_PLUS),
+ MPP_VAR_FUNCTION(0x1, "dev", "ad22", V_MV78260_PLUS),
+ MPP_VAR_FUNCTION(0x2, "vdd", "cpu2-3-pd", V_MV78460)),
+ MPP_MODE(58,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78260_PLUS),
+ MPP_VAR_FUNCTION(0x1, "dev", "ad23", V_MV78260_PLUS)),
+ MPP_MODE(59,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78260_PLUS),
+ MPP_VAR_FUNCTION(0x1, "dev", "ad24", V_MV78260_PLUS)),
+ MPP_MODE(60,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78260_PLUS),
+ MPP_VAR_FUNCTION(0x1, "dev", "ad25", V_MV78260_PLUS)),
+ MPP_MODE(61,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78260_PLUS),
+ MPP_VAR_FUNCTION(0x1, "dev", "ad26", V_MV78260_PLUS)),
+ MPP_MODE(62,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78260_PLUS),
+ MPP_VAR_FUNCTION(0x1, "dev", "ad27", V_MV78260_PLUS)),
+ MPP_MODE(63,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78260_PLUS),
+ MPP_VAR_FUNCTION(0x1, "dev", "ad28", V_MV78260_PLUS)),
+ MPP_MODE(64,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78260_PLUS),
+ MPP_VAR_FUNCTION(0x1, "dev", "ad29", V_MV78260_PLUS)),
+ MPP_MODE(65,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78260_PLUS),
+ MPP_VAR_FUNCTION(0x1, "dev", "ad30", V_MV78260_PLUS)),
+ MPP_MODE(66,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78260_PLUS),
+ MPP_VAR_FUNCTION(0x1, "dev", "ad31", V_MV78260_PLUS)),
+};
+
+static struct mvebu_pinctrl_soc_info armada_xp_pinctrl_info;
+
+static struct of_device_id armada_xp_pinctrl_of_match[] __devinitdata = {
+ {
+ .compatible = "marvell,mv78230-pinctrl",
+ .data = (void *) V_MV78230,
+ },
+ {
+ .compatible = "marvell,mv78260-pinctrl",
+ .data = (void *) V_MV78260,
+ },
+ {
+ .compatible = "marvell,mv78460-pinctrl",
+ .data = (void *) V_MV78460,
+ },
+ { },
+};
+
+static struct mvebu_mpp_ctrl mv78230_mpp_controls[] = {
+ MPP_REG_CTRL(0, 48),
+};
+
+static struct pinctrl_gpio_range mv78230_mpp_gpio_ranges[] = {
+ MPP_GPIO_RANGE(0, 0, 0, 32),
+ MPP_GPIO_RANGE(1, 32, 32, 17),
+};
+
+static struct mvebu_mpp_ctrl mv78260_mpp_controls[] = {
+ MPP_REG_CTRL(0, 66),
+};
+
+static struct pinctrl_gpio_range mv78260_mpp_gpio_ranges[] = {
+ MPP_GPIO_RANGE(0, 0, 0, 32),
+ MPP_GPIO_RANGE(1, 32, 32, 32),
+ MPP_GPIO_RANGE(2, 64, 64, 3),
+};
+
+static struct mvebu_mpp_ctrl mv78460_mpp_controls[] = {
+ MPP_REG_CTRL(0, 66),
+};
+
+static struct pinctrl_gpio_range mv78460_mpp_gpio_ranges[] = {
+ MPP_GPIO_RANGE(0, 0, 0, 32),
+ MPP_GPIO_RANGE(1, 32, 32, 32),
+ MPP_GPIO_RANGE(2, 64, 64, 3),
+};
+
+static int __devinit armada_xp_pinctrl_probe(struct platform_device *pdev)
+{
+ struct mvebu_pinctrl_soc_info *soc = &armada_xp_pinctrl_info;
+ const struct of_device_id *match =
+ of_match_device(armada_xp_pinctrl_of_match, &pdev->dev);
+
+ if (!match)
+ return -ENODEV;
+
+ soc->variant = (unsigned) match->data & 0xff;
+
+ switch (soc->variant) {
+ case V_MV78230:
+ soc->controls = mv78230_mpp_controls;
+ soc->ncontrols = ARRAY_SIZE(mv78230_mpp_controls);
+ soc->modes = armada_xp_mpp_modes;
+ /* We don't want the full armada_xp_mpp_modes list, only
+ * the first 'n' entries that are available on this SoC */
+ soc->nmodes = mv78230_mpp_controls[0].npins;
+ soc->gpioranges = mv78230_mpp_gpio_ranges;
+ soc->ngpioranges = ARRAY_SIZE(mv78230_mpp_gpio_ranges);
+ break;
+ case V_MV78260:
+ soc->controls = mv78260_mpp_controls;
+ soc->ncontrols = ARRAY_SIZE(mv78260_mpp_controls);
+ soc->modes = armada_xp_mpp_modes;
+ /* We don't want the full armada_xp_mpp_modes list, only
+ * the first 'n' entries that are available on this SoC */
+ soc->nmodes = mv78260_mpp_controls[0].npins;
+ soc->gpioranges = mv78260_mpp_gpio_ranges;
+ soc->ngpioranges = ARRAY_SIZE(mv78260_mpp_gpio_ranges);
+ break;
+ case V_MV78460:
+ soc->controls = mv78460_mpp_controls;
+ soc->ncontrols = ARRAY_SIZE(mv78460_mpp_controls);
+ soc->modes = armada_xp_mpp_modes;
+ /* We don't want the full armada_xp_mpp_modes list, only
+ * the first 'n' entries that are available on this SoC */
+ soc->nmodes = mv78460_mpp_controls[0].npins;
+ soc->gpioranges = mv78460_mpp_gpio_ranges;
+ soc->ngpioranges = ARRAY_SIZE(mv78460_mpp_gpio_ranges);
+ break;
+ }
+
+ pdev->dev.platform_data = soc;
+
+ return mvebu_pinctrl_probe(pdev);
+}
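+
+/*
+ * Sketch of the nmodes trimming above, assuming MPP_REG_CTRL(0, 48)
+ * yields npins == 49: the mv78230 hands only the first 49 entries of
+ * armada_xp_mpp_modes (mpp0..mpp48) to the core, while the mv78260
+ * and mv78460 use all 67 entries (mpp0..mpp66).
+ */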
+
+static int __devexit armada_xp_pinctrl_remove(struct platform_device *pdev)
+{
+ return mvebu_pinctrl_remove(pdev);
+}
+
+static struct platform_driver armada_xp_pinctrl_driver = {
+ .driver = {
+ .name = "armada-xp-pinctrl",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(armada_xp_pinctrl_of_match),
+ },
+ .probe = armada_xp_pinctrl_probe,
+ .remove = __devexit_p(armada_xp_pinctrl_remove),
+};
+
+module_platform_driver(armada_xp_pinctrl_driver);
+
+MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
+MODULE_DESCRIPTION("Marvell Armada XP pinctrl driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/pinctrl-dove.c b/drivers/pinctrl/pinctrl-dove.c
new file mode 100644
index 0000000..ffe74b2
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-dove.c
@@ -0,0 +1,620 @@
+/*
+ * Marvell Dove pinctrl driver based on mvebu pinctrl core
+ *
+ * Author: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/bitops.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-mvebu.h"
+
+#define DOVE_SB_REGS_VIRT_BASE 0xfde00000
+#define DOVE_MPP_VIRT_BASE (DOVE_SB_REGS_VIRT_BASE | 0xd0200)
+#define DOVE_PMU_MPP_GENERAL_CTRL (DOVE_MPP_VIRT_BASE + 0x10)
+#define DOVE_AU0_AC97_SEL BIT(16)
+#define DOVE_GLOBAL_CONFIG_1 (DOVE_SB_REGS_VIRT_BASE | 0xe802c)
+#define DOVE_TWSI_ENABLE_OPTION1 BIT(7)
+#define DOVE_GLOBAL_CONFIG_2 (DOVE_SB_REGS_VIRT_BASE | 0xe8030)
+#define DOVE_TWSI_ENABLE_OPTION2 BIT(20)
+#define DOVE_TWSI_ENABLE_OPTION3 BIT(21)
+#define DOVE_TWSI_OPTION3_GPIO BIT(22)
+#define DOVE_SSP_CTRL_STATUS_1 (DOVE_SB_REGS_VIRT_BASE | 0xe8034)
+#define DOVE_SSP_ON_AU1 BIT(0)
+#define DOVE_MPP_GENERAL_VIRT_BASE (DOVE_SB_REGS_VIRT_BASE | 0xe803c)
+#define DOVE_AU1_SPDIFO_GPIO_EN BIT(1)
+#define DOVE_NAND_GPIO_EN BIT(0)
+#define DOVE_GPIO_LO_VIRT_BASE (DOVE_SB_REGS_VIRT_BASE | 0xd0400)
+#define DOVE_MPP_CTRL4_VIRT_BASE (DOVE_GPIO_LO_VIRT_BASE + 0x40)
+#define DOVE_SPI_GPIO_SEL BIT(5)
+#define DOVE_UART1_GPIO_SEL BIT(4)
+#define DOVE_AU1_GPIO_SEL BIT(3)
+#define DOVE_CAM_GPIO_SEL BIT(2)
+#define DOVE_SD1_GPIO_SEL BIT(1)
+#define DOVE_SD0_GPIO_SEL BIT(0)
+
+#define MPPS_PER_REG 8
+#define MPP_BITS 4
+#define MPP_MASK 0xf
+
+#define CONFIG_PMU BIT(4)
+
+static int dove_pmu_mpp_ctrl_get(struct mvebu_mpp_ctrl *ctrl,
+ unsigned long *config)
+{
+ unsigned off = (ctrl->pid / MPPS_PER_REG) * MPP_BITS;
+ unsigned shift = (ctrl->pid % MPPS_PER_REG) * MPP_BITS;
+ unsigned long pmu = readl(DOVE_PMU_MPP_GENERAL_CTRL);
+ unsigned long mpp = readl(DOVE_MPP_VIRT_BASE + off);
+
+ if (pmu & (1 << ctrl->pid))
+ *config = CONFIG_PMU;
+ else
+ *config = (mpp >> shift) & MPP_MASK;
+ return 0;
+}
+
+static int dove_pmu_mpp_ctrl_set(struct mvebu_mpp_ctrl *ctrl,
+ unsigned long config)
+{
+ unsigned off = (ctrl->pid / MPPS_PER_REG) * MPP_BITS;
+ unsigned shift = (ctrl->pid % MPPS_PER_REG) * MPP_BITS;
+ unsigned long pmu = readl(DOVE_PMU_MPP_GENERAL_CTRL);
+ unsigned long mpp = readl(DOVE_MPP_VIRT_BASE + off);
+
+ if (config == CONFIG_PMU)
+ writel(pmu | (1 << ctrl->pid), DOVE_PMU_MPP_GENERAL_CTRL);
+ else {
+ writel(pmu & ~(1 << ctrl->pid), DOVE_PMU_MPP_GENERAL_CTRL);
+ mpp &= ~(MPP_MASK << shift);
+ mpp |= config << shift;
+ writel(mpp, DOVE_MPP_VIRT_BASE + off);
+ }
+ return 0;
+}
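+
+/*
+ * Worked example for the register math above: for ctrl->pid == 13,
+ * off = (13 / MPPS_PER_REG) * MPP_BITS = 1 * 4 = 4 and
+ * shift = (13 % MPPS_PER_REG) * MPP_BITS = 5 * 4 = 20, so the 4-bit
+ * function code for mpp13 sits in bits 23:20 of the register at
+ * DOVE_MPP_VIRT_BASE + 4, unless PMU bit 13 claims the pad first.
+ */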
+
+static int dove_mpp4_ctrl_get(struct mvebu_mpp_ctrl *ctrl,
+ unsigned long *config)
+{
+ unsigned long mpp4 = readl(DOVE_MPP_CTRL4_VIRT_BASE);
+ unsigned long mask;
+
+ switch (ctrl->pid) {
+ case 24: /* mpp_camera */
+ mask = DOVE_CAM_GPIO_SEL;
+ break;
+ case 40: /* mpp_sdio0 */
+ mask = DOVE_SD0_GPIO_SEL;
+ break;
+ case 46: /* mpp_sdio1 */
+ mask = DOVE_SD1_GPIO_SEL;
+ break;
+ case 58: /* mpp_spi0 */
+ mask = DOVE_SPI_GPIO_SEL;
+ break;
+ case 62: /* mpp_uart1 */
+ mask = DOVE_UART1_GPIO_SEL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *config = ((mpp4 & mask) != 0);
+
+ return 0;
+}
+
+static int dove_mpp4_ctrl_set(struct mvebu_mpp_ctrl *ctrl,
+ unsigned long config)
+{
+ unsigned long mpp4 = readl(DOVE_MPP_CTRL4_VIRT_BASE);
+ unsigned long mask;
+
+ switch (ctrl->pid) {
+ case 24: /* mpp_camera */
+ mask = DOVE_CAM_GPIO_SEL;
+ break;
+ case 40: /* mpp_sdio0 */
+ mask = DOVE_SD0_GPIO_SEL;
+ break;
+ case 46: /* mpp_sdio1 */
+ mask = DOVE_SD1_GPIO_SEL;
+ break;
+ case 58: /* mpp_spi0 */
+ mask = DOVE_SPI_GPIO_SEL;
+ break;
+ case 62: /* mpp_uart1 */
+ mask = DOVE_UART1_GPIO_SEL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ mpp4 &= ~mask;
+ if (config)
+ mpp4 |= mask;
+
+ writel(mpp4, DOVE_MPP_CTRL4_VIRT_BASE);
+
+ return 0;
+}
+
+static int dove_nand_ctrl_get(struct mvebu_mpp_ctrl *ctrl,
+ unsigned long *config)
+{
+ unsigned long gmpp = readl(DOVE_MPP_GENERAL_VIRT_BASE);
+
+ *config = ((gmpp & DOVE_NAND_GPIO_EN) != 0);
+
+ return 0;
+}
+
+static int dove_nand_ctrl_set(struct mvebu_mpp_ctrl *ctrl,
+ unsigned long config)
+{
+ unsigned long gmpp = readl(DOVE_MPP_GENERAL_VIRT_BASE);
+
+ gmpp &= ~DOVE_NAND_GPIO_EN;
+ if (config)
+ gmpp |= DOVE_NAND_GPIO_EN;
+
+ writel(gmpp, DOVE_MPP_GENERAL_VIRT_BASE);
+
+ return 0;
+}
+
+static int dove_audio0_ctrl_get(struct mvebu_mpp_ctrl *ctrl,
+ unsigned long *config)
+{
+ unsigned long pmu = readl(DOVE_PMU_MPP_GENERAL_CTRL);
+
+ *config = ((pmu & DOVE_AU0_AC97_SEL) != 0);
+
+ return 0;
+}
+
+static int dove_audio0_ctrl_set(struct mvebu_mpp_ctrl *ctrl,
+ unsigned long config)
+{
+ unsigned long pmu = readl(DOVE_PMU_MPP_GENERAL_CTRL);
+
+ pmu &= ~DOVE_AU0_AC97_SEL;
+ if (config)
+ pmu |= DOVE_AU0_AC97_SEL;
+ writel(pmu, DOVE_PMU_MPP_GENERAL_CTRL);
+
+ return 0;
+}
+
+static int dove_audio1_ctrl_get(struct mvebu_mpp_ctrl *ctrl,
+ unsigned long *config)
+{
+ unsigned long mpp4 = readl(DOVE_MPP_CTRL4_VIRT_BASE);
+ unsigned long sspc1 = readl(DOVE_SSP_CTRL_STATUS_1);
+ unsigned long gmpp = readl(DOVE_MPP_GENERAL_VIRT_BASE);
+ unsigned long gcfg2 = readl(DOVE_GLOBAL_CONFIG_2);
+
+ *config = 0;
+ if (mpp4 & DOVE_AU1_GPIO_SEL)
+ *config |= BIT(3);
+ if (sspc1 & DOVE_SSP_ON_AU1)
+ *config |= BIT(2);
+ if (gmpp & DOVE_AU1_SPDIFO_GPIO_EN)
+ *config |= BIT(1);
+ if (gcfg2 & DOVE_TWSI_OPTION3_GPIO)
+ *config |= BIT(0);
+
+ /* SSP/TWSI only if I2S1 not set */
+ if ((*config & BIT(3)) == 0)
+ *config &= ~(BIT(2) | BIT(0));
+ /* TWSI only if SPDIFO not set */
+ if ((*config & BIT(1)) == 0)
+ *config &= ~BIT(0);
+ return 0;
+}
+
+static int dove_audio1_ctrl_set(struct mvebu_mpp_ctrl *ctrl,
+ unsigned long config)
+{
+ unsigned long mpp4 = readl(DOVE_MPP_CTRL4_VIRT_BASE);
+ unsigned long sspc1 = readl(DOVE_SSP_CTRL_STATUS_1);
+ unsigned long gmpp = readl(DOVE_MPP_GENERAL_VIRT_BASE);
+ unsigned long gcfg2 = readl(DOVE_GLOBAL_CONFIG_2);
+
+ if (config & BIT(0))
+ gcfg2 |= DOVE_TWSI_OPTION3_GPIO;
+ if (config & BIT(1))
+ gmpp |= DOVE_AU1_SPDIFO_GPIO_EN;
+ if (config & BIT(2))
+ sspc1 |= DOVE_SSP_ON_AU1;
+ if (config & BIT(3))
+ mpp4 |= DOVE_AU1_GPIO_SEL;
+
+ writel(mpp4, DOVE_MPP_CTRL4_VIRT_BASE);
+ writel(sspc1, DOVE_SSP_CTRL_STATUS_1);
+ writel(gmpp, DOVE_MPP_GENERAL_VIRT_BASE);
+ writel(gcfg2, DOVE_GLOBAL_CONFIG_2);
+
+ return 0;
+}
+
+/* mpp[52:57] gpio pins depend heavily on the current config;
+ * gpio_req does not try to mux in gpio capabilities, so as not to
+ * break other functions. If you need all of these mpps as gpio,
+ * enforce the gpio setting through a pinctrl mapping.
+ */
+static int dove_audio1_ctrl_gpio_req(struct mvebu_mpp_ctrl *ctrl, u8 pid)
+{
+ unsigned long config;
+
+ dove_audio1_ctrl_get(ctrl, &config);
+
+ switch (config) {
+ case 0x02: /* i2s1 : gpio[56:57] */
+ case 0x0e: /* ssp : gpio[56:57] */
+ if (pid >= 56)
+ return 0;
+ return -ENOTSUPP;
+ case 0x08: /* spdifo : gpio[52:55] */
+ case 0x0b: /* twsi : gpio[52:55] */
+ if (pid <= 55)
+ return 0;
+ return -ENOTSUPP;
+ case 0x0a: /* all gpio */
+ return 0;
+ /* 0x00 : i2s1/spdifo : no gpio */
+ /* 0x0c : ssp/spdifo : no gpio */
+ /* 0x0f : ssp/twsi : no gpio */
+ }
+ return -ENOTSUPP;
+}
+
+/* mpp[52:57] has gpio pins capable of in and out */
+static int dove_audio1_ctrl_gpio_dir(struct mvebu_mpp_ctrl *ctrl, u8 pid,
+ bool input)
+{
+ if (pid < 52 || pid > 57)
+ return -ENOTSUPP;
+ return 0;
+}
+
+static int dove_twsi_ctrl_get(struct mvebu_mpp_ctrl *ctrl,
+ unsigned long *config)
+{
+ unsigned long gcfg1 = readl(DOVE_GLOBAL_CONFIG_1);
+ unsigned long gcfg2 = readl(DOVE_GLOBAL_CONFIG_2);
+
+ *config = 0;
+ if (gcfg1 & DOVE_TWSI_ENABLE_OPTION1)
+ *config = 1;
+ else if (gcfg2 & DOVE_TWSI_ENABLE_OPTION2)
+ *config = 2;
+ else if (gcfg2 & DOVE_TWSI_ENABLE_OPTION3)
+ *config = 3;
+
+ return 0;
+}
+
+static int dove_twsi_ctrl_set(struct mvebu_mpp_ctrl *ctrl,
+ unsigned long config)
+{
+ unsigned long gcfg1 = readl(DOVE_GLOBAL_CONFIG_1);
+ unsigned long gcfg2 = readl(DOVE_GLOBAL_CONFIG_2);
+
+ gcfg1 &= ~DOVE_TWSI_ENABLE_OPTION1;
+ gcfg2 &= ~(DOVE_TWSI_ENABLE_OPTION2 | DOVE_TWSI_ENABLE_OPTION3);
+
+ switch (config) {
+ case 1:
+ gcfg1 |= DOVE_TWSI_ENABLE_OPTION1;
+ break;
+ case 2:
+ gcfg2 |= DOVE_TWSI_ENABLE_OPTION2;
+ break;
+ case 3:
+ gcfg2 |= DOVE_TWSI_ENABLE_OPTION3;
+ break;
+ }
+
+ writel(gcfg1, DOVE_GLOBAL_CONFIG_1);
+ writel(gcfg2, DOVE_GLOBAL_CONFIG_2);
+
+ return 0;
+}
+
+static struct mvebu_mpp_ctrl dove_mpp_controls[] = {
+ MPP_FUNC_CTRL(0, 0, "mpp0", dove_pmu_mpp_ctrl),
+ MPP_FUNC_CTRL(1, 1, "mpp1", dove_pmu_mpp_ctrl),
+ MPP_FUNC_CTRL(2, 2, "mpp2", dove_pmu_mpp_ctrl),
+ MPP_FUNC_CTRL(3, 3, "mpp3", dove_pmu_mpp_ctrl),
+ MPP_FUNC_CTRL(4, 4, "mpp4", dove_pmu_mpp_ctrl),
+ MPP_FUNC_CTRL(5, 5, "mpp5", dove_pmu_mpp_ctrl),
+ MPP_FUNC_CTRL(6, 6, "mpp6", dove_pmu_mpp_ctrl),
+ MPP_FUNC_CTRL(7, 7, "mpp7", dove_pmu_mpp_ctrl),
+ MPP_FUNC_CTRL(8, 8, "mpp8", dove_pmu_mpp_ctrl),
+ MPP_FUNC_CTRL(9, 9, "mpp9", dove_pmu_mpp_ctrl),
+ MPP_FUNC_CTRL(10, 10, "mpp10", dove_pmu_mpp_ctrl),
+ MPP_FUNC_CTRL(11, 11, "mpp11", dove_pmu_mpp_ctrl),
+ MPP_FUNC_CTRL(12, 12, "mpp12", dove_pmu_mpp_ctrl),
+ MPP_FUNC_CTRL(13, 13, "mpp13", dove_pmu_mpp_ctrl),
+ MPP_FUNC_CTRL(14, 14, "mpp14", dove_pmu_mpp_ctrl),
+ MPP_FUNC_CTRL(15, 15, "mpp15", dove_pmu_mpp_ctrl),
+ MPP_REG_CTRL(16, 23),
+ MPP_FUNC_CTRL(24, 39, "mpp_camera", dove_mpp4_ctrl),
+ MPP_FUNC_CTRL(40, 45, "mpp_sdio0", dove_mpp4_ctrl),
+ MPP_FUNC_CTRL(46, 51, "mpp_sdio1", dove_mpp4_ctrl),
+ MPP_FUNC_GPIO_CTRL(52, 57, "mpp_audio1", dove_audio1_ctrl),
+ MPP_FUNC_CTRL(58, 61, "mpp_spi0", dove_mpp4_ctrl),
+ MPP_FUNC_CTRL(62, 63, "mpp_uart1", dove_mpp4_ctrl),
+ MPP_FUNC_CTRL(64, 71, "mpp_nand", dove_nand_ctrl),
+ MPP_FUNC_CTRL(72, 72, "audio0", dove_audio0_ctrl),
+ MPP_FUNC_CTRL(73, 73, "twsi", dove_twsi_ctrl),
+};
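+
+/*
+ * Illustrative reading of the table above, assuming the
+ * pinctrl-mvebu.h macros paste the callback suffixes: the
+ * MPP_FUNC_CTRL(64, 71, "mpp_nand", dove_nand_ctrl) entry covers pins
+ * 64..71 and routes their config through dove_nand_ctrl_get()/_set(),
+ * the MPP_FUNC_GPIO_CTRL() entry additionally wires up
+ * dove_audio1_ctrl_gpio_req()/_gpio_dir(), and the plain
+ * MPP_REG_CTRL(16, 23) entry falls back to the generic MPP register
+ * accessors.
+ */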
+
+static struct mvebu_mpp_mode dove_mpp_modes[] = {
+ MPP_MODE(0,
+ MPP_FUNCTION(0x00, "gpio", NULL),
+ MPP_FUNCTION(0x02, "uart2", "rts"),
+ MPP_FUNCTION(0x03, "sdio0", "cd"),
+ MPP_FUNCTION(0x0f, "lcd0", "pwm"),
+ MPP_FUNCTION(0x10, "pmu", NULL)),
+ MPP_MODE(1,
+ MPP_FUNCTION(0x00, "gpio", NULL),
+ MPP_FUNCTION(0x02, "uart2", "cts"),
+ MPP_FUNCTION(0x03, "sdio0", "wp"),
+ MPP_FUNCTION(0x0f, "lcd1", "pwm"),
+ MPP_FUNCTION(0x10, "pmu", NULL)),
+ MPP_MODE(2,
+ MPP_FUNCTION(0x00, "gpio", NULL),
+ MPP_FUNCTION(0x01, "sata", "prsnt"),
+ MPP_FUNCTION(0x02, "uart2", "txd"),
+ MPP_FUNCTION(0x03, "sdio0", "buspwr"),
+ MPP_FUNCTION(0x04, "uart1", "rts"),
+ MPP_FUNCTION(0x10, "pmu", NULL)),
+ MPP_MODE(3,
+ MPP_FUNCTION(0x00, "gpio", NULL),
+ MPP_FUNCTION(0x01, "sata", "act"),
+ MPP_FUNCTION(0x02, "uart2", "rxd"),
+ MPP_FUNCTION(0x03, "sdio0", "ledctrl"),
+ MPP_FUNCTION(0x04, "uart1", "cts"),
+ MPP_FUNCTION(0x0f, "lcd-spi", "cs1"),
+ MPP_FUNCTION(0x10, "pmu", NULL)),
+ MPP_MODE(4,
+ MPP_FUNCTION(0x00, "gpio", NULL),
+ MPP_FUNCTION(0x02, "uart3", "rts"),
+ MPP_FUNCTION(0x03, "sdio1", "cd"),
+ MPP_FUNCTION(0x04, "spi1", "miso"),
+ MPP_FUNCTION(0x10, "pmu", NULL)),
+ MPP_MODE(5,
+ MPP_FUNCTION(0x00, "gpio", NULL),
+ MPP_FUNCTION(0x02, "uart3", "cts"),
+ MPP_FUNCTION(0x03, "sdio1", "wp"),
+ MPP_FUNCTION(0x04, "spi1", "cs"),
+ MPP_FUNCTION(0x10, "pmu", NULL)),
+ MPP_MODE(6,
+ MPP_FUNCTION(0x00, "gpio", NULL),
+ MPP_FUNCTION(0x02, "uart3", "txd"),
+ MPP_FUNCTION(0x03, "sdio1", "buspwr"),
+ MPP_FUNCTION(0x04, "spi1", "mosi"),
+ MPP_FUNCTION(0x10, "pmu", NULL)),
+ MPP_MODE(7,
+ MPP_FUNCTION(0x00, "gpio", NULL),
+ MPP_FUNCTION(0x02, "uart3", "rxd"),
+ MPP_FUNCTION(0x03, "sdio1", "ledctrl"),
+ MPP_FUNCTION(0x04, "spi1", "sck"),
+ MPP_FUNCTION(0x10, "pmu", NULL)),
+ MPP_MODE(8,
+ MPP_FUNCTION(0x00, "gpio", NULL),
+ MPP_FUNCTION(0x01, "watchdog", "rstout"),
+ MPP_FUNCTION(0x10, "pmu", NULL)),
+ MPP_MODE(9,
+ MPP_FUNCTION(0x00, "gpio", NULL),
+ MPP_FUNCTION(0x05, "pex1", "clkreq"),
+ MPP_FUNCTION(0x10, "pmu", NULL)),
+ MPP_MODE(10,
+ MPP_FUNCTION(0x00, "gpio", NULL),
+ MPP_FUNCTION(0x05, "ssp", "sclk"),
+ MPP_FUNCTION(0x10, "pmu", NULL)),
+ MPP_MODE(11,
+ MPP_FUNCTION(0x00, "gpio", NULL),
+ MPP_FUNCTION(0x01, "sata", "prsnt"),
+ MPP_FUNCTION(0x02, "sata-1", "act"),
+ MPP_FUNCTION(0x03, "sdio0", "ledctrl"),
+ MPP_FUNCTION(0x04, "sdio1", "ledctrl"),
+ MPP_FUNCTION(0x05, "pex0", "clkreq"),
+ MPP_FUNCTION(0x10, "pmu", NULL)),
+ MPP_MODE(12,
+ MPP_FUNCTION(0x00, "gpio", NULL),
+ MPP_FUNCTION(0x01, "sata", "act"),
+ MPP_FUNCTION(0x02, "uart2", "rts"),
+ MPP_FUNCTION(0x03, "audio0", "extclk"),
+ MPP_FUNCTION(0x04, "sdio1", "cd"),
+ MPP_FUNCTION(0x10, "pmu", NULL)),
+ MPP_MODE(13,
+ MPP_FUNCTION(0x00, "gpio", NULL),
+ MPP_FUNCTION(0x02, "uart2", "cts"),
+ MPP_FUNCTION(0x03, "audio1", "extclk"),
+ MPP_FUNCTION(0x04, "sdio1", "wp"),
+ MPP_FUNCTION(0x05, "ssp", "extclk"),
+ MPP_FUNCTION(0x10, "pmu", NULL)),
+ MPP_MODE(14,
+ MPP_FUNCTION(0x00, "gpio", NULL),
+ MPP_FUNCTION(0x02, "uart2", "txd"),
+ MPP_FUNCTION(0x04, "sdio1", "buspwr"),
+ MPP_FUNCTION(0x05, "ssp", "rxd"),
+ MPP_FUNCTION(0x10, "pmu", NULL)),
+ MPP_MODE(15,
+ MPP_FUNCTION(0x00, "gpio", NULL),
+ MPP_FUNCTION(0x02, "uart2", "rxd"),
+ MPP_FUNCTION(0x04, "sdio1", "ledctrl"),
+ MPP_FUNCTION(0x05, "ssp", "sfrm"),
+ MPP_FUNCTION(0x10, "pmu", NULL)),
+ MPP_MODE(16,
+ MPP_FUNCTION(0x00, "gpio", NULL),
+ MPP_FUNCTION(0x02, "uart3", "rts"),
+ MPP_FUNCTION(0x03, "sdio0", "cd"),
+ MPP_FUNCTION(0x04, "lcd-spi", "cs1"),
+ MPP_FUNCTION(0x05, "ac97", "sdi1")),
+ MPP_MODE(17,
+ MPP_FUNCTION(0x00, "gpio", NULL),
+ MPP_FUNCTION(0x01, "ac97-1", "sysclko"),
+ MPP_FUNCTION(0x02, "uart3", "cts"),
+ MPP_FUNCTION(0x03, "sdio0", "wp"),
+ MPP_FUNCTION(0x04, "twsi", "sda"),
+ MPP_FUNCTION(0x05, "ac97", "sdi2")),
+ MPP_MODE(18,
+ MPP_FUNCTION(0x00, "gpio", NULL),
+ MPP_FUNCTION(0x02, "uart3", "txd"),
+ MPP_FUNCTION(0x03, "sdio0", "buspwr"),
+ MPP_FUNCTION(0x04, "lcd0", "pwm"),
+ MPP_FUNCTION(0x05, "ac97", "sdi3")),
+ MPP_MODE(19,
+ MPP_FUNCTION(0x00, "gpio", NULL),
+ MPP_FUNCTION(0x02, "uart3", "rxd"),
+ MPP_FUNCTION(0x03, "sdio0", "ledctrl"),
+ MPP_FUNCTION(0x04, "twsi", "sck")),
+ MPP_MODE(20,
+ MPP_FUNCTION(0x00, "gpio", NULL),
+ MPP_FUNCTION(0x01, "ac97", "sysclko"),
+ MPP_FUNCTION(0x02, "lcd-spi", "miso"),
+ MPP_FUNCTION(0x03, "sdio1", "cd"),
+ MPP_FUNCTION(0x05, "sdio0", "cd"),
+ MPP_FUNCTION(0x06, "spi1", "miso")),
+ MPP_MODE(21,
+ MPP_FUNCTION(0x00, "gpio", NULL),
+ MPP_FUNCTION(0x01, "uart1", "rts"),
+ MPP_FUNCTION(0x02, "lcd-spi", "cs0"),
+ MPP_FUNCTION(0x03, "sdio1", "wp"),
+ MPP_FUNCTION(0x04, "ssp", "sfrm"),
+ MPP_FUNCTION(0x05, "sdio0", "wp"),
+ MPP_FUNCTION(0x06, "spi1", "cs")),
+ MPP_MODE(22,
+ MPP_FUNCTION(0x00, "gpio", NULL),
+ MPP_FUNCTION(0x01, "uart1", "cts"),
+ MPP_FUNCTION(0x02, "lcd-spi", "mosi"),
+ MPP_FUNCTION(0x03, "sdio1", "buspwr"),
+ MPP_FUNCTION(0x04, "ssp", "txd"),
+ MPP_FUNCTION(0x05, "sdio0", "buspwr"),
+ MPP_FUNCTION(0x06, "spi1", "mosi")),
+ MPP_MODE(23,
+ MPP_FUNCTION(0x00, "gpio", NULL),
+ MPP_FUNCTION(0x02, "lcd-spi", "sck"),
+ MPP_FUNCTION(0x03, "sdio1", "ledctrl"),
+ MPP_FUNCTION(0x04, "ssp", "sclk"),
+ MPP_FUNCTION(0x05, "sdio0", "ledctrl"),
+ MPP_FUNCTION(0x06, "spi1", "sck")),
+ MPP_MODE(24,
+ MPP_FUNCTION(0x00, "camera", NULL),
+ MPP_FUNCTION(0x01, "gpio", NULL)),
+ MPP_MODE(40,
+ MPP_FUNCTION(0x00, "sdio0", NULL),
+ MPP_FUNCTION(0x01, "gpio", NULL)),
+ MPP_MODE(46,
+ MPP_FUNCTION(0x00, "sdio1", NULL),
+ MPP_FUNCTION(0x01, "gpio", NULL)),
+ MPP_MODE(52,
+ MPP_FUNCTION(0x00, "i2s1/spdifo", NULL),
+ MPP_FUNCTION(0x02, "i2s1", NULL),
+ MPP_FUNCTION(0x08, "spdifo", NULL),
+ MPP_FUNCTION(0x0a, "gpio", NULL),
+ MPP_FUNCTION(0x0b, "twsi", NULL),
+ MPP_FUNCTION(0x0c, "ssp/spdifo", NULL),
+ MPP_FUNCTION(0x0e, "ssp", NULL),
+ MPP_FUNCTION(0x0f, "ssp/twsi", NULL)),
+ MPP_MODE(58,
+ MPP_FUNCTION(0x00, "spi0", NULL),
+ MPP_FUNCTION(0x01, "gpio", NULL)),
+ MPP_MODE(62,
+ MPP_FUNCTION(0x00, "uart1", NULL),
+ MPP_FUNCTION(0x01, "gpio", NULL)),
+ MPP_MODE(64,
+ MPP_FUNCTION(0x00, "nand", NULL),
+ MPP_FUNCTION(0x01, "gpo", NULL)),
+ MPP_MODE(72,
+ MPP_FUNCTION(0x00, "i2s", NULL),
+ MPP_FUNCTION(0x01, "ac97", NULL)),
+ MPP_MODE(73,
+ MPP_FUNCTION(0x00, "twsi-none", NULL),
+ MPP_FUNCTION(0x01, "twsi-opt1", NULL),
+ MPP_FUNCTION(0x02, "twsi-opt2", NULL),
+ MPP_FUNCTION(0x03, "twsi-opt3", NULL)),
+};
+
+static struct pinctrl_gpio_range dove_mpp_gpio_ranges[] = {
+ MPP_GPIO_RANGE(0, 0, 0, 32),
+ MPP_GPIO_RANGE(1, 32, 32, 32),
+ MPP_GPIO_RANGE(2, 64, 64, 8),
+};
+
+static struct mvebu_pinctrl_soc_info dove_pinctrl_info = {
+ .controls = dove_mpp_controls,
+ .ncontrols = ARRAY_SIZE(dove_mpp_controls),
+ .modes = dove_mpp_modes,
+ .nmodes = ARRAY_SIZE(dove_mpp_modes),
+ .gpioranges = dove_mpp_gpio_ranges,
+ .ngpioranges = ARRAY_SIZE(dove_mpp_gpio_ranges),
+ .variant = 0,
+};
+
+static struct clk *clk;
+
+static struct of_device_id dove_pinctrl_of_match[] __devinitdata = {
+ { .compatible = "marvell,dove-pinctrl", .data = &dove_pinctrl_info },
+ { }
+};
+
+static int __devinit dove_pinctrl_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *match =
+ of_match_device(dove_pinctrl_of_match, &pdev->dev);
+ pdev->dev.platform_data = match->data;
+
+ /*
+ * The General MPP Configuration Register is part of the pdma
+ * register block; grab its clk to make sure it is ticking.
+ */
+ clk = devm_clk_get(&pdev->dev, NULL);
+ if (!IS_ERR(clk))
+ clk_prepare_enable(clk);
+
+ return mvebu_pinctrl_probe(pdev);
+}
+
+static int __devexit dove_pinctrl_remove(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = mvebu_pinctrl_remove(pdev);
+ if (!IS_ERR(clk))
+ clk_disable_unprepare(clk);
+ return ret;
+}
+
+static struct platform_driver dove_pinctrl_driver = {
+ .driver = {
+ .name = "dove-pinctrl",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(dove_pinctrl_of_match),
+ },
+ .probe = dove_pinctrl_probe,
+ .remove = __devexit_p(dove_pinctrl_remove),
+};
+
+module_platform_driver(dove_pinctrl_driver);
+
+MODULE_AUTHOR("Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>");
+MODULE_DESCRIPTION("Marvell Dove pinctrl driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/pinctrl-falcon.c b/drivers/pinctrl/pinctrl-falcon.c
new file mode 100644
index 0000000..ee73059
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-falcon.c
@@ -0,0 +1,468 @@
+/*
+ * linux/drivers/pinctrl/pinctrl-falcon.c
+ * based on linux/drivers/pinctrl/pinmux-pxa910.c
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2012 Thomas Langer <thomas.langer@lantiq.com>
+ * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-lantiq.h"
+
+#include <lantiq_soc.h>
+
+/* Multiplexer Control Register */
+#define LTQ_PADC_MUX(x) ((x) * 0x4)
+/* Pull Up Enable Register */
+#define LTQ_PADC_PUEN 0x80
+/* Pull Down Enable Register */
+#define LTQ_PADC_PDEN 0x84
+/* Slew Rate Control Register */
+#define LTQ_PADC_SRC 0x88
+/* Drive Current Control Register */
+#define LTQ_PADC_DCC 0x8C
+/* Pad Control Availability Register */
+#define LTQ_PADC_AVAIL 0xF0
+
+#define pad_r32(p, reg) ltq_r32(p + reg)
+#define pad_w32(p, val, reg) ltq_w32(val, p + reg)
+#define pad_w32_mask(c, clear, set, reg) \
+ pad_w32(c, (pad_r32(c, reg) & ~(clear)) | (set), reg)
+
+#define pad_getbit(m, r, p) (!!(ltq_r32(m + r) & (1 << p)))
+
+#define PORTS 5
+#define PINS 32
+#define PORT(x) ((x) / PINS)
+#define PORT_PIN(x) ((x) % PINS)
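+
+/*
+ * Example: falcon pin 44 (GPIO44) resolves to PORT(44) == 1 and
+ * PORT_PIN(44) == 12, i.e. bit 12 of the pad control registers in
+ * info->membase[1].
+ */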
+
+#define MFP_FALCON(a, f0, f1, f2, f3) \
+{ \
+ .name = #a, \
+ .pin = a, \
+ .func = { \
+ FALCON_MUX_##f0, \
+ FALCON_MUX_##f1, \
+ FALCON_MUX_##f2, \
+ FALCON_MUX_##f3, \
+ }, \
+}
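+
+/*
+ * For reference, MFP_FALCON(GPIO7, MDIO, GPIO, NONE, NONE) expands to
+ *
+ * { .name = "GPIO7", .pin = GPIO7,
+ * .func = { FALCON_MUX_MDIO, FALCON_MUX_GPIO,
+ * FALCON_MUX_NONE, FALCON_MUX_NONE } }
+ *
+ * where func[n] is the mux value the lantiq pinmux core is expected
+ * to hand to falcon_mux_apply() when function n is selected.
+ */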
+
+#define GRP_MUX(a, m, p) \
+{ \
+ .name = a, \
+ .mux = FALCON_MUX_##m, \
+ .pins = p, \
+ .npins = ARRAY_SIZE(p), \
+}
+
+enum falcon_mux {
+ FALCON_MUX_GPIO = 0,
+ FALCON_MUX_RST,
+ FALCON_MUX_NTR,
+ FALCON_MUX_MDIO,
+ FALCON_MUX_LED,
+ FALCON_MUX_SPI,
+ FALCON_MUX_ASC,
+ FALCON_MUX_I2C,
+ FALCON_MUX_HOSTIF,
+ FALCON_MUX_SLIC,
+ FALCON_MUX_JTAG,
+ FALCON_MUX_PCM,
+ FALCON_MUX_MII,
+ FALCON_MUX_PHY,
+ FALCON_MUX_NONE = 0xffff,
+};
+
+static struct pinctrl_pin_desc falcon_pads[PORTS * PINS];
+static int pad_count[PORTS];
+
+static void lantiq_load_pin_desc(struct pinctrl_pin_desc *d, int bank, int len)
+{
+ int base = bank * PINS;
+ int i;
+
+ for (i = 0; i < len; i++) {
+ /* strlen("ioXYZ") + 1 = 6 */
+ char *name = kzalloc(6, GFP_KERNEL);
+ snprintf(name, 6, "io%d", base + i);
+ d[i].number = base + i;
+ d[i].name = name;
+ }
+ pad_count[bank] = len;
+}
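+
+/*
+ * Hypothetical call (the real ones live in the probe path): passing
+ * d = &falcon_pads[32], bank = 1, len = 14 would register pads
+ * "io32".."io45" with numbers 32..45 and record pad_count[1] = 14.
+ */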
+
+static struct ltq_mfp_pin falcon_mfp[] = {
+ /* pin f0 f1 f2 f3 */
+ MFP_FALCON(GPIO0, RST, GPIO, NONE, NONE),
+ MFP_FALCON(GPIO1, GPIO, GPIO, NONE, NONE),
+ MFP_FALCON(GPIO2, GPIO, GPIO, NONE, NONE),
+ MFP_FALCON(GPIO3, GPIO, GPIO, NONE, NONE),
+ MFP_FALCON(GPIO4, NTR, GPIO, NONE, NONE),
+ MFP_FALCON(GPIO5, NTR, GPIO, NONE, NONE),
+ MFP_FALCON(GPIO6, RST, GPIO, NONE, NONE),
+ MFP_FALCON(GPIO7, MDIO, GPIO, NONE, NONE),
+ MFP_FALCON(GPIO8, MDIO, GPIO, NONE, NONE),
+ MFP_FALCON(GPIO9, LED, GPIO, NONE, NONE),
+ MFP_FALCON(GPIO10, LED, GPIO, NONE, NONE),
+ MFP_FALCON(GPIO11, LED, GPIO, NONE, NONE),
+ MFP_FALCON(GPIO12, LED, GPIO, NONE, NONE),
+ MFP_FALCON(GPIO13, LED, GPIO, NONE, NONE),
+ MFP_FALCON(GPIO14, LED, GPIO, NONE, NONE),
+ MFP_FALCON(GPIO32, ASC, GPIO, NONE, NONE),
+ MFP_FALCON(GPIO33, ASC, GPIO, NONE, NONE),
+ MFP_FALCON(GPIO34, SPI, GPIO, NONE, NONE),
+ MFP_FALCON(GPIO35, SPI, GPIO, NONE, NONE),
+ MFP_FALCON(GPIO36, SPI, GPIO, NONE, NONE),
+ MFP_FALCON(GPIO37, SPI, GPIO, NONE, NONE),
+ MFP_FALCON(GPIO38, SPI, GPIO, NONE, NONE),
+ MFP_FALCON(GPIO39, I2C, GPIO, NONE, NONE),
+ MFP_FALCON(GPIO40, I2C, GPIO, NONE, NONE),
+ MFP_FALCON(GPIO41, HOSTIF, GPIO, HOSTIF, JTAG),
+ MFP_FALCON(GPIO42, HOSTIF, GPIO, HOSTIF, NONE),
+ MFP_FALCON(GPIO43, SLIC, GPIO, NONE, NONE),
+ MFP_FALCON(GPIO44, SLIC, GPIO, PCM, ASC),
+ MFP_FALCON(GPIO45, SLIC, GPIO, PCM, ASC),
+ MFP_FALCON(GPIO64, MII, GPIO, NONE, NONE),
+ MFP_FALCON(GPIO65, MII, GPIO, NONE, NONE),
+ MFP_FALCON(GPIO66, MII, GPIO, NONE, NONE),
+ MFP_FALCON(GPIO67, MII, GPIO, NONE, NONE),
+ MFP_FALCON(GPIO68, MII, GPIO, NONE, NONE),
+ MFP_FALCON(GPIO69, MII, GPIO, NONE, NONE),
+ MFP_FALCON(GPIO70, MII, GPIO, NONE, NONE),
+ MFP_FALCON(GPIO71, MII, GPIO, NONE, NONE),
+ MFP_FALCON(GPIO72, MII, GPIO, NONE, NONE),
+ MFP_FALCON(GPIO73, MII, GPIO, NONE, NONE),
+ MFP_FALCON(GPIO74, MII, GPIO, NONE, NONE),
+ MFP_FALCON(GPIO75, MII, GPIO, NONE, NONE),
+ MFP_FALCON(GPIO76, MII, GPIO, NONE, NONE),
+ MFP_FALCON(GPIO77, MII, GPIO, NONE, NONE),
+ MFP_FALCON(GPIO78, MII, GPIO, NONE, NONE),
+ MFP_FALCON(GPIO79, MII, GPIO, NONE, NONE),
+ MFP_FALCON(GPIO80, MII, GPIO, NONE, NONE),
+ MFP_FALCON(GPIO81, MII, GPIO, NONE, NONE),
+ MFP_FALCON(GPIO82, MII, GPIO, NONE, NONE),
+ MFP_FALCON(GPIO83, MII, GPIO, NONE, NONE),
+ MFP_FALCON(GPIO84, MII, GPIO, NONE, NONE),
+ MFP_FALCON(GPIO85, MII, GPIO, NONE, NONE),
+ MFP_FALCON(GPIO86, MII, GPIO, NONE, NONE),
+ MFP_FALCON(GPIO87, MII, GPIO, NONE, NONE),
+ MFP_FALCON(GPIO88, PHY, GPIO, NONE, NONE),
+};
+
+static const unsigned pins_por[] = {GPIO0};
+static const unsigned pins_ntr[] = {GPIO4};
+static const unsigned pins_ntr8k[] = {GPIO5};
+static const unsigned pins_hrst[] = {GPIO6};
+static const unsigned pins_mdio[] = {GPIO7, GPIO8};
+static const unsigned pins_bled[] = {GPIO9, GPIO10, GPIO11,
+ GPIO12, GPIO13, GPIO14};
+static const unsigned pins_asc0[] = {GPIO32, GPIO33};
+static const unsigned pins_spi[] = {GPIO34, GPIO35, GPIO36};
+static const unsigned pins_spi_cs0[] = {GPIO37};
+static const unsigned pins_spi_cs1[] = {GPIO38};
+static const unsigned pins_i2c[] = {GPIO39, GPIO40};
+static const unsigned pins_jtag[] = {GPIO41};
+static const unsigned pins_slic[] = {GPIO43, GPIO44, GPIO45};
+static const unsigned pins_pcm[] = {GPIO44, GPIO45};
+static const unsigned pins_asc1[] = {GPIO44, GPIO45};
+
+static struct ltq_pin_group falcon_grps[] = {
+ GRP_MUX("por", RST, pins_por),
+ GRP_MUX("ntr", NTR, pins_ntr),
+ GRP_MUX("ntr8k", NTR, pins_ntr8k),
+ GRP_MUX("hrst", RST, pins_hrst),
+ GRP_MUX("mdio", MDIO, pins_mdio),
+ GRP_MUX("bootled", LED, pins_bled),
+ GRP_MUX("asc0", ASC, pins_asc0),
+ GRP_MUX("spi", SPI, pins_spi),
+ GRP_MUX("spi cs0", SPI, pins_spi_cs0),
+ GRP_MUX("spi cs1", SPI, pins_spi_cs1),
+ GRP_MUX("i2c", I2C, pins_i2c),
+ GRP_MUX("jtag", JTAG, pins_jtag),
+ GRP_MUX("slic", SLIC, pins_slic),
+ GRP_MUX("pcm", PCM, pins_pcm),
+ GRP_MUX("asc1", ASC, pins_asc1),
+};
+
+static const char * const ltq_rst_grps[] = {"por", "hrst"};
+static const char * const ltq_ntr_grps[] = {"ntr", "ntr8k"};
+static const char * const ltq_mdio_grps[] = {"mdio"};
+static const char * const ltq_bled_grps[] = {"bootled"};
+static const char * const ltq_asc_grps[] = {"asc0", "asc1"};
+static const char * const ltq_spi_grps[] = {"spi", "spi cs0", "spi cs1"};
+static const char * const ltq_i2c_grps[] = {"i2c"};
+static const char * const ltq_jtag_grps[] = {"jtag"};
+static const char * const ltq_slic_grps[] = {"slic"};
+static const char * const ltq_pcm_grps[] = {"pcm"};
+
+static struct ltq_pmx_func falcon_funcs[] = {
+ {"rst", ARRAY_AND_SIZE(ltq_rst_grps)},
+ {"ntr", ARRAY_AND_SIZE(ltq_ntr_grps)},
+ {"mdio", ARRAY_AND_SIZE(ltq_mdio_grps)},
+ {"led", ARRAY_AND_SIZE(ltq_bled_grps)},
+ {"asc", ARRAY_AND_SIZE(ltq_asc_grps)},
+ {"spi", ARRAY_AND_SIZE(ltq_spi_grps)},
+ {"i2c", ARRAY_AND_SIZE(ltq_i2c_grps)},
+ {"jtag", ARRAY_AND_SIZE(ltq_jtag_grps)},
+ {"slic", ARRAY_AND_SIZE(ltq_slic_grps)},
+ {"pcm", ARRAY_AND_SIZE(ltq_pcm_grps)},
+};
+
+/* --------- pinconf related code --------- */
+static int falcon_pinconf_group_get(struct pinctrl_dev *pctrldev,
+ unsigned group, unsigned long *config)
+{
+ return -ENOTSUPP;
+}
+
+static int falcon_pinconf_group_set(struct pinctrl_dev *pctrldev,
+ unsigned group, unsigned long config)
+{
+ return -ENOTSUPP;
+}
+
+static int falcon_pinconf_get(struct pinctrl_dev *pctrldev,
+ unsigned pin, unsigned long *config)
+{
+ struct ltq_pinmux_info *info = pinctrl_dev_get_drvdata(pctrldev);
+ enum ltq_pinconf_param param = LTQ_PINCONF_UNPACK_PARAM(*config);
+ void __iomem *mem = info->membase[PORT(pin)];
+
+ switch (param) {
+ case LTQ_PINCONF_PARAM_DRIVE_CURRENT:
+ *config = LTQ_PINCONF_PACK(param,
+ !!pad_getbit(mem, LTQ_PADC_DCC, PORT_PIN(pin)));
+ break;
+
+ case LTQ_PINCONF_PARAM_SLEW_RATE:
+ *config = LTQ_PINCONF_PACK(param,
+ !!pad_getbit(mem, LTQ_PADC_SRC, PORT_PIN(pin)));
+ break;
+
+ case LTQ_PINCONF_PARAM_PULL:
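+		/* encoding: 0 = no pull, 1 = pull-down (PDEN), 2 = pull-up (PUEN) */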
+ if (pad_getbit(mem, LTQ_PADC_PDEN, PORT_PIN(pin)))
+ *config = LTQ_PINCONF_PACK(param, 1);
+ else if (pad_getbit(mem, LTQ_PADC_PUEN, PORT_PIN(pin)))
+ *config = LTQ_PINCONF_PACK(param, 2);
+ else
+ *config = LTQ_PINCONF_PACK(param, 0);
+
+ break;
+
+ default:
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static int falcon_pinconf_set(struct pinctrl_dev *pctrldev,
+ unsigned pin, unsigned long config)
+{
+ enum ltq_pinconf_param param = LTQ_PINCONF_UNPACK_PARAM(config);
+ int arg = LTQ_PINCONF_UNPACK_ARG(config);
+ struct ltq_pinmux_info *info = pinctrl_dev_get_drvdata(pctrldev);
+ void __iomem *mem = info->membase[PORT(pin)];
+ u32 reg;
+
+ switch (param) {
+ case LTQ_PINCONF_PARAM_DRIVE_CURRENT:
+ reg = LTQ_PADC_DCC;
+ break;
+
+ case LTQ_PINCONF_PARAM_SLEW_RATE:
+ reg = LTQ_PADC_SRC;
+ break;
+
+ case LTQ_PINCONF_PARAM_PULL:
+ if (arg == 1)
+ reg = LTQ_PADC_PDEN;
+ else
+ reg = LTQ_PADC_PUEN;
+ break;
+
+ default:
+ pr_err("%s: Invalid config param %04x\n",
+ pinctrl_dev_get_name(pctrldev), param);
+ return -ENOTSUPP;
+ }
+
+ pad_w32(mem, BIT(PORT_PIN(pin)), reg);
+ if (!(pad_r32(mem, reg) & BIT(PORT_PIN(pin))))
+ return -ENOTSUPP;
+ return 0;
+}
+
+static void falcon_pinconf_dbg_show(struct pinctrl_dev *pctrldev,
+ struct seq_file *s, unsigned offset)
+{
+}
+
+static void falcon_pinconf_group_dbg_show(struct pinctrl_dev *pctrldev,
+ struct seq_file *s, unsigned selector)
+{
+}
+
+struct pinconf_ops falcon_pinconf_ops = {
+ .pin_config_get = falcon_pinconf_get,
+ .pin_config_set = falcon_pinconf_set,
+ .pin_config_group_get = falcon_pinconf_group_get,
+ .pin_config_group_set = falcon_pinconf_group_set,
+ .pin_config_dbg_show = falcon_pinconf_dbg_show,
+ .pin_config_group_dbg_show = falcon_pinconf_group_dbg_show,
+};
+
+static struct pinctrl_desc falcon_pctrl_desc = {
+ .owner = THIS_MODULE,
+ .pins = falcon_pads,
+ .confops = &falcon_pinconf_ops,
+};
+
+static inline int falcon_mux_apply(struct pinctrl_dev *pctrldev,
+ int mfp, int mux)
+{
+ struct ltq_pinmux_info *info = pinctrl_dev_get_drvdata(pctrldev);
+ int port = PORT(info->mfp[mfp].pin);
+
+ if ((port >= PORTS) || (!info->membase[port]))
+ return -ENODEV;
+
+ pad_w32(info->membase[port], mux,
+ LTQ_PADC_MUX(PORT_PIN(info->mfp[mfp].pin)));
+ return 0;
+}
+
+static const struct ltq_cfg_param falcon_cfg_params[] = {
+ {"lantiq,pull", LTQ_PINCONF_PARAM_PULL},
+ {"lantiq,drive-current", LTQ_PINCONF_PARAM_DRIVE_CURRENT},
+ {"lantiq,slew-rate", LTQ_PINCONF_PARAM_SLEW_RATE},
+};
+
+static struct ltq_pinmux_info falcon_info = {
+	.desc = &falcon_pctrl_desc,
+	.apply_mux = falcon_mux_apply,
+	.params = falcon_cfg_params,
+	.num_params = ARRAY_SIZE(falcon_cfg_params),
+};
+
+/* --------- register the pinctrl layer --------- */
+
+int pinctrl_falcon_get_range_size(int id)
+{
+ u32 avail;
+
+ if ((id >= PORTS) || (!falcon_info.membase[id]))
+ return -EINVAL;
+
+ avail = pad_r32(falcon_info.membase[id], LTQ_PADC_AVAIL);
+
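+	/*
+	 * AVAIL is assumed to hold a contiguous mask of low bits, so
+	 * fls() yields the number of available pads.
+	 */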
+ return fls(avail);
+}
+
+void pinctrl_falcon_add_gpio_range(struct pinctrl_gpio_range *range)
+{
+ pinctrl_add_gpio_range(falcon_info.pctrl, range);
+}
+
+static int pinctrl_falcon_probe(struct platform_device *pdev)
+{
+ struct device_node *np;
+ int pad_count = 0;
+ int ret = 0;
+
+ /* load and remap the pad resources of the different banks */
+ for_each_compatible_node(np, NULL, "lantiq,pad-falcon") {
+ struct platform_device *ppdev = of_find_device_by_node(np);
+ const __be32 *bank = of_get_property(np, "lantiq,bank", NULL);
+ struct resource res;
+ u32 avail;
+ int pins;
+
+ if (!ppdev) {
+ dev_err(&pdev->dev, "failed to find pad pdev\n");
+ continue;
+ }
+ if (!bank || *bank >= PORTS)
+ continue;
+ if (of_address_to_resource(np, 0, &res))
+ continue;
+ falcon_info.clk[*bank] = clk_get(&ppdev->dev, NULL);
+ if (IS_ERR(falcon_info.clk[*bank])) {
+ dev_err(&ppdev->dev, "failed to get clock\n");
+ return PTR_ERR(falcon_info.clk[*bank]);
+ }
+ falcon_info.membase[*bank] =
+ devm_request_and_ioremap(&pdev->dev, &res);
+ if (!falcon_info.membase[*bank]) {
+ dev_err(&pdev->dev,
+ "Failed to remap memory for bank %d\n",
+ *bank);
+ return -ENOMEM;
+ }
+ avail = pad_r32(falcon_info.membase[*bank],
+ LTQ_PADC_AVAIL);
+ pins = fls(avail);
+ lantiq_load_pin_desc(&falcon_pads[pad_count], *bank, pins);
+ pad_count += pins;
+ clk_enable(falcon_info.clk[*bank]);
+ dev_dbg(&pdev->dev, "found %s with %d pads\n",
+ res.name, pins);
+ }
+ dev_dbg(&pdev->dev, "found a total of %d pads\n", pad_count);
+ falcon_pctrl_desc.name = dev_name(&pdev->dev);
+ falcon_pctrl_desc.npins = pad_count;
+
+ falcon_info.mfp = falcon_mfp;
+ falcon_info.num_mfp = ARRAY_SIZE(falcon_mfp);
+ falcon_info.grps = falcon_grps;
+ falcon_info.num_grps = ARRAY_SIZE(falcon_grps);
+ falcon_info.funcs = falcon_funcs;
+ falcon_info.num_funcs = ARRAY_SIZE(falcon_funcs);
+
+ ret = ltq_pinctrl_register(pdev, &falcon_info);
+ if (!ret)
+ dev_info(&pdev->dev, "Init done\n");
+ return ret;
+}
+
+static const struct of_device_id falcon_match[] = {
+ { .compatible = "lantiq,pinctrl-falcon" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, falcon_match);
+
+static struct platform_driver pinctrl_falcon_driver = {
+ .probe = pinctrl_falcon_probe,
+ .driver = {
+ .name = "pinctrl-falcon",
+ .owner = THIS_MODULE,
+ .of_match_table = falcon_match,
+ },
+};
+
+int __init pinctrl_falcon_init(void)
+{
+ return platform_driver_register(&pinctrl_falcon_driver);
+}
+
+core_initcall_sync(pinctrl_falcon_init);
diff --git a/drivers/pinctrl/pinctrl-kirkwood.c b/drivers/pinctrl/pinctrl-kirkwood.c
new file mode 100644
index 0000000..9a74ef6
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-kirkwood.c
@@ -0,0 +1,472 @@
+/*
+ * Marvell Kirkwood pinctrl driver based on mvebu pinctrl core
+ *
+ * Author: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-mvebu.h"
+
+#define V(f6180, f6190, f6192, f6281, f6282) \
+ ((f6180 << 0) | (f6190 << 1) | (f6192 << 2) | \
+ (f6281 << 3) | (f6282 << 4))
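+
+/*
+ * Illustrative example (not in the original source): V(0, 0, 0, 1, 1)
+ * evaluates to 0x18, i.e. a setting available only on MV88F6281 and
+ * MV88F6282; support on a given SoC is then a bitwise AND against its
+ * variant mask.
+ */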
+
+enum kirkwood_variant {
+ VARIANT_MV88F6180 = V(1, 0, 0, 0, 0),
+ VARIANT_MV88F6190 = V(0, 1, 0, 0, 0),
+ VARIANT_MV88F6192 = V(0, 0, 1, 0, 0),
+ VARIANT_MV88F6281 = V(0, 0, 0, 1, 0),
+ VARIANT_MV88F6282 = V(0, 0, 0, 0, 1),
+};
+
+static struct mvebu_mpp_mode mv88f6xxx_mpp_modes[] = {
+ MPP_MODE(0,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "nand", "io2", V(1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x2, "spi", "cs", V(1, 1, 1, 1, 1))),
+ MPP_MODE(1,
+ MPP_VAR_FUNCTION(0x0, "gpo", NULL, V(1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "nand", "io3", V(1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x2, "spi", "mosi", V(1, 1, 1, 1, 1))),
+ MPP_MODE(2,
+ MPP_VAR_FUNCTION(0x0, "gpo", NULL, V(1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "nand", "io4", V(1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x2, "spi", "sck", V(1, 1, 1, 1, 1))),
+ MPP_MODE(3,
+ MPP_VAR_FUNCTION(0x0, "gpo", NULL, V(1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "nand", "io5", V(1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x2, "spi", "miso", V(1, 1, 1, 1, 1))),
+ MPP_MODE(4,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "nand", "io6", V(1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x2, "uart0", "rxd", V(1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x5, "sata1", "act", V(0, 0, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0xb, "lcd", "hsync", V(0, 0, 0, 0, 1)),
+ MPP_VAR_FUNCTION(0xd, "ptp", "clk", V(1, 1, 1, 1, 0))),
+ MPP_MODE(5,
+ MPP_VAR_FUNCTION(0x0, "gpo", NULL, V(1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "nand", "io7", V(1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x2, "uart0", "txd", V(1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x4, "ptp", "trig", V(1, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x5, "sata0", "act", V(0, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0xb, "lcd", "vsync", V(0, 0, 0, 0, 1))),
+ MPP_MODE(6,
+ MPP_VAR_FUNCTION(0x0, "sysrst", "out", V(1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "spi", "mosi", V(1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x2, "ptp", "trig", V(1, 1, 1, 1, 0))),
+ MPP_MODE(7,
+ MPP_VAR_FUNCTION(0x0, "gpo", NULL, V(1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "pex", "rsto", V(1, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x2, "spi", "cs", V(1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x3, "ptp", "trig", V(1, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0xb, "lcd", "pwm", V(0, 0, 0, 0, 1))),
+ MPP_MODE(8,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "twsi0", "sda", V(1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x2, "uart0", "rts", V(1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x3, "uart1", "rts", V(1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x4, "mii-1", "rxerr", V(0, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x5, "sata1", "prsnt", V(0, 0, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0xc, "ptp", "clk", V(1, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0xd, "mii", "col", V(1, 1, 1, 1, 1))),
+ MPP_MODE(9,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "twsi0", "sck", V(1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x2, "uart0", "cts", V(1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x3, "uart1", "cts", V(1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x5, "sata0", "prsnt", V(0, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0xc, "ptp", "evreq", V(1, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0xd, "mii", "crs", V(1, 1, 1, 1, 1))),
+ MPP_MODE(10,
+ MPP_VAR_FUNCTION(0x0, "gpo", NULL, V(1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x2, "spi", "sck", V(1, 1, 1, 1, 1)),
+		MPP_VAR_FUNCTION(0x3, "uart0", "txd", V(1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x5, "sata1", "act", V(0, 0, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0xc, "ptp", "trig", V(1, 1, 1, 1, 0))),
+ MPP_MODE(11,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x2, "spi", "miso", V(1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x3, "uart0", "rxd", V(1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x4, "ptp-1", "evreq", V(1, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0xc, "ptp-2", "trig", V(1, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0xd, "ptp", "clk", V(1, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x5, "sata0", "act", V(0, 1, 1, 1, 1))),
+ MPP_MODE(12,
+ MPP_VAR_FUNCTION(0x0, "gpo", NULL, V(1, 1, 1, 0, 1)),
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 0)),
+ MPP_VAR_FUNCTION(0x1, "sdio", "clk", V(1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0xa, "audio", "spdifo", V(0, 0, 0, 0, 1)),
+ MPP_VAR_FUNCTION(0xb, "spi", "mosi", V(0, 0, 0, 0, 1)),
+ MPP_VAR_FUNCTION(0xd, "twsi1", "sda", V(0, 0, 0, 0, 1))),
+ MPP_MODE(13,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "sdio", "cmd", V(1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x3, "uart1", "txd", V(1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0xa, "audio", "rmclk", V(0, 0, 0, 0, 1)),
+ MPP_VAR_FUNCTION(0xb, "lcd", "pwm", V(0, 0, 0, 0, 1))),
+ MPP_MODE(14,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "sdio", "d0", V(1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x3, "uart1", "rxd", V(1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x4, "sata1", "prsnt", V(0, 0, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0xa, "audio", "spdifi", V(0, 0, 0, 0, 1)),
+ MPP_VAR_FUNCTION(0xb, "audio-1", "sdi", V(0, 0, 0, 0, 1)),
+ MPP_VAR_FUNCTION(0xd, "mii", "col", V(1, 1, 1, 1, 1))),
+ MPP_MODE(15,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "sdio", "d1", V(1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x2, "uart0", "rts", V(1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x3, "uart1", "txd", V(1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x4, "sata0", "act", V(0, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0xb, "spi", "cs", V(0, 0, 0, 0, 1))),
+ MPP_MODE(16,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "sdio", "d2", V(1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x2, "uart0", "cts", V(1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x3, "uart1", "rxd", V(1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x4, "sata1", "act", V(0, 0, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0xb, "lcd", "extclk", V(0, 0, 0, 0, 1)),
+ MPP_VAR_FUNCTION(0xd, "mii", "crs", V(1, 1, 1, 1, 1))),
+ MPP_MODE(17,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "sdio", "d3", V(1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x4, "sata0", "prsnt", V(0, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0xa, "sata1", "act", V(0, 0, 0, 0, 1)),
+ MPP_VAR_FUNCTION(0xd, "twsi1", "sck", V(0, 0, 0, 0, 1))),
+ MPP_MODE(18,
+ MPP_VAR_FUNCTION(0x0, "gpo", NULL, V(1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "nand", "io0", V(1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x2, "pex", "clkreq", V(0, 0, 0, 0, 1))),
+ MPP_MODE(19,
+ MPP_VAR_FUNCTION(0x0, "gpo", NULL, V(1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "nand", "io1", V(1, 1, 1, 1, 1))),
+ MPP_MODE(20,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "ts", "mp0", V(0, 0, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x2, "tdm", "tx0ql", V(0, 0, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x3, "ge1", "txd0", V(0, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x4, "audio", "spdifi", V(0, 0, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x5, "sata1", "act", V(0, 0, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0xb, "lcd", "d0", V(0, 0, 0, 0, 1)),
+ MPP_VAR_FUNCTION(0xc, "mii", "rxerr", V(1, 0, 0, 0, 0))),
+ MPP_MODE(21,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "ts", "mp1", V(0, 0, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x2, "tdm", "rx0ql", V(0, 0, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x3, "ge1", "txd1", V(0, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x4, "audio", "spdifi", V(1, 0, 0, 0, 0)),
+ MPP_VAR_FUNCTION(0x4, "audio", "spdifo", V(0, 0, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x5, "sata0", "act", V(0, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0xb, "lcd", "d1", V(0, 0, 0, 0, 1))),
+ MPP_MODE(22,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "ts", "mp2", V(0, 0, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x2, "tdm", "tx2ql", V(0, 0, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x3, "ge1", "txd2", V(0, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x4, "audio", "spdifo", V(1, 0, 0, 0, 0)),
+ MPP_VAR_FUNCTION(0x4, "audio", "rmclk", V(0, 0, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x5, "sata1", "prsnt", V(0, 0, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0xb, "lcd", "d2", V(0, 0, 0, 0, 1))),
+ MPP_MODE(23,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "ts", "mp3", V(0, 0, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x2, "tdm", "rx2ql", V(0, 0, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x3, "ge1", "txd3", V(0, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x4, "audio", "rmclk", V(1, 0, 0, 0, 0)),
+ MPP_VAR_FUNCTION(0x4, "audio", "bclk", V(0, 0, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x5, "sata0", "prsnt", V(0, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0xb, "lcd", "d3", V(0, 0, 0, 0, 1))),
+ MPP_MODE(24,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "ts", "mp4", V(0, 0, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x2, "tdm", "spi-cs0", V(0, 0, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x3, "ge1", "rxd0", V(0, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x4, "audio", "bclk", V(1, 0, 0, 0, 0)),
+ MPP_VAR_FUNCTION(0x4, "audio", "sdo", V(0, 0, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0xb, "lcd", "d4", V(0, 0, 0, 0, 1))),
+ MPP_MODE(25,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "ts", "mp5", V(0, 0, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x2, "tdm", "spi-sck", V(0, 0, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x3, "ge1", "rxd1", V(0, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x4, "audio", "sdo", V(1, 0, 0, 0, 0)),
+ MPP_VAR_FUNCTION(0x4, "audio", "lrclk", V(0, 0, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0xb, "lcd", "d5", V(0, 0, 0, 0, 1))),
+ MPP_MODE(26,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "ts", "mp6", V(0, 0, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x2, "tdm", "spi-miso", V(0, 0, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x3, "ge1", "rxd2", V(0, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x4, "audio", "lrclk", V(1, 0, 0, 0, 0)),
+ MPP_VAR_FUNCTION(0x4, "audio", "mclk", V(0, 0, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0xb, "lcd", "d6", V(0, 0, 0, 0, 1))),
+ MPP_MODE(27,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "ts", "mp7", V(0, 0, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x2, "tdm", "spi-mosi", V(0, 0, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x3, "ge1", "rxd3", V(0, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x4, "audio", "mclk", V(1, 0, 0, 0, 0)),
+ MPP_VAR_FUNCTION(0x4, "audio", "sdi", V(0, 0, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0xb, "lcd", "d7", V(0, 0, 0, 0, 1))),
+ MPP_MODE(28,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "ts", "mp8", V(0, 0, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x2, "tdm", "int", V(0, 0, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x3, "ge1", "col", V(0, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x4, "audio", "sdi", V(1, 0, 0, 0, 0)),
+ MPP_VAR_FUNCTION(0x4, "audio", "extclk", V(0, 0, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0xb, "lcd", "d8", V(0, 0, 0, 0, 1))),
+ MPP_MODE(29,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "ts", "mp9", V(0, 0, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x2, "tdm", "rst", V(0, 0, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x3, "ge1", "txclk", V(0, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x4, "audio", "extclk", V(1, 0, 0, 0, 0)),
+ MPP_VAR_FUNCTION(0xb, "lcd", "d9", V(0, 0, 0, 0, 1))),
+ MPP_MODE(30,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "ts", "mp10", V(0, 0, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x2, "tdm", "pclk", V(0, 0, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x3, "ge1", "rxctl", V(0, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0xb, "lcd", "d10", V(0, 0, 0, 0, 1))),
+ MPP_MODE(31,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "ts", "mp11", V(0, 0, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x2, "tdm", "fs", V(0, 0, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x3, "ge1", "rxclk", V(0, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0xb, "lcd", "d11", V(0, 0, 0, 0, 1))),
+ MPP_MODE(32,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "ts", "mp12", V(0, 0, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x2, "tdm", "drx", V(0, 0, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x3, "ge1", "txclko", V(0, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0xb, "lcd", "d12", V(0, 0, 0, 0, 1))),
+ MPP_MODE(33,
+ MPP_VAR_FUNCTION(0x0, "gpo", NULL, V(0, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x2, "tdm", "dtx", V(0, 0, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x3, "ge1", "txctl", V(0, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0xb, "lcd", "d13", V(0, 0, 0, 0, 1))),
+ MPP_MODE(34,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x2, "tdm", "spi-cs1", V(0, 0, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x3, "ge1", "txen", V(0, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x5, "sata1", "act", V(0, 0, 0, 1, 1)),
+ MPP_VAR_FUNCTION(0xb, "lcd", "d14", V(0, 0, 0, 0, 1))),
+ MPP_MODE(35,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x2, "tdm", "tx0ql", V(0, 0, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x3, "ge1", "rxerr", V(0, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x5, "sata0", "act", V(0, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0xb, "lcd", "d15", V(0, 0, 0, 0, 1)),
+ MPP_VAR_FUNCTION(0xc, "mii", "rxerr", V(0, 1, 1, 1, 1))),
+ MPP_MODE(36,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "ts", "mp0", V(0, 0, 0, 1, 1)),
+ MPP_VAR_FUNCTION(0x2, "tdm", "spi-cs1", V(0, 0, 0, 1, 1)),
+ MPP_VAR_FUNCTION(0x4, "audio", "spdifi", V(0, 0, 0, 1, 1)),
+ MPP_VAR_FUNCTION(0xb, "twsi1", "sda", V(0, 0, 0, 0, 1))),
+ MPP_MODE(37,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "ts", "mp1", V(0, 0, 0, 1, 1)),
+ MPP_VAR_FUNCTION(0x2, "tdm", "tx2ql", V(0, 0, 0, 1, 1)),
+ MPP_VAR_FUNCTION(0x4, "audio", "spdifo", V(0, 0, 0, 1, 1)),
+ MPP_VAR_FUNCTION(0xb, "twsi1", "sck", V(0, 0, 0, 0, 1))),
+ MPP_MODE(38,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "ts", "mp2", V(0, 0, 0, 1, 1)),
+ MPP_VAR_FUNCTION(0x2, "tdm", "rx2ql", V(0, 0, 0, 1, 1)),
+ MPP_VAR_FUNCTION(0x4, "audio", "rmclk", V(0, 0, 0, 1, 1)),
+ MPP_VAR_FUNCTION(0xb, "lcd", "d18", V(0, 0, 0, 0, 1))),
+ MPP_MODE(39,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "ts", "mp3", V(0, 0, 0, 1, 1)),
+ MPP_VAR_FUNCTION(0x2, "tdm", "spi-cs0", V(0, 0, 0, 1, 1)),
+ MPP_VAR_FUNCTION(0x4, "audio", "bclk", V(0, 0, 0, 1, 1)),
+ MPP_VAR_FUNCTION(0xb, "lcd", "d19", V(0, 0, 0, 0, 1))),
+ MPP_MODE(40,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "ts", "mp4", V(0, 0, 0, 1, 1)),
+ MPP_VAR_FUNCTION(0x2, "tdm", "spi-sck", V(0, 0, 0, 1, 1)),
+ MPP_VAR_FUNCTION(0x4, "audio", "sdo", V(0, 0, 0, 1, 1)),
+ MPP_VAR_FUNCTION(0xb, "lcd", "d20", V(0, 0, 0, 0, 1))),
+ MPP_MODE(41,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "ts", "mp5", V(0, 0, 0, 1, 1)),
+ MPP_VAR_FUNCTION(0x2, "tdm", "spi-miso", V(0, 0, 0, 1, 1)),
+ MPP_VAR_FUNCTION(0x4, "audio", "lrclk", V(0, 0, 0, 1, 1)),
+ MPP_VAR_FUNCTION(0xb, "lcd", "d21", V(0, 0, 0, 0, 1))),
+ MPP_MODE(42,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "ts", "mp6", V(0, 0, 0, 1, 1)),
+ MPP_VAR_FUNCTION(0x2, "tdm", "spi-mosi", V(0, 0, 0, 1, 1)),
+ MPP_VAR_FUNCTION(0x4, "audio", "mclk", V(0, 0, 0, 1, 1)),
+ MPP_VAR_FUNCTION(0xb, "lcd", "d22", V(0, 0, 0, 0, 1))),
+ MPP_MODE(43,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "ts", "mp7", V(0, 0, 0, 1, 1)),
+ MPP_VAR_FUNCTION(0x2, "tdm", "int", V(0, 0, 0, 1, 1)),
+ MPP_VAR_FUNCTION(0x4, "audio", "sdi", V(0, 0, 0, 1, 1)),
+ MPP_VAR_FUNCTION(0xb, "lcd", "d23", V(0, 0, 0, 0, 1))),
+ MPP_MODE(44,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "ts", "mp8", V(0, 0, 0, 1, 1)),
+ MPP_VAR_FUNCTION(0x2, "tdm", "rst", V(0, 0, 0, 1, 1)),
+ MPP_VAR_FUNCTION(0x4, "audio", "extclk", V(0, 0, 0, 1, 1)),
+ MPP_VAR_FUNCTION(0xb, "lcd", "clk", V(0, 0, 0, 0, 1))),
+ MPP_MODE(45,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "ts", "mp9", V(0, 0, 0, 1, 1)),
+ MPP_VAR_FUNCTION(0x2, "tdm", "pclk", V(0, 0, 0, 1, 1)),
+ MPP_VAR_FUNCTION(0xb, "lcd", "e", V(0, 0, 0, 0, 1))),
+ MPP_MODE(46,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "ts", "mp10", V(0, 0, 0, 1, 1)),
+ MPP_VAR_FUNCTION(0x2, "tdm", "fs", V(0, 0, 0, 1, 1)),
+ MPP_VAR_FUNCTION(0xb, "lcd", "hsync", V(0, 0, 0, 0, 1))),
+ MPP_MODE(47,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "ts", "mp11", V(0, 0, 0, 1, 1)),
+ MPP_VAR_FUNCTION(0x2, "tdm", "drx", V(0, 0, 0, 1, 1)),
+ MPP_VAR_FUNCTION(0xb, "lcd", "vsync", V(0, 0, 0, 0, 1))),
+ MPP_MODE(48,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "ts", "mp12", V(0, 0, 0, 1, 1)),
+ MPP_VAR_FUNCTION(0x2, "tdm", "dtx", V(0, 0, 0, 1, 1)),
+ MPP_VAR_FUNCTION(0xb, "lcd", "d16", V(0, 0, 0, 0, 1))),
+ MPP_MODE(49,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 0)),
+ MPP_VAR_FUNCTION(0x0, "gpo", NULL, V(0, 0, 0, 0, 1)),
+ MPP_VAR_FUNCTION(0x1, "ts", "mp9", V(0, 0, 0, 1, 0)),
+ MPP_VAR_FUNCTION(0x2, "tdm", "rx0ql", V(0, 0, 0, 1, 1)),
+ MPP_VAR_FUNCTION(0x5, "ptp", "clk", V(0, 0, 0, 1, 0)),
+ MPP_VAR_FUNCTION(0xa, "pex", "clkreq", V(0, 0, 0, 0, 1)),
+ MPP_VAR_FUNCTION(0xb, "lcd", "d17", V(0, 0, 0, 0, 1))),
+};
+
+static struct mvebu_mpp_ctrl mv88f6180_mpp_controls[] = {
+ MPP_REG_CTRL(0, 29),
+};
+
+static struct pinctrl_gpio_range mv88f6180_gpio_ranges[] = {
+ MPP_GPIO_RANGE(0, 0, 0, 30),
+};
+
+static struct mvebu_mpp_ctrl mv88f619x_mpp_controls[] = {
+ MPP_REG_CTRL(0, 35),
+};
+
+static struct pinctrl_gpio_range mv88f619x_gpio_ranges[] = {
+ MPP_GPIO_RANGE(0, 0, 0, 32),
+ MPP_GPIO_RANGE(1, 32, 32, 4),
+};
+
+static struct mvebu_mpp_ctrl mv88f628x_mpp_controls[] = {
+ MPP_REG_CTRL(0, 49),
+};
+
+static struct pinctrl_gpio_range mv88f628x_gpio_ranges[] = {
+ MPP_GPIO_RANGE(0, 0, 0, 32),
+ MPP_GPIO_RANGE(1, 32, 32, 18),
+};
+
+static struct mvebu_pinctrl_soc_info mv88f6180_info = {
+ .variant = VARIANT_MV88F6180,
+ .controls = mv88f6180_mpp_controls,
+ .ncontrols = ARRAY_SIZE(mv88f6180_mpp_controls),
+ .modes = mv88f6xxx_mpp_modes,
+ .nmodes = ARRAY_SIZE(mv88f6xxx_mpp_modes),
+ .gpioranges = mv88f6180_gpio_ranges,
+ .ngpioranges = ARRAY_SIZE(mv88f6180_gpio_ranges),
+};
+
+static struct mvebu_pinctrl_soc_info mv88f6190_info = {
+ .variant = VARIANT_MV88F6190,
+ .controls = mv88f619x_mpp_controls,
+ .ncontrols = ARRAY_SIZE(mv88f619x_mpp_controls),
+ .modes = mv88f6xxx_mpp_modes,
+ .nmodes = ARRAY_SIZE(mv88f6xxx_mpp_modes),
+ .gpioranges = mv88f619x_gpio_ranges,
+ .ngpioranges = ARRAY_SIZE(mv88f619x_gpio_ranges),
+};
+
+static struct mvebu_pinctrl_soc_info mv88f6192_info = {
+ .variant = VARIANT_MV88F6192,
+ .controls = mv88f619x_mpp_controls,
+ .ncontrols = ARRAY_SIZE(mv88f619x_mpp_controls),
+ .modes = mv88f6xxx_mpp_modes,
+ .nmodes = ARRAY_SIZE(mv88f6xxx_mpp_modes),
+ .gpioranges = mv88f619x_gpio_ranges,
+ .ngpioranges = ARRAY_SIZE(mv88f619x_gpio_ranges),
+};
+
+static struct mvebu_pinctrl_soc_info mv88f6281_info = {
+ .variant = VARIANT_MV88F6281,
+ .controls = mv88f628x_mpp_controls,
+ .ncontrols = ARRAY_SIZE(mv88f628x_mpp_controls),
+ .modes = mv88f6xxx_mpp_modes,
+ .nmodes = ARRAY_SIZE(mv88f6xxx_mpp_modes),
+ .gpioranges = mv88f628x_gpio_ranges,
+ .ngpioranges = ARRAY_SIZE(mv88f628x_gpio_ranges),
+};
+
+static struct mvebu_pinctrl_soc_info mv88f6282_info = {
+ .variant = VARIANT_MV88F6282,
+ .controls = mv88f628x_mpp_controls,
+ .ncontrols = ARRAY_SIZE(mv88f628x_mpp_controls),
+ .modes = mv88f6xxx_mpp_modes,
+ .nmodes = ARRAY_SIZE(mv88f6xxx_mpp_modes),
+ .gpioranges = mv88f628x_gpio_ranges,
+ .ngpioranges = ARRAY_SIZE(mv88f628x_gpio_ranges),
+};
+
+static struct of_device_id kirkwood_pinctrl_of_match[] __devinitdata = {
+ { .compatible = "marvell,88f6180-pinctrl", .data = &mv88f6180_info },
+ { .compatible = "marvell,88f6190-pinctrl", .data = &mv88f6190_info },
+ { .compatible = "marvell,88f6192-pinctrl", .data = &mv88f6192_info },
+ { .compatible = "marvell,88f6281-pinctrl", .data = &mv88f6281_info },
+ { .compatible = "marvell,88f6282-pinctrl", .data = &mv88f6282_info },
+ { }
+};
+
+static int __devinit kirkwood_pinctrl_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *match =
+ of_match_device(kirkwood_pinctrl_of_match, &pdev->dev);
+ pdev->dev.platform_data = match->data;
+ return mvebu_pinctrl_probe(pdev);
+}
+
+static int __devexit kirkwood_pinctrl_remove(struct platform_device *pdev)
+{
+ return mvebu_pinctrl_remove(pdev);
+}
+
+static struct platform_driver kirkwood_pinctrl_driver = {
+ .driver = {
+ .name = "kirkwood-pinctrl",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(kirkwood_pinctrl_of_match),
+ },
+ .probe = kirkwood_pinctrl_probe,
+ .remove = __devexit_p(kirkwood_pinctrl_remove),
+};
+
+module_platform_driver(kirkwood_pinctrl_driver);
+
+MODULE_AUTHOR("Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>");
+MODULE_DESCRIPTION("Marvell Kirkwood pinctrl driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/pinctrl-lantiq.c b/drivers/pinctrl/pinctrl-lantiq.c
new file mode 100644
index 0000000..07ba768
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-lantiq.c
@@ -0,0 +1,342 @@
+/*
+ * linux/drivers/pinctrl/pinctrl-lantiq.c
+ * based on linux/drivers/pinctrl/pinctrl-pxa3xx.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+
+#include "pinctrl-lantiq.h"
+
+static int ltq_get_group_count(struct pinctrl_dev *pctrldev)
+{
+ struct ltq_pinmux_info *info = pinctrl_dev_get_drvdata(pctrldev);
+ return info->num_grps;
+}
+
+static const char *ltq_get_group_name(struct pinctrl_dev *pctrldev,
+ unsigned selector)
+{
+ struct ltq_pinmux_info *info = pinctrl_dev_get_drvdata(pctrldev);
+ if (selector >= info->num_grps)
+ return NULL;
+ return info->grps[selector].name;
+}
+
+static int ltq_get_group_pins(struct pinctrl_dev *pctrldev,
+ unsigned selector,
+ const unsigned **pins,
+ unsigned *num_pins)
+{
+ struct ltq_pinmux_info *info = pinctrl_dev_get_drvdata(pctrldev);
+ if (selector >= info->num_grps)
+ return -EINVAL;
+ *pins = info->grps[selector].pins;
+ *num_pins = info->grps[selector].npins;
+ return 0;
+}
+
+void ltq_pinctrl_dt_free_map(struct pinctrl_dev *pctldev,
+ struct pinctrl_map *map, unsigned num_maps)
+{
+ int i;
+
+ for (i = 0; i < num_maps; i++)
+ if (map[i].type == PIN_MAP_TYPE_CONFIGS_PIN)
+ kfree(map[i].data.configs.configs);
+ kfree(map);
+}
+
+static void ltq_pinctrl_pin_dbg_show(struct pinctrl_dev *pctldev,
+ struct seq_file *s,
+ unsigned offset)
+{
+ seq_printf(s, " %s", dev_name(pctldev->dev));
+}
+
+static int ltq_pinctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
+ struct device_node *np,
+ struct pinctrl_map **map)
+{
+ struct ltq_pinmux_info *info = pinctrl_dev_get_drvdata(pctldev);
+ unsigned long configs[3];
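+	/* one slot per supported pinconf param; assumes num_params <= 3 */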
+ unsigned num_configs = 0;
+ struct property *prop;
+ const char *group, *pin;
+ const char *function;
+ int ret, i;
+
+ ret = of_property_read_string(np, "lantiq,function", &function);
+ if (!ret) {
+ of_property_for_each_string(np, "lantiq,groups", prop, group) {
+ (*map)->type = PIN_MAP_TYPE_MUX_GROUP;
+ (*map)->name = function;
+ (*map)->data.mux.group = group;
+ (*map)->data.mux.function = function;
+ (*map)++;
+ }
+ if (of_find_property(np, "lantiq,pins", NULL))
+ dev_err(pctldev->dev,
+ "%s mixes pins and groups settings\n",
+ np->name);
+ return 0;
+ }
+
+ for (i = 0; i < info->num_params; i++) {
+ u32 val;
+ int ret = of_property_read_u32(np,
+ info->params[i].property, &val);
+ if (!ret)
+ configs[num_configs++] =
+ LTQ_PINCONF_PACK(info->params[i].param,
+ val);
+ }
+
+ if (!num_configs)
+ return -EINVAL;
+
+ of_property_for_each_string(np, "lantiq,pins", prop, pin) {
+ (*map)->data.configs.configs = kmemdup(configs,
+ num_configs * sizeof(unsigned long),
+ GFP_KERNEL);
+ (*map)->type = PIN_MAP_TYPE_CONFIGS_PIN;
+ (*map)->name = pin;
+ (*map)->data.configs.group_or_pin = pin;
+ (*map)->data.configs.num_configs = num_configs;
+ (*map)++;
+ }
+ return 0;
+}
+
+static int ltq_pinctrl_dt_subnode_size(struct device_node *np)
+{
+ int ret;
+
+ ret = of_property_count_strings(np, "lantiq,groups");
+ if (ret < 0)
+ ret = of_property_count_strings(np, "lantiq,pins");
+ return ret;
+}
+
+int ltq_pinctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
+ struct device_node *np_config,
+ struct pinctrl_map **map,
+ unsigned *num_maps)
+{
+ struct pinctrl_map *tmp;
+ struct device_node *np;
+ int ret;
+
+ *num_maps = 0;
+ for_each_child_of_node(np_config, np)
+ *num_maps += ltq_pinctrl_dt_subnode_size(np);
+ *map = kzalloc(*num_maps * sizeof(struct pinctrl_map), GFP_KERNEL);
+ if (!*map)
+ return -ENOMEM;
+ tmp = *map;
+
+ for_each_child_of_node(np_config, np) {
+ ret = ltq_pinctrl_dt_subnode_to_map(pctldev, np, &tmp);
+ if (ret < 0) {
+ ltq_pinctrl_dt_free_map(pctldev, *map, *num_maps);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static struct pinctrl_ops ltq_pctrl_ops = {
+ .get_groups_count = ltq_get_group_count,
+ .get_group_name = ltq_get_group_name,
+ .get_group_pins = ltq_get_group_pins,
+ .pin_dbg_show = ltq_pinctrl_pin_dbg_show,
+ .dt_node_to_map = ltq_pinctrl_dt_node_to_map,
+ .dt_free_map = ltq_pinctrl_dt_free_map,
+};
+
+static int ltq_pmx_func_count(struct pinctrl_dev *pctrldev)
+{
+ struct ltq_pinmux_info *info = pinctrl_dev_get_drvdata(pctrldev);
+
+ return info->num_funcs;
+}
+
+static const char *ltq_pmx_func_name(struct pinctrl_dev *pctrldev,
+ unsigned selector)
+{
+ struct ltq_pinmux_info *info = pinctrl_dev_get_drvdata(pctrldev);
+
+ if (selector >= info->num_funcs)
+ return NULL;
+
+ return info->funcs[selector].name;
+}
+
+static int ltq_pmx_get_groups(struct pinctrl_dev *pctrldev,
+ unsigned func,
+ const char * const **groups,
+ unsigned * const num_groups)
+{
+ struct ltq_pinmux_info *info = pinctrl_dev_get_drvdata(pctrldev);
+
+ *groups = info->funcs[func].groups;
+ *num_groups = info->funcs[func].num_groups;
+
+ return 0;
+}
+
+/* Return the function number, or a negative value on failure. */
+static int match_mux(const struct ltq_mfp_pin *mfp, unsigned mux)
+{
+ int i;
+ for (i = 0; i < LTQ_MAX_MUX; i++) {
+ if (mfp->func[i] == mux)
+ break;
+ }
+ if (i >= LTQ_MAX_MUX)
+ return -EINVAL;
+ return i;
+}
+
+/* don't assume .mfp is linearly mapped; find the mfp with the correct .pin */
+static int match_mfp(const struct ltq_pinmux_info *info, int pin)
+{
+ int i;
+ for (i = 0; i < info->num_mfp; i++) {
+ if (info->mfp[i].pin == pin)
+ return i;
+ }
+ return -1;
+}
+
+/* check whether the current pin configuration is valid; negative means failure */
+static int match_group_mux(const struct ltq_pin_group *grp,
+ const struct ltq_pinmux_info *info,
+ unsigned mux)
+{
+ int i, pin, ret = 0;
+ for (i = 0; i < grp->npins; i++) {
+ pin = match_mfp(info, grp->pins[i]);
+ if (pin < 0) {
+ dev_err(info->dev, "could not find mfp for pin %d\n",
+ grp->pins[i]);
+ return -EINVAL;
+ }
+ ret = match_mux(&info->mfp[pin], mux);
+ if (ret < 0) {
+ dev_err(info->dev, "Can't find mux %d on pin%d\n",
+ mux, pin);
+ break;
+ }
+ }
+ return ret;
+}
+
+static int ltq_pmx_enable(struct pinctrl_dev *pctrldev,
+ unsigned func,
+ unsigned group)
+{
+ struct ltq_pinmux_info *info = pinctrl_dev_get_drvdata(pctrldev);
+ const struct ltq_pin_group *pin_grp = &info->grps[group];
+ int i, pin, pin_func, ret;
+
+ if (!pin_grp->npins ||
+ (match_group_mux(pin_grp, info, pin_grp->mux) < 0)) {
+ dev_err(info->dev, "Failed to set the pin group: %s\n",
+ info->grps[group].name);
+ return -EINVAL;
+ }
+ for (i = 0; i < pin_grp->npins; i++) {
+ pin = match_mfp(info, pin_grp->pins[i]);
+ if (pin < 0) {
+ dev_err(info->dev, "could not find mfp for pin %d\n",
+ pin_grp->pins[i]);
+ return -EINVAL;
+ }
+ pin_func = match_mux(&info->mfp[pin], pin_grp->mux);
+ ret = info->apply_mux(pctrldev, pin, pin_func);
+ if (ret) {
+ dev_err(info->dev,
+ "failed to apply mux %d for pin %d\n",
+ pin_func, pin);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static void ltq_pmx_disable(struct pinctrl_dev *pctrldev,
+ unsigned func,
+ unsigned group)
+{
+ /*
+	 * Nothing to do here. However, pinmux_check_ops() requires this
+ * callback to be defined.
+ */
+}
+
+static int ltq_pmx_gpio_request_enable(struct pinctrl_dev *pctrldev,
+ struct pinctrl_gpio_range *range,
+ unsigned pin)
+{
+ struct ltq_pinmux_info *info = pinctrl_dev_get_drvdata(pctrldev);
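+	/* each gpio range is assumed to map onto a 32-pin bank, hence range->id * 32 */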
+ int mfp = match_mfp(info, pin + (range->id * 32));
+ int pin_func;
+
+ if (mfp < 0) {
+ dev_err(info->dev, "could not find mfp for pin %d\n", pin);
+ return -EINVAL;
+ }
+
+ pin_func = match_mux(&info->mfp[mfp], 0);
+ if (pin_func < 0) {
+ dev_err(info->dev, "No GPIO function on pin%d\n", mfp);
+ return -EINVAL;
+ }
+
+ return info->apply_mux(pctrldev, mfp, pin_func);
+}
+
+static struct pinmux_ops ltq_pmx_ops = {
+ .get_functions_count = ltq_pmx_func_count,
+ .get_function_name = ltq_pmx_func_name,
+ .get_function_groups = ltq_pmx_get_groups,
+ .enable = ltq_pmx_enable,
+ .disable = ltq_pmx_disable,
+ .gpio_request_enable = ltq_pmx_gpio_request_enable,
+};
+
+/*
+ * allow different SoCs to register with the generic part of the lantiq
+ * pinctrl code
+ */
+int ltq_pinctrl_register(struct platform_device *pdev,
+ struct ltq_pinmux_info *info)
+{
+ struct pinctrl_desc *desc;
+
+ if (!info)
+ return -EINVAL;
+ desc = info->desc;
+ desc->pctlops = &ltq_pctrl_ops;
+ desc->pmxops = &ltq_pmx_ops;
+ info->dev = &pdev->dev;
+
+ info->pctrl = pinctrl_register(desc, &pdev->dev, info);
+ if (!info->pctrl) {
+ dev_err(&pdev->dev, "failed to register LTQ pinmux driver\n");
+ return -EINVAL;
+ }
+ platform_set_drvdata(pdev, info);
+ return 0;
+}
diff --git a/drivers/pinctrl/pinctrl-lantiq.h b/drivers/pinctrl/pinctrl-lantiq.h
new file mode 100644
index 0000000..4419d32
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-lantiq.h
@@ -0,0 +1,194 @@
+/*
+ * linux/drivers/pinctrl/pinctrl-lantiq.h
+ * based on linux/drivers/pinctrl/pinctrl-pxa3xx.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
+ */
+
+#ifndef __PINCTRL_LANTIQ_H
+#define __PINCTRL_LANTIQ_H
+
+#include <linux/clkdev.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/pinctrl/machine.h>
+
+#include "core.h"
+
+#define ARRAY_AND_SIZE(x) (x), ARRAY_SIZE(x)
+
+#define LTQ_MAX_MUX 4
+#define MFPR_FUNC_MASK 0x3
+
+#define LTQ_PINCONF_PACK(param, arg) ((param) << 16 | (arg))
+#define LTQ_PINCONF_UNPACK_PARAM(conf) ((conf) >> 16)
+#define LTQ_PINCONF_UNPACK_ARG(conf) ((conf) & 0xffff)
+
+enum ltq_pinconf_param {
+ LTQ_PINCONF_PARAM_PULL,
+ LTQ_PINCONF_PARAM_OPEN_DRAIN,
+ LTQ_PINCONF_PARAM_DRIVE_CURRENT,
+ LTQ_PINCONF_PARAM_SLEW_RATE,
+};
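+
+/*
+ * Worked example (illustrative): packing DRIVE_CURRENT (param 2) with
+ * arg 1 gives (2 << 16) | 1 == 0x00020001; LTQ_PINCONF_UNPACK_PARAM
+ * then recovers 2 and LTQ_PINCONF_UNPACK_ARG recovers 1.
+ */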
+
+struct ltq_cfg_param {
+ const char *property;
+ enum ltq_pinconf_param param;
+};
+
+struct ltq_mfp_pin {
+ const char *name;
+ const unsigned int pin;
+ const unsigned short func[LTQ_MAX_MUX];
+};
+
+struct ltq_pin_group {
+ const char *name;
+ const unsigned mux;
+ const unsigned *pins;
+ const unsigned npins;
+};
+
+struct ltq_pmx_func {
+ const char *name;
+ const char * const *groups;
+ const unsigned num_groups;
+};
+
+struct ltq_pinmux_info {
+ struct device *dev;
+ struct pinctrl_dev *pctrl;
+
+ /* we need to manage up to 5 pad controllers */
+ void __iomem *membase[5];
+
+ /* the descriptor for the subsystem */
+ struct pinctrl_desc *desc;
+
+ /* we expose our pads to the subsystem */
+ struct pinctrl_pin_desc *pads;
+
+ /* the number of pads. this varies between socs */
+ unsigned int num_pads;
+
+ /* these are our multifunction pins */
+ const struct ltq_mfp_pin *mfp;
+ unsigned int num_mfp;
+
+ /* a number of multifunction pins can be grouped together */
+ const struct ltq_pin_group *grps;
+ unsigned int num_grps;
+
+ /* a mapping between function string and id */
+ const struct ltq_pmx_func *funcs;
+ unsigned int num_funcs;
+
+ /* the pinconf options that we are able to read from the DT */
+ const struct ltq_cfg_param *params;
+ unsigned int num_params;
+
+	/* the pad controller can have an irq mapping */
+ const unsigned *exin;
+ unsigned int num_exin;
+
+ /* we need 5 clocks max */
+ struct clk *clk[5];
+
+ /* soc specific callback used to apply muxing */
+ int (*apply_mux)(struct pinctrl_dev *pctrldev, int pin, int mux);
+};
+
+enum ltq_pin {
+ GPIO0 = 0,
+ GPIO1,
+ GPIO2,
+ GPIO3,
+ GPIO4,
+ GPIO5,
+ GPIO6,
+ GPIO7,
+ GPIO8,
+ GPIO9,
+ GPIO10, /* 10 */
+ GPIO11,
+ GPIO12,
+ GPIO13,
+ GPIO14,
+ GPIO15,
+ GPIO16,
+ GPIO17,
+ GPIO18,
+ GPIO19,
+ GPIO20, /* 20 */
+ GPIO21,
+ GPIO22,
+ GPIO23,
+ GPIO24,
+ GPIO25,
+ GPIO26,
+ GPIO27,
+ GPIO28,
+ GPIO29,
+ GPIO30, /* 30 */
+ GPIO31,
+ GPIO32,
+ GPIO33,
+ GPIO34,
+ GPIO35,
+ GPIO36,
+ GPIO37,
+ GPIO38,
+ GPIO39,
+ GPIO40, /* 40 */
+ GPIO41,
+ GPIO42,
+ GPIO43,
+ GPIO44,
+ GPIO45,
+ GPIO46,
+ GPIO47,
+ GPIO48,
+ GPIO49,
+ GPIO50, /* 50 */
+ GPIO51,
+ GPIO52,
+ GPIO53,
+ GPIO54,
+ GPIO55,
+
+ GPIO64,
+ GPIO65,
+ GPIO66,
+ GPIO67,
+ GPIO68,
+ GPIO69,
+ GPIO70,
+ GPIO71,
+ GPIO72,
+ GPIO73,
+ GPIO74,
+ GPIO75,
+ GPIO76,
+ GPIO77,
+ GPIO78,
+ GPIO79,
+ GPIO80,
+ GPIO81,
+ GPIO82,
+ GPIO83,
+ GPIO84,
+ GPIO85,
+ GPIO86,
+ GPIO87,
+ GPIO88,
+};
+
+extern int ltq_pinctrl_register(struct platform_device *pdev,
+ struct ltq_pinmux_info *info);
+extern int ltq_pinctrl_unregister(struct platform_device *pdev);
+#endif	/* __PINCTRL_LANTIQ_H */
diff --git a/drivers/pinctrl/pinctrl-mvebu.c b/drivers/pinctrl/pinctrl-mvebu.c
new file mode 100644
index 0000000..8e6266c
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-mvebu.c
@@ -0,0 +1,754 @@
+/*
+ * Marvell MVEBU pinctrl core driver
+ *
+ * Authors: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/pinctrl/machine.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+
+#include "core.h"
+#include "pinctrl-mvebu.h"
+
+#define MPPS_PER_REG 8
+#define MPP_BITS 4
+#define MPP_MASK 0xf
+
+struct mvebu_pinctrl_function {
+ const char *name;
+ const char **groups;
+ unsigned num_groups;
+};
+
+struct mvebu_pinctrl_group {
+ const char *name;
+ struct mvebu_mpp_ctrl *ctrl;
+ struct mvebu_mpp_ctrl_setting *settings;
+ unsigned num_settings;
+ unsigned gid;
+ unsigned *pins;
+ unsigned npins;
+};
+
+struct mvebu_pinctrl {
+ struct device *dev;
+ struct pinctrl_dev *pctldev;
+ struct pinctrl_desc desc;
+ void __iomem *base;
+ struct mvebu_pinctrl_group *groups;
+ unsigned num_groups;
+ struct mvebu_pinctrl_function *functions;
+ unsigned num_functions;
+ u8 variant;
+};
+
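+/* assumes each group covers consecutive pins starting at pins[0] */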
+static struct mvebu_pinctrl_group *mvebu_pinctrl_find_group_by_pid(
+ struct mvebu_pinctrl *pctl, unsigned pid)
+{
+ unsigned n;
+ for (n = 0; n < pctl->num_groups; n++) {
+ if (pid >= pctl->groups[n].pins[0] &&
+ pid < pctl->groups[n].pins[0] +
+ pctl->groups[n].npins)
+ return &pctl->groups[n];
+ }
+ return NULL;
+}
+
+static struct mvebu_pinctrl_group *mvebu_pinctrl_find_group_by_name(
+ struct mvebu_pinctrl *pctl, const char *name)
+{
+ unsigned n;
+ for (n = 0; n < pctl->num_groups; n++) {
+ if (strcmp(name, pctl->groups[n].name) == 0)
+ return &pctl->groups[n];
+ }
+ return NULL;
+}
+
+static struct mvebu_mpp_ctrl_setting *mvebu_pinctrl_find_setting_by_val(
+ struct mvebu_pinctrl *pctl, struct mvebu_pinctrl_group *grp,
+ unsigned long config)
+{
+ unsigned n;
+ for (n = 0; n < grp->num_settings; n++) {
+ if (config == grp->settings[n].val) {
+ if (!pctl->variant || (pctl->variant &
+ grp->settings[n].variant))
+ return &grp->settings[n];
+ }
+ }
+ return NULL;
+}
+
+static struct mvebu_mpp_ctrl_setting *mvebu_pinctrl_find_setting_by_name(
+ struct mvebu_pinctrl *pctl, struct mvebu_pinctrl_group *grp,
+ const char *name)
+{
+ unsigned n;
+ for (n = 0; n < grp->num_settings; n++) {
+ if (strcmp(name, grp->settings[n].name) == 0) {
+ if (!pctl->variant || (pctl->variant &
+ grp->settings[n].variant))
+ return &grp->settings[n];
+ }
+ }
+ return NULL;
+}
+
+static struct mvebu_mpp_ctrl_setting *mvebu_pinctrl_find_gpio_setting(
+ struct mvebu_pinctrl *pctl, struct mvebu_pinctrl_group *grp)
+{
+ unsigned n;
+ for (n = 0; n < grp->num_settings; n++) {
+ if (grp->settings[n].flags &
+ (MVEBU_SETTING_GPO | MVEBU_SETTING_GPI)) {
+ if (!pctl->variant || (pctl->variant &
+ grp->settings[n].variant))
+ return &grp->settings[n];
+ }
+ }
+ return NULL;
+}
+
+static struct mvebu_pinctrl_function *mvebu_pinctrl_find_function_by_name(
+ struct mvebu_pinctrl *pctl, const char *name)
+{
+ unsigned n;
+ for (n = 0; n < pctl->num_functions; n++) {
+ if (strcmp(name, pctl->functions[n].name) == 0)
+ return &pctl->functions[n];
+ }
+ return NULL;
+}
+
+/*
+ * Common mpp pin configuration registers on MVEBU are
+ * registers of eight 4-bit values for each mpp setting.
+ * Register offset and bit mask are calculated accordingly below.
+ */
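+/*
+ * Worked example (illustrative): mpp pin 10 has
+ * off = (10 / 8) * 4 = 4 and shift = (10 % 8) * 4 = 8, so its mode
+ * field sits in bits 11:8 of the register at base + 0x4.
+ */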
+static int mvebu_common_mpp_get(struct mvebu_pinctrl *pctl,
+ struct mvebu_pinctrl_group *grp,
+ unsigned long *config)
+{
+ unsigned pin = grp->gid;
+ unsigned off = (pin / MPPS_PER_REG) * MPP_BITS;
+ unsigned shift = (pin % MPPS_PER_REG) * MPP_BITS;
+
+ *config = readl(pctl->base + off);
+ *config >>= shift;
+ *config &= MPP_MASK;
+
+ return 0;
+}
+
+static int mvebu_common_mpp_set(struct mvebu_pinctrl *pctl,
+ struct mvebu_pinctrl_group *grp,
+ unsigned long config)
+{
+ unsigned pin = grp->gid;
+ unsigned off = (pin / MPPS_PER_REG) * MPP_BITS;
+ unsigned shift = (pin % MPPS_PER_REG) * MPP_BITS;
+ unsigned long reg;
+
+ reg = readl(pctl->base + off);
+ reg &= ~(MPP_MASK << shift);
+ reg |= (config << shift);
+ writel(reg, pctl->base + off);
+
+ return 0;
+}
+
+static int mvebu_pinconf_group_get(struct pinctrl_dev *pctldev,
+ unsigned gid, unsigned long *config)
+{
+ struct mvebu_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+ struct mvebu_pinctrl_group *grp = &pctl->groups[gid];
+
+ if (!grp->ctrl)
+ return -EINVAL;
+
+ if (grp->ctrl->mpp_get)
+ return grp->ctrl->mpp_get(grp->ctrl, config);
+
+ return mvebu_common_mpp_get(pctl, grp, config);
+}
+
+static int mvebu_pinconf_group_set(struct pinctrl_dev *pctldev,
+ unsigned gid, unsigned long config)
+{
+ struct mvebu_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+ struct mvebu_pinctrl_group *grp = &pctl->groups[gid];
+
+ if (!grp->ctrl)
+ return -EINVAL;
+
+ if (grp->ctrl->mpp_set)
+ return grp->ctrl->mpp_set(grp->ctrl, config);
+
+ return mvebu_common_mpp_set(pctl, grp, config);
+}
+
+static void mvebu_pinconf_group_dbg_show(struct pinctrl_dev *pctldev,
+ struct seq_file *s, unsigned gid)
+{
+ struct mvebu_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+ struct mvebu_pinctrl_group *grp = &pctl->groups[gid];
+ struct mvebu_mpp_ctrl_setting *curr;
+ unsigned long config;
+ unsigned n;
+
+ if (mvebu_pinconf_group_get(pctldev, gid, &config))
+ return;
+
+ curr = mvebu_pinctrl_find_setting_by_val(pctl, grp, config);
+
+ if (curr) {
+ seq_printf(s, "current: %s", curr->name);
+ if (curr->subname)
+ seq_printf(s, "(%s)", curr->subname);
+ if (curr->flags & (MVEBU_SETTING_GPO | MVEBU_SETTING_GPI)) {
+ seq_printf(s, "(");
+ if (curr->flags & MVEBU_SETTING_GPI)
+ seq_printf(s, "i");
+ if (curr->flags & MVEBU_SETTING_GPO)
+ seq_printf(s, "o");
+ seq_printf(s, ")");
+ }
+ } else
+ seq_printf(s, "current: UNKNOWN");
+
+ if (grp->num_settings > 1) {
+ seq_printf(s, ", available = [");
+ for (n = 0; n < grp->num_settings; n++) {
+ if (curr == &grp->settings[n])
+ continue;
+
+ /* skip unsupported settings for this variant */
+ if (pctl->variant &&
+ !(pctl->variant & grp->settings[n].variant))
+ continue;
+
+ seq_printf(s, " %s", grp->settings[n].name);
+ if (grp->settings[n].subname)
+ seq_printf(s, "(%s)", grp->settings[n].subname);
+ if (grp->settings[n].flags &
+ (MVEBU_SETTING_GPO | MVEBU_SETTING_GPI)) {
+ seq_printf(s, "(");
+ if (grp->settings[n].flags & MVEBU_SETTING_GPI)
+ seq_printf(s, "i");
+ if (grp->settings[n].flags & MVEBU_SETTING_GPO)
+ seq_printf(s, "o");
+ seq_printf(s, ")");
+ }
+ }
+ seq_printf(s, " ]");
+ }
+ return;
+}
+
+static struct pinconf_ops mvebu_pinconf_ops = {
+ .pin_config_group_get = mvebu_pinconf_group_get,
+ .pin_config_group_set = mvebu_pinconf_group_set,
+ .pin_config_group_dbg_show = mvebu_pinconf_group_dbg_show,
+};
+
+static int mvebu_pinmux_get_funcs_count(struct pinctrl_dev *pctldev)
+{
+ struct mvebu_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctl->num_functions;
+}
+
+static const char *mvebu_pinmux_get_func_name(struct pinctrl_dev *pctldev,
+ unsigned fid)
+{
+ struct mvebu_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctl->functions[fid].name;
+}
+
+static int mvebu_pinmux_get_groups(struct pinctrl_dev *pctldev, unsigned fid,
+ const char * const **groups,
+ unsigned * const num_groups)
+{
+ struct mvebu_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ *groups = pctl->functions[fid].groups;
+ *num_groups = pctl->functions[fid].num_groups;
+ return 0;
+}
+
+static int mvebu_pinmux_enable(struct pinctrl_dev *pctldev, unsigned fid,
+ unsigned gid)
+{
+ struct mvebu_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+ struct mvebu_pinctrl_function *func = &pctl->functions[fid];
+ struct mvebu_pinctrl_group *grp = &pctl->groups[gid];
+ struct mvebu_mpp_ctrl_setting *setting;
+ int ret;
+
+ setting = mvebu_pinctrl_find_setting_by_name(pctl, grp,
+ func->name);
+ if (!setting) {
+ dev_err(pctl->dev,
+ "unable to find setting %s in group %s\n",
+			func->name, grp->name);
+ return -EINVAL;
+ }
+
+ ret = mvebu_pinconf_group_set(pctldev, grp->gid, setting->val);
+ if (ret) {
+ dev_err(pctl->dev, "cannot set group %s to %s\n",
+			grp->name, func->name);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mvebu_pinmux_gpio_request_enable(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range, unsigned offset)
+{
+ struct mvebu_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+ struct mvebu_pinctrl_group *grp;
+ struct mvebu_mpp_ctrl_setting *setting;
+
+ grp = mvebu_pinctrl_find_group_by_pid(pctl, offset);
+ if (!grp)
+ return -EINVAL;
+
+ if (grp->ctrl->mpp_gpio_req)
+ return grp->ctrl->mpp_gpio_req(grp->ctrl, offset);
+
+ setting = mvebu_pinctrl_find_gpio_setting(pctl, grp);
+ if (!setting)
+ return -ENOTSUPP;
+
+ return mvebu_pinconf_group_set(pctldev, grp->gid, setting->val);
+}
+
+static int mvebu_pinmux_gpio_set_direction(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range, unsigned offset, bool input)
+{
+ struct mvebu_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+ struct mvebu_pinctrl_group *grp;
+ struct mvebu_mpp_ctrl_setting *setting;
+
+ grp = mvebu_pinctrl_find_group_by_pid(pctl, offset);
+ if (!grp)
+ return -EINVAL;
+
+ if (grp->ctrl->mpp_gpio_dir)
+ return grp->ctrl->mpp_gpio_dir(grp->ctrl, offset, input);
+
+ setting = mvebu_pinctrl_find_gpio_setting(pctl, grp);
+ if (!setting)
+ return -ENOTSUPP;
+
+ if ((input && (setting->flags & MVEBU_SETTING_GPI)) ||
+ (!input && (setting->flags & MVEBU_SETTING_GPO)))
+ return 0;
+
+ return -ENOTSUPP;
+}
+
+static struct pinmux_ops mvebu_pinmux_ops = {
+ .get_functions_count = mvebu_pinmux_get_funcs_count,
+ .get_function_name = mvebu_pinmux_get_func_name,
+ .get_function_groups = mvebu_pinmux_get_groups,
+ .gpio_request_enable = mvebu_pinmux_gpio_request_enable,
+ .gpio_set_direction = mvebu_pinmux_gpio_set_direction,
+ .enable = mvebu_pinmux_enable,
+};
+
+static int mvebu_pinctrl_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ struct mvebu_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+ return pctl->num_groups;
+}
+
+static const char *mvebu_pinctrl_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned gid)
+{
+ struct mvebu_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+ return pctl->groups[gid].name;
+}
+
+static int mvebu_pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned gid, const unsigned **pins,
+ unsigned *num_pins)
+{
+ struct mvebu_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+ *pins = pctl->groups[gid].pins;
+ *num_pins = pctl->groups[gid].npins;
+ return 0;
+}
+
+static int mvebu_pinctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
+ struct device_node *np,
+ struct pinctrl_map **map,
+ unsigned *num_maps)
+{
+ struct mvebu_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+ struct property *prop;
+ const char *function;
+ const char *group;
+ int ret, nmaps, n;
+
+ *map = NULL;
+ *num_maps = 0;
+
+ ret = of_property_read_string(np, "marvell,function", &function);
+ if (ret) {
+ dev_err(pctl->dev,
+ "missing marvell,function in node %s\n", np->name);
+ return 0;
+ }
+
+ nmaps = of_property_count_strings(np, "marvell,pins");
+ if (nmaps < 0) {
+ dev_err(pctl->dev,
+ "missing marvell,pins in node %s\n", np->name);
+ return 0;
+ }
+
+ *map = kmalloc(nmaps * sizeof(struct pinctrl_map), GFP_KERNEL);
+	if (*map == NULL) {
+ dev_err(pctl->dev,
+ "cannot allocate pinctrl_map memory for %s\n",
+ np->name);
+ return -ENOMEM;
+ }
+
+ n = 0;
+ of_property_for_each_string(np, "marvell,pins", prop, group) {
+ struct mvebu_pinctrl_group *grp =
+ mvebu_pinctrl_find_group_by_name(pctl, group);
+
+ if (!grp) {
+			dev_err(pctl->dev, "unknown pin %s\n", group);
+ continue;
+ }
+
+ if (!mvebu_pinctrl_find_setting_by_name(pctl, grp, function)) {
+			dev_err(pctl->dev, "unsupported function %s on pin %s\n",
+ function, group);
+ continue;
+ }
+
+ (*map)[n].type = PIN_MAP_TYPE_MUX_GROUP;
+ (*map)[n].data.mux.group = group;
+ (*map)[n].data.mux.function = function;
+ n++;
+ }
+
+	/* report only the entries actually filled in above */
+	*num_maps = n;
+
+ return 0;
+}
+
+static void mvebu_pinctrl_dt_free_map(struct pinctrl_dev *pctldev,
+ struct pinctrl_map *map, unsigned num_maps)
+{
+ kfree(map);
+}
+
+static struct pinctrl_ops mvebu_pinctrl_ops = {
+ .get_groups_count = mvebu_pinctrl_get_groups_count,
+ .get_group_name = mvebu_pinctrl_get_group_name,
+ .get_group_pins = mvebu_pinctrl_get_group_pins,
+ .dt_node_to_map = mvebu_pinctrl_dt_node_to_map,
+ .dt_free_map = mvebu_pinctrl_dt_free_map,
+};
+
+static int __devinit _add_function(struct mvebu_pinctrl_function *funcs,
+ const char *name)
+{
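+	/* relies on the zeroed array: num_groups == 0 marks the free tail */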
+ while (funcs->num_groups) {
+ /* function already there */
+ if (strcmp(funcs->name, name) == 0) {
+ funcs->num_groups++;
+ return -EEXIST;
+ }
+ funcs++;
+ }
+ funcs->name = name;
+ funcs->num_groups = 1;
+ return 0;
+}
+
+static int __devinit mvebu_pinctrl_build_functions(struct platform_device *pdev,
+ struct mvebu_pinctrl *pctl)
+{
+ struct mvebu_pinctrl_function *funcs;
+ int num = 0;
+ int n, s;
+
+	/* we allocate functions for the number of pins and hope there
+	 * are fewer unique functions than pins available */
+ funcs = devm_kzalloc(&pdev->dev, pctl->desc.npins *
+ sizeof(struct mvebu_pinctrl_function), GFP_KERNEL);
+ if (!funcs)
+ return -ENOMEM;
+
+ for (n = 0; n < pctl->num_groups; n++) {
+ struct mvebu_pinctrl_group *grp = &pctl->groups[n];
+ for (s = 0; s < grp->num_settings; s++) {
+ /* skip unsupported settings on this variant */
+ if (pctl->variant &&
+ !(pctl->variant & grp->settings[s].variant))
+ continue;
+
+ /* check for unique functions and count groups */
+ if (_add_function(funcs, grp->settings[s].name))
+ continue;
+
+ num++;
+ }
+ }
+
+	/* with the number of unique functions and their groups known,
+	   reallocate functions and assign group names */
+ funcs = krealloc(funcs, num * sizeof(struct mvebu_pinctrl_function),
+ GFP_KERNEL);
+ if (!funcs)
+ return -ENOMEM;
+
+ pctl->num_functions = num;
+ pctl->functions = funcs;
+
+ for (n = 0; n < pctl->num_groups; n++) {
+ struct mvebu_pinctrl_group *grp = &pctl->groups[n];
+ for (s = 0; s < grp->num_settings; s++) {
+ struct mvebu_pinctrl_function *f;
+ const char **groups;
+
+ /* skip unsupported settings on this variant */
+ if (pctl->variant &&
+ !(pctl->variant & grp->settings[s].variant))
+ continue;
+
+ f = mvebu_pinctrl_find_function_by_name(pctl,
+ grp->settings[s].name);
+
+ /* allocate group name array if not done already */
+ if (!f->groups) {
+ f->groups = devm_kzalloc(&pdev->dev,
+ f->num_groups * sizeof(char *),
+ GFP_KERNEL);
+ if (!f->groups)
+ return -ENOMEM;
+ }
+
+ /* find next free group name and assign current name */
+ groups = f->groups;
+ while (*groups)
+ groups++;
+ *groups = grp->name;
+ }
+ }
+
+ return 0;
+}
+
+int __devinit mvebu_pinctrl_probe(struct platform_device *pdev)
+{
+ struct mvebu_pinctrl_soc_info *soc = dev_get_platdata(&pdev->dev);
+ struct device_node *np = pdev->dev.of_node;
+ struct mvebu_pinctrl *pctl;
+ void __iomem *base;
+ struct pinctrl_pin_desc *pdesc;
+ unsigned gid, n, k;
+ int ret;
+
+ if (!soc || !soc->controls || !soc->modes) {
+ dev_err(&pdev->dev, "wrong pinctrl soc info\n");
+ return -EINVAL;
+ }
+
+ base = of_iomap(np, 0);
+ if (!base) {
+ dev_err(&pdev->dev, "unable to get base address\n");
+ return -ENODEV;
+ }
+
+ pctl = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_pinctrl),
+ GFP_KERNEL);
+ if (!pctl) {
+ dev_err(&pdev->dev, "unable to alloc driver\n");
+ return -ENOMEM;
+ }
+
+ pctl->desc.name = dev_name(&pdev->dev);
+ pctl->desc.owner = THIS_MODULE;
+ pctl->desc.pctlops = &mvebu_pinctrl_ops;
+ pctl->desc.pmxops = &mvebu_pinmux_ops;
+ pctl->desc.confops = &mvebu_pinconf_ops;
+ pctl->variant = soc->variant;
+ pctl->base = base;
+ pctl->dev = &pdev->dev;
+ platform_set_drvdata(pdev, pctl);
+
+ /* count controls, sanity-check the soc info and create names
+ * for the generic mvebu register controls */
+ pctl->num_groups = 0;
+ pctl->desc.npins = 0;
+ for (n = 0; n < soc->ncontrols; n++) {
+ struct mvebu_mpp_ctrl *ctrl = &soc->controls[n];
+ char *names;
+
+ pctl->desc.npins += ctrl->npins;
+ /* initialize control pin numbers */
+ for (k = 0; k < ctrl->npins; k++)
+ ctrl->pins[k] = ctrl->pid + k;
+
+ /* special soc specific control */
+ if (ctrl->mpp_get || ctrl->mpp_set) {
+ if (!ctrl->name || !ctrl->mpp_get || !ctrl->mpp_set) {
+ dev_err(&pdev->dev, "wrong soc control info\n");
+ return -EINVAL;
+ }
+ pctl->num_groups += 1;
+ continue;
+ }
+
+ /* generic mvebu register control */
+ names = devm_kzalloc(&pdev->dev, ctrl->npins * 8, GFP_KERNEL);
+ if (!names) {
+ dev_err(&pdev->dev, "failed to alloc mpp names\n");
+ return -ENOMEM;
+ }
+ for (k = 0; k < ctrl->npins; k++)
+ sprintf(names + 8*k, "mpp%d", ctrl->pid+k);
+ ctrl->name = names;
+ pctl->num_groups += ctrl->npins;
+ }
+
+ pdesc = devm_kzalloc(&pdev->dev, pctl->desc.npins *
+ sizeof(struct pinctrl_pin_desc), GFP_KERNEL);
+ if (!pdesc) {
+ dev_err(&pdev->dev, "failed to alloc pinctrl pins\n");
+ return -ENOMEM;
+ }
+
+ for (n = 0; n < pctl->desc.npins; n++)
+ pdesc[n].number = n;
+ pctl->desc.pins = pdesc;
+
+ pctl->groups = devm_kzalloc(&pdev->dev, pctl->num_groups *
+ sizeof(struct mvebu_pinctrl_group), GFP_KERNEL);
+ if (!pctl->groups) {
+ dev_err(&pdev->dev, "failed to alloc pinctrl groups\n");
+ return -ENOMEM;
+ }
+
+ /* assign mpp controls to groups */
+ gid = 0;
+ for (n = 0; n < soc->ncontrols; n++) {
+ struct mvebu_mpp_ctrl *ctrl = &soc->controls[n];
+ pctl->groups[gid].gid = gid;
+ pctl->groups[gid].ctrl = ctrl;
+ pctl->groups[gid].name = ctrl->name;
+ pctl->groups[gid].pins = ctrl->pins;
+ pctl->groups[gid].npins = ctrl->npins;
+
+ /* generic mvebu register control maps to a number of groups */
+ if (!ctrl->mpp_get && !ctrl->mpp_set) {
+ pctl->groups[gid].npins = 1;
+
+ for (k = 1; k < ctrl->npins; k++) {
+ gid++;
+ pctl->groups[gid].gid = gid;
+ pctl->groups[gid].ctrl = ctrl;
+ pctl->groups[gid].name = &ctrl->name[8*k];
+ pctl->groups[gid].pins = &ctrl->pins[k];
+ pctl->groups[gid].npins = 1;
+ }
+ }
+ gid++;
+ }
+
+ /* assign mpp modes to groups */
+ for (n = 0; n < soc->nmodes; n++) {
+ struct mvebu_mpp_mode *mode = &soc->modes[n];
+ struct mvebu_pinctrl_group *grp =
+ mvebu_pinctrl_find_group_by_pid(pctl, mode->pid);
+ unsigned num_settings;
+
+ if (!grp) {
+ dev_warn(&pdev->dev, "unknown pinctrl group %d\n",
+ mode->pid);
+ continue;
+ }
+
+ for (num_settings = 0; ;) {
+ struct mvebu_mpp_ctrl_setting *set =
+ &mode->settings[num_settings];
+
+ if (!set->name)
+ break;
+ num_settings++;
+
+ /* skip unsupported settings for this variant */
+ if (pctl->variant && !(pctl->variant & set->variant))
+ continue;
+
+ /* find gpio/gpo/gpi settings */
+ if (strcmp(set->name, "gpio") == 0)
+ set->flags = MVEBU_SETTING_GPI |
+ MVEBU_SETTING_GPO;
+ else if (strcmp(set->name, "gpo") == 0)
+ set->flags = MVEBU_SETTING_GPO;
+ else if (strcmp(set->name, "gpi") == 0)
+ set->flags = MVEBU_SETTING_GPI;
+ }
+
+ grp->settings = mode->settings;
+ grp->num_settings = num_settings;
+ }
+
+ ret = mvebu_pinctrl_build_functions(pdev, pctl);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to build functions\n");
+ return ret;
+ }
+
+ pctl->pctldev = pinctrl_register(&pctl->desc, &pdev->dev, pctl);
+ if (!pctl->pctldev) {
+ dev_err(&pdev->dev, "unable to register pinctrl driver\n");
+ return -EINVAL;
+ }
+
+ dev_info(&pdev->dev, "registered pinctrl driver\n");
+
+ /* register gpio ranges */
+ for (n = 0; n < soc->ngpioranges; n++)
+ pinctrl_add_gpio_range(pctl->pctldev, &soc->gpioranges[n]);
+
+ return 0;
+}
+
+int __devexit mvebu_pinctrl_remove(struct platform_device *pdev)
+{
+ struct mvebu_pinctrl *pctl = platform_get_drvdata(pdev);
+ pinctrl_unregister(pctl->pctldev);
+ return 0;
+}
diff --git a/drivers/pinctrl/pinctrl-mvebu.h b/drivers/pinctrl/pinctrl-mvebu.h
new file mode 100644
index 0000000..90bd3be
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-mvebu.h
@@ -0,0 +1,192 @@
+/*
+ * Marvell MVEBU pinctrl driver
+ *
+ * Authors: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __PINCTRL_MVEBU_H__
+#define __PINCTRL_MVEBU_H__
+
+/**
+ * struct mvebu_mpp_ctrl - describe a mpp control
+ * @name: name of the control group
+ * @pid: first pin id handled by this control
+ * @npins: number of pins controlled by this control
+ * @mpp_get: (optional) special function to get mpp setting
+ * @mpp_set: (optional) special function to set mpp setting
+ * @mpp_gpio_req: (optional) special function to request gpio
+ * @mpp_gpio_dir: (optional) special function to set gpio direction
+ *
+ * An mpp_ctrl describes a muxable unit, e.g. a pin, a group of pins, or
+ * an internal function, inside the SoC. Each muxable unit can be switched
+ * between two or more different settings, e.g. assigning mpp pin 13 to
+ * uart1 or sata.
+ *
+ * If the optional mpp_get/_set functions are set, they are used to get/set
+ * a specific mode. Otherwise the mpp control is assumed to be based
+ * on 4-bit groups in subsequent registers. The optional mpp_gpio_req/_dir
+ * functions can be used to allow pin settings with varying gpio pins.
+ */
+struct mvebu_mpp_ctrl {
+ const char *name;
+ u8 pid;
+ u8 npins;
+ unsigned *pins;
+ int (*mpp_get)(struct mvebu_mpp_ctrl *ctrl, unsigned long *config);
+ int (*mpp_set)(struct mvebu_mpp_ctrl *ctrl, unsigned long config);
+ int (*mpp_gpio_req)(struct mvebu_mpp_ctrl *ctrl, u8 pid);
+ int (*mpp_gpio_dir)(struct mvebu_mpp_ctrl *ctrl, u8 pid, bool input);
+};
+
+/**
+ * struct mvebu_mpp_ctrl_setting - describe a mpp ctrl setting
+ * @val: ctrl setting value
+ * @name: ctrl setting name, e.g. uart2, spi0 - unique per mpp_mode
+ * @subname: (optional) additional ctrl setting name, e.g. rts, cts
+ * @variant: (optional) variant identifier mask
+ * @flags: (private) flags to store gpi/gpo/gpio capabilities
+ *
+ * A ctrl_setting describes a specific internal mux function that an mpp pin
+ * can be switched to. The value (val) is written to the corresponding
+ * register for the common mpp pin configuration registers on MVEBU. SoC
+ * specific mpp_get/_set functions may use val to distinguish between settings.
+ *
+ * The name will be used to switch to this setting in DT description, e.g.
+ * marvell,function = "uart2". subname is only for debugging purposes.
+ *
+ * If name is one of "gpi", "gpo", or "gpio", the gpio capabilities are
+ * parsed during initialization and stored in flags.
+ *
+ * The variant can be used to combine different revisions of one SoC to a
+ * common pinctrl driver. It is matched (AND) with variant of soc_info to
+ * determine if a setting is available on the current SoC revision.
+ */
+struct mvebu_mpp_ctrl_setting {
+ u8 val;
+ const char *name;
+ const char *subname;
+ u8 variant;
+ u8 flags;
+#define MVEBU_SETTING_GPO (1 << 0)
+#define MVEBU_SETTING_GPI (1 << 1)
+};
+
+/**
+ * struct mvebu_mpp_mode - link ctrl and settings
+ * @pid: first pin id handled by this mode
+ * @settings: list of settings available for this mode
+ *
+ * A mode connects all available settings with the corresponding mpp_ctrl
+ * given by pid.
+ */
+struct mvebu_mpp_mode {
+ u8 pid;
+ struct mvebu_mpp_ctrl_setting *settings;
+};
+
+/**
+ * struct mvebu_pinctrl_soc_info - SoC specific info passed to pinctrl-mvebu
+ * @variant: variant mask of soc_info
+ * @controls: list of available mvebu_mpp_ctrls
+ * @ncontrols: number of available mvebu_mpp_ctrls
+ * @modes: list of available mvebu_mpp_modes
+ * @nmodes: number of available mvebu_mpp_modes
+ * @gpioranges: list of pinctrl_gpio_ranges
+ * @ngpioranges: number of available pinctrl_gpio_ranges
+ *
+ * This struct describes all pinctrl related information for a specific SoC.
+ * If variant is nonzero, it is matched (ANDed) with the variant of each
+ * setting, allowing different revisions of one SoC to be distinguished.
+ */
+struct mvebu_pinctrl_soc_info {
+ u8 variant;
+ struct mvebu_mpp_ctrl *controls;
+ int ncontrols;
+ struct mvebu_mpp_mode *modes;
+ int nmodes;
+ struct pinctrl_gpio_range *gpioranges;
+ int ngpioranges;
+};
+
+#define MPP_REG_CTRL(_idl, _idh) \
+ { \
+ .name = NULL, \
+ .pid = _idl, \
+ .npins = _idh - _idl + 1, \
+ .pins = (unsigned[_idh - _idl + 1]) { }, \
+ .mpp_get = NULL, \
+ .mpp_set = NULL, \
+ .mpp_gpio_req = NULL, \
+ .mpp_gpio_dir = NULL, \
+ }
+
+#define MPP_FUNC_CTRL(_idl, _idh, _name, _func) \
+ { \
+ .name = _name, \
+ .pid = _idl, \
+ .npins = _idh - _idl + 1, \
+ .pins = (unsigned[_idh - _idl + 1]) { }, \
+ .mpp_get = _func ## _get, \
+ .mpp_set = _func ## _set, \
+ .mpp_gpio_req = NULL, \
+ .mpp_gpio_dir = NULL, \
+ }
+
+#define MPP_FUNC_GPIO_CTRL(_idl, _idh, _name, _func) \
+ { \
+ .name = _name, \
+ .pid = _idl, \
+ .npins = _idh - _idl + 1, \
+ .pins = (unsigned[_idh - _idl + 1]) { }, \
+ .mpp_get = _func ## _get, \
+ .mpp_set = _func ## _set, \
+ .mpp_gpio_req = _func ## _gpio_req, \
+ .mpp_gpio_dir = _func ## _gpio_dir, \
+ }
+
+#define _MPP_VAR_FUNCTION(_val, _name, _subname, _mask) \
+ { \
+ .val = _val, \
+ .name = _name, \
+ .subname = _subname, \
+ .variant = _mask, \
+ .flags = 0, \
+ }
+
+#if defined(CONFIG_DEBUG_FS)
+#define MPP_VAR_FUNCTION(_val, _name, _subname, _mask) \
+ _MPP_VAR_FUNCTION(_val, _name, _subname, _mask)
+#else
+#define MPP_VAR_FUNCTION(_val, _name, _subname, _mask) \
+ _MPP_VAR_FUNCTION(_val, _name, NULL, _mask)
+#endif
+
+#define MPP_FUNCTION(_val, _name, _subname) \
+ MPP_VAR_FUNCTION(_val, _name, _subname, (u8)-1)
+
+#define MPP_MODE(_id, ...) \
+ { \
+ .pid = _id, \
+ .settings = (struct mvebu_mpp_ctrl_setting[]){ \
+ __VA_ARGS__, { } }, \
+ }
+
+#define MPP_GPIO_RANGE(_id, _pinbase, _gpiobase, _npins) \
+ { \
+ .name = "mvebu-gpio", \
+ .id = _id, \
+ .pin_base = _pinbase, \
+ .base = _gpiobase, \
+ .npins = _npins, \
+ }
+
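+/*
+ * Example: a minimal, purely illustrative SoC description built from the
+ * macros above (the pin ids, names and tables below are made up and do
+ * not describe real hardware):
+ *
+ *	static struct mvebu_mpp_ctrl foo_mpp_controls[] = {
+ *		MPP_REG_CTRL(0, 31),
+ *	};
+ *
+ *	static struct mvebu_mpp_mode foo_mpp_modes[] = {
+ *		MPP_MODE(0,
+ *		   MPP_FUNCTION(0x0, "gpio", NULL),
+ *		   MPP_FUNCTION(0x1, "uart0", "rxd")),
+ *	};
+ *
+ *	static struct pinctrl_gpio_range foo_gpio_ranges[] = {
+ *		MPP_GPIO_RANGE(0, 0, 0, 32),
+ *	};
+ *
+ * A SoC driver wraps these tables in a struct mvebu_pinctrl_soc_info, sets
+ * it as platform data and calls mvebu_pinctrl_probe() from its own probe.
+ */
+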
+int mvebu_pinctrl_probe(struct platform_device *pdev);
+int mvebu_pinctrl_remove(struct platform_device *pdev);
+
+#endif
diff --git a/drivers/pinctrl/pinctrl-xway.c b/drivers/pinctrl/pinctrl-xway.c
new file mode 100644
index 0000000..f8d917d
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-xway.c
@@ -0,0 +1,781 @@
+/*
+ * linux/drivers/pinctrl/pinctrl-xway.c
+ * based on linux/drivers/pinctrl/pinmux-pxa910.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_gpio.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-lantiq.h"
+
+#include <lantiq_soc.h>
+
+/* we have 3 1/2 banks of 16 bits each */
+#define PINS 16
+#define PORT3 3
+#define PORT(x) (x / PINS)
+#define PORT_PIN(x) (x % PINS)
+
+/* we have 2 mux bits that can be set for each pin */
+#define MUX_ALT0 0x1
+#define MUX_ALT1 0x2
+
+/*
+ * the banks are spaced this offset apart, except for the 1/2 bank, whose
+ * registers are mixed into the other three ranges
+ */
+#define REG_OFF 0x30
+
+/* these are the offsets to our registers */
+#define GPIO_BASE(p) (REG_OFF * PORT(p))
+#define GPIO_OUT(p) GPIO_BASE(p)
+#define GPIO_IN(p) (GPIO_BASE(p) + 0x04)
+#define GPIO_DIR(p) (GPIO_BASE(p) + 0x08)
+#define GPIO_ALT0(p) (GPIO_BASE(p) + 0x0C)
+#define GPIO_ALT1(p) (GPIO_BASE(p) + 0x10)
+#define GPIO_OD(p) (GPIO_BASE(p) + 0x14)
+#define GPIO_PUDSEL(p) (GPIO_BASE(p) + 0x1c)
+#define GPIO_PUDEN(p) (GPIO_BASE(p) + 0x20)
+
+/* the 1/2 port needs special offsets for some registers */
+#define GPIO3_OD (GPIO_BASE(0) + 0x24)
+#define GPIO3_PUDSEL (GPIO_BASE(0) + 0x28)
+#define GPIO3_PUDEN (GPIO_BASE(0) + 0x2C)
+#define GPIO3_ALT1 (GPIO_BASE(PINS) + 0x24)
+
+/* macros to help us access the registers */
+#define gpio_getbit(m, r, p) (!!(ltq_r32(m + r) & BIT(p)))
+#define gpio_setbit(m, r, p) ltq_w32_mask(0, BIT(p), m + r)
+#define gpio_clearbit(m, r, p) ltq_w32_mask(BIT(p), 0, m + r)
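+
+/*
+ * worked example: pin 42 lives in bank PORT(42) = 2 at bit PORT_PIN(42) = 10,
+ * so e.g. its output level is bit 10 of the register at offset
+ * GPIO_OUT(42) = 2 * REG_OFF = 0x60
+ */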
+
+#define MFP_XWAY(a, f0, f1, f2, f3) \
+ { \
+ .name = #a, \
+ .pin = a, \
+ .func = { \
+ XWAY_MUX_##f0, \
+ XWAY_MUX_##f1, \
+ XWAY_MUX_##f2, \
+ XWAY_MUX_##f3, \
+ }, \
+ }
+
+#define GRP_MUX(a, m, p) \
+ { .name = a, .mux = XWAY_MUX_##m, .pins = p, .npins = ARRAY_SIZE(p), }
+
+#define FUNC_MUX(f, m) \
+ { .func = f, .mux = XWAY_MUX_##m, }
+
+#define XWAY_MAX_PIN 32
+#define XR9_MAX_PIN 56
+
+enum xway_mux {
+ XWAY_MUX_GPIO = 0,
+ XWAY_MUX_SPI,
+ XWAY_MUX_ASC,
+ XWAY_MUX_PCI,
+ XWAY_MUX_CGU,
+ XWAY_MUX_EBU,
+ XWAY_MUX_JTAG,
+ XWAY_MUX_EXIN,
+ XWAY_MUX_TDM,
+ XWAY_MUX_STP,
+ XWAY_MUX_SIN,
+ XWAY_MUX_GPT,
+ XWAY_MUX_NMI,
+ XWAY_MUX_MDIO,
+ XWAY_MUX_MII,
+ XWAY_MUX_EPHY,
+ XWAY_MUX_DFE,
+ XWAY_MUX_SDIO,
+ XWAY_MUX_NONE = 0xffff,
+};
+
+static const struct ltq_mfp_pin xway_mfp[] = {
+ /* pin f0 f1 f2 f3 */
+ MFP_XWAY(GPIO0, GPIO, EXIN, NONE, TDM),
+ MFP_XWAY(GPIO1, GPIO, EXIN, NONE, NONE),
+ MFP_XWAY(GPIO2, GPIO, CGU, EXIN, NONE),
+ MFP_XWAY(GPIO3, GPIO, CGU, NONE, PCI),
+ MFP_XWAY(GPIO4, GPIO, STP, NONE, ASC),
+ MFP_XWAY(GPIO5, GPIO, STP, NONE, NONE),
+ MFP_XWAY(GPIO6, GPIO, STP, GPT, ASC),
+ MFP_XWAY(GPIO7, GPIO, CGU, PCI, NONE),
+ MFP_XWAY(GPIO8, GPIO, CGU, NMI, NONE),
+ MFP_XWAY(GPIO9, GPIO, ASC, SPI, EXIN),
+ MFP_XWAY(GPIO10, GPIO, ASC, SPI, NONE),
+ MFP_XWAY(GPIO11, GPIO, ASC, PCI, SPI),
+ MFP_XWAY(GPIO12, GPIO, ASC, NONE, NONE),
+ MFP_XWAY(GPIO13, GPIO, EBU, SPI, NONE),
+ MFP_XWAY(GPIO14, GPIO, CGU, PCI, NONE),
+ MFP_XWAY(GPIO15, GPIO, SPI, JTAG, NONE),
+ MFP_XWAY(GPIO16, GPIO, SPI, NONE, JTAG),
+ MFP_XWAY(GPIO17, GPIO, SPI, NONE, JTAG),
+ MFP_XWAY(GPIO18, GPIO, SPI, NONE, JTAG),
+ MFP_XWAY(GPIO19, GPIO, PCI, NONE, NONE),
+ MFP_XWAY(GPIO20, GPIO, JTAG, NONE, NONE),
+ MFP_XWAY(GPIO21, GPIO, PCI, EBU, GPT),
+ MFP_XWAY(GPIO22, GPIO, SPI, NONE, NONE),
+ MFP_XWAY(GPIO23, GPIO, EBU, PCI, STP),
+ MFP_XWAY(GPIO24, GPIO, EBU, TDM, PCI),
+ MFP_XWAY(GPIO25, GPIO, TDM, NONE, ASC),
+ MFP_XWAY(GPIO26, GPIO, EBU, NONE, TDM),
+ MFP_XWAY(GPIO27, GPIO, TDM, NONE, ASC),
+ MFP_XWAY(GPIO28, GPIO, GPT, NONE, NONE),
+ MFP_XWAY(GPIO29, GPIO, PCI, NONE, NONE),
+ MFP_XWAY(GPIO30, GPIO, PCI, NONE, NONE),
+ MFP_XWAY(GPIO31, GPIO, EBU, PCI, NONE),
+ MFP_XWAY(GPIO32, GPIO, NONE, NONE, EBU),
+ MFP_XWAY(GPIO33, GPIO, NONE, NONE, EBU),
+ MFP_XWAY(GPIO34, GPIO, NONE, NONE, EBU),
+ MFP_XWAY(GPIO35, GPIO, NONE, NONE, EBU),
+ MFP_XWAY(GPIO36, GPIO, SIN, NONE, EBU),
+ MFP_XWAY(GPIO37, GPIO, PCI, NONE, NONE),
+ MFP_XWAY(GPIO38, GPIO, PCI, NONE, NONE),
+ MFP_XWAY(GPIO39, GPIO, EXIN, NONE, NONE),
+ MFP_XWAY(GPIO40, GPIO, NONE, NONE, NONE),
+ MFP_XWAY(GPIO41, GPIO, NONE, NONE, NONE),
+ MFP_XWAY(GPIO42, GPIO, MDIO, NONE, NONE),
+ MFP_XWAY(GPIO43, GPIO, MDIO, NONE, NONE),
+ MFP_XWAY(GPIO44, GPIO, NONE, NONE, SIN),
+ MFP_XWAY(GPIO45, GPIO, NONE, NONE, SIN),
+ MFP_XWAY(GPIO46, GPIO, NONE, NONE, EXIN),
+ MFP_XWAY(GPIO47, GPIO, NONE, NONE, SIN),
+ MFP_XWAY(GPIO48, GPIO, EBU, NONE, NONE),
+ MFP_XWAY(GPIO49, GPIO, EBU, NONE, NONE),
+ MFP_XWAY(GPIO50, GPIO, NONE, NONE, NONE),
+ MFP_XWAY(GPIO51, GPIO, NONE, NONE, NONE),
+ MFP_XWAY(GPIO52, GPIO, NONE, NONE, NONE),
+ MFP_XWAY(GPIO53, GPIO, NONE, NONE, NONE),
+ MFP_XWAY(GPIO54, GPIO, NONE, NONE, NONE),
+ MFP_XWAY(GPIO55, GPIO, NONE, NONE, NONE),
+};
+
+static const struct ltq_mfp_pin ase_mfp[] = {
+ /* pin f0 f1 f2 f3 */
+ MFP_XWAY(GPIO0, GPIO, EXIN, MII, TDM),
+ MFP_XWAY(GPIO1, GPIO, STP, DFE, EBU),
+ MFP_XWAY(GPIO2, GPIO, STP, DFE, EPHY),
+ MFP_XWAY(GPIO3, GPIO, STP, EPHY, EBU),
+ MFP_XWAY(GPIO4, GPIO, GPT, EPHY, MII),
+ MFP_XWAY(GPIO5, GPIO, MII, ASC, GPT),
+ MFP_XWAY(GPIO6, GPIO, MII, ASC, EXIN),
+ MFP_XWAY(GPIO7, GPIO, SPI, MII, JTAG),
+ MFP_XWAY(GPIO8, GPIO, SPI, MII, JTAG),
+ MFP_XWAY(GPIO9, GPIO, SPI, MII, JTAG),
+ MFP_XWAY(GPIO10, GPIO, SPI, MII, JTAG),
+ MFP_XWAY(GPIO11, GPIO, EBU, CGU, JTAG),
+ MFP_XWAY(GPIO12, GPIO, EBU, MII, SDIO),
+ MFP_XWAY(GPIO13, GPIO, EBU, MII, CGU),
+ MFP_XWAY(GPIO14, GPIO, EBU, SPI, CGU),
+ MFP_XWAY(GPIO15, GPIO, EBU, SPI, SDIO),
+ MFP_XWAY(GPIO16, GPIO, NONE, NONE, NONE),
+ MFP_XWAY(GPIO17, GPIO, NONE, NONE, NONE),
+ MFP_XWAY(GPIO18, GPIO, NONE, NONE, NONE),
+ MFP_XWAY(GPIO19, GPIO, EBU, MII, SDIO),
+ MFP_XWAY(GPIO20, GPIO, EBU, MII, SDIO),
+ MFP_XWAY(GPIO21, GPIO, EBU, MII, SDIO),
+ MFP_XWAY(GPIO22, GPIO, EBU, MII, CGU),
+ MFP_XWAY(GPIO23, GPIO, EBU, MII, CGU),
+ MFP_XWAY(GPIO24, GPIO, EBU, NONE, MII),
+ MFP_XWAY(GPIO25, GPIO, EBU, MII, GPT),
+ MFP_XWAY(GPIO26, GPIO, EBU, MII, SDIO),
+ MFP_XWAY(GPIO27, GPIO, EBU, NONE, MII),
+ MFP_XWAY(GPIO28, GPIO, MII, EBU, SDIO),
+ MFP_XWAY(GPIO29, GPIO, EBU, MII, EXIN),
+ MFP_XWAY(GPIO30, GPIO, NONE, NONE, NONE),
+ MFP_XWAY(GPIO31, GPIO, NONE, NONE, NONE),
+};
+
+static const unsigned pins_jtag[] = {GPIO15, GPIO16, GPIO17, GPIO19, GPIO35};
+static const unsigned pins_asc0[] = {GPIO11, GPIO12};
+static const unsigned pins_asc0_cts_rts[] = {GPIO9, GPIO10};
+static const unsigned pins_stp[] = {GPIO4, GPIO5, GPIO6};
+static const unsigned pins_nmi[] = {GPIO8};
+static const unsigned pins_mdio[] = {GPIO42, GPIO43};
+
+static const unsigned pins_ebu_a24[] = {GPIO13};
+static const unsigned pins_ebu_clk[] = {GPIO21};
+static const unsigned pins_ebu_cs1[] = {GPIO23};
+static const unsigned pins_ebu_a23[] = {GPIO24};
+static const unsigned pins_ebu_wait[] = {GPIO26};
+static const unsigned pins_ebu_a25[] = {GPIO31};
+static const unsigned pins_ebu_rdy[] = {GPIO48};
+static const unsigned pins_ebu_rd[] = {GPIO49};
+
+static const unsigned pins_nand_ale[] = {GPIO13};
+static const unsigned pins_nand_cs1[] = {GPIO23};
+static const unsigned pins_nand_cle[] = {GPIO24};
+static const unsigned pins_nand_rdy[] = {GPIO48};
+static const unsigned pins_nand_rd[] = {GPIO49};
+
+static const unsigned pins_exin0[] = {GPIO0};
+static const unsigned pins_exin1[] = {GPIO1};
+static const unsigned pins_exin2[] = {GPIO2};
+static const unsigned pins_exin3[] = {GPIO39};
+static const unsigned pins_exin4[] = {GPIO46};
+static const unsigned pins_exin5[] = {GPIO9};
+
+static const unsigned pins_spi[] = {GPIO16, GPIO17, GPIO18};
+static const unsigned pins_spi_cs1[] = {GPIO15};
+static const unsigned pins_spi_cs2[] = {GPIO21};
+static const unsigned pins_spi_cs3[] = {GPIO13};
+static const unsigned pins_spi_cs4[] = {GPIO10};
+static const unsigned pins_spi_cs5[] = {GPIO9};
+static const unsigned pins_spi_cs6[] = {GPIO11};
+
+static const unsigned pins_gpt1[] = {GPIO28};
+static const unsigned pins_gpt2[] = {GPIO21};
+static const unsigned pins_gpt3[] = {GPIO6};
+
+static const unsigned pins_clkout0[] = {GPIO8};
+static const unsigned pins_clkout1[] = {GPIO7};
+static const unsigned pins_clkout2[] = {GPIO3};
+static const unsigned pins_clkout3[] = {GPIO2};
+
+static const unsigned pins_pci_gnt1[] = {GPIO30};
+static const unsigned pins_pci_gnt2[] = {GPIO23};
+static const unsigned pins_pci_gnt3[] = {GPIO19};
+static const unsigned pins_pci_gnt4[] = {GPIO38};
+static const unsigned pins_pci_req1[] = {GPIO29};
+static const unsigned pins_pci_req2[] = {GPIO31};
+static const unsigned pins_pci_req3[] = {GPIO3};
+static const unsigned pins_pci_req4[] = {GPIO37};
+
+static const unsigned ase_pins_jtag[] = {GPIO7, GPIO8, GPIO9, GPIO10, GPIO11};
+static const unsigned ase_pins_asc[] = {GPIO5, GPIO6};
+static const unsigned ase_pins_stp[] = {GPIO1, GPIO2, GPIO3};
+static const unsigned ase_pins_ephy[] = {GPIO2, GPIO3, GPIO4};
+static const unsigned ase_pins_dfe[] = {GPIO1, GPIO2};
+
+static const unsigned ase_pins_spi[] = {GPIO8, GPIO9, GPIO10};
+static const unsigned ase_pins_spi_cs1[] = {GPIO7};
+static const unsigned ase_pins_spi_cs2[] = {GPIO15};
+static const unsigned ase_pins_spi_cs3[] = {GPIO14};
+
+static const unsigned ase_pins_exin0[] = {GPIO6};
+static const unsigned ase_pins_exin1[] = {GPIO29};
+static const unsigned ase_pins_exin2[] = {GPIO0};
+
+static const unsigned ase_pins_gpt1[] = {GPIO5};
+static const unsigned ase_pins_gpt2[] = {GPIO4};
+static const unsigned ase_pins_gpt3[] = {GPIO25};
+
+static const struct ltq_pin_group xway_grps[] = {
+ GRP_MUX("exin0", EXIN, pins_exin0),
+ GRP_MUX("exin1", EXIN, pins_exin1),
+ GRP_MUX("exin2", EXIN, pins_exin2),
+ GRP_MUX("jtag", JTAG, pins_jtag),
+ GRP_MUX("ebu a23", EBU, pins_ebu_a23),
+ GRP_MUX("ebu a24", EBU, pins_ebu_a24),
+ GRP_MUX("ebu a25", EBU, pins_ebu_a25),
+ GRP_MUX("ebu clk", EBU, pins_ebu_clk),
+ GRP_MUX("ebu cs1", EBU, pins_ebu_cs1),
+ GRP_MUX("ebu wait", EBU, pins_ebu_wait),
+ GRP_MUX("nand ale", EBU, pins_nand_ale),
+ GRP_MUX("nand cs1", EBU, pins_nand_cs1),
+ GRP_MUX("nand cle", EBU, pins_nand_cle),
+ GRP_MUX("spi", SPI, pins_spi),
+ GRP_MUX("spi_cs1", SPI, pins_spi_cs1),
+ GRP_MUX("spi_cs2", SPI, pins_spi_cs2),
+ GRP_MUX("spi_cs3", SPI, pins_spi_cs3),
+ GRP_MUX("spi_cs4", SPI, pins_spi_cs4),
+ GRP_MUX("spi_cs5", SPI, pins_spi_cs5),
+ GRP_MUX("spi_cs6", SPI, pins_spi_cs6),
+ GRP_MUX("asc0", ASC, pins_asc0),
+ GRP_MUX("asc0 cts rts", ASC, pins_asc0_cts_rts),
+ GRP_MUX("stp", STP, pins_stp),
+ GRP_MUX("nmi", NMI, pins_nmi),
+ GRP_MUX("gpt1", GPT, pins_gpt1),
+ GRP_MUX("gpt2", GPT, pins_gpt2),
+ GRP_MUX("gpt3", GPT, pins_gpt3),
+ GRP_MUX("clkout0", CGU, pins_clkout0),
+ GRP_MUX("clkout1", CGU, pins_clkout1),
+ GRP_MUX("clkout2", CGU, pins_clkout2),
+ GRP_MUX("clkout3", CGU, pins_clkout3),
+ GRP_MUX("gnt1", PCI, pins_pci_gnt1),
+ GRP_MUX("gnt2", PCI, pins_pci_gnt2),
+ GRP_MUX("gnt3", PCI, pins_pci_gnt3),
+ GRP_MUX("req1", PCI, pins_pci_req1),
+ GRP_MUX("req2", PCI, pins_pci_req2),
+ GRP_MUX("req3", PCI, pins_pci_req3),
+/* xrx only */
+ GRP_MUX("nand rdy", EBU, pins_nand_rdy),
+ GRP_MUX("nand rd", EBU, pins_nand_rd),
+ GRP_MUX("exin3", EXIN, pins_exin3),
+ GRP_MUX("exin4", EXIN, pins_exin4),
+ GRP_MUX("exin5", EXIN, pins_exin5),
+ GRP_MUX("gnt4", PCI, pins_pci_gnt4),
+ GRP_MUX("req4", PCI, pins_pci_gnt4),
+ GRP_MUX("mdio", MDIO, pins_mdio),
+};
+
+static const struct ltq_pin_group ase_grps[] = {
+ GRP_MUX("exin0", EXIN, ase_pins_exin0),
+ GRP_MUX("exin1", EXIN, ase_pins_exin1),
+ GRP_MUX("exin2", EXIN, ase_pins_exin2),
+ GRP_MUX("jtag", JTAG, ase_pins_jtag),
+ GRP_MUX("stp", STP, ase_pins_stp),
+ GRP_MUX("asc", ASC, ase_pins_asc),
+ GRP_MUX("gpt1", GPT, ase_pins_gpt1),
+ GRP_MUX("gpt2", GPT, ase_pins_gpt2),
+ GRP_MUX("gpt3", GPT, ase_pins_gpt3),
+ GRP_MUX("ephy", EPHY, ase_pins_ephy),
+ GRP_MUX("dfe", DFE, ase_pins_dfe),
+ GRP_MUX("spi", SPI, ase_pins_spi),
+ GRP_MUX("spi_cs1", SPI, ase_pins_spi_cs1),
+ GRP_MUX("spi_cs2", SPI, ase_pins_spi_cs2),
+ GRP_MUX("spi_cs3", SPI, ase_pins_spi_cs3),
+};
+
+static const char * const xway_pci_grps[] = {"gnt1", "gnt2",
+ "gnt3", "req1",
+ "req2", "req3"};
+static const char * const xway_spi_grps[] = {"spi", "spi_cs1",
+ "spi_cs2", "spi_cs3",
+ "spi_cs4", "spi_cs5",
+ "spi_cs6"};
+static const char * const xway_cgu_grps[] = {"clkout0", "clkout1",
+ "clkout2", "clkout3"};
+static const char * const xway_ebu_grps[] = {"ebu a23", "ebu a24",
+ "ebu a25", "ebu cs1",
+ "ebu wait", "ebu clk",
+ "nand ale", "nand cs1",
+ "nand cle"};
+static const char * const xway_exin_grps[] = {"exin0", "exin1", "exin2"};
+static const char * const xway_gpt_grps[] = {"gpt1", "gpt2", "gpt3"};
+static const char * const xway_asc_grps[] = {"asc0", "asc0 cts rts"};
+static const char * const xway_jtag_grps[] = {"jtag"};
+static const char * const xway_stp_grps[] = {"stp"};
+static const char * const xway_nmi_grps[] = {"nmi"};
+
+/* ar9/vr9/gr9 */
+static const char * const xrx_mdio_grps[] = {"mdio"};
+static const char * const xrx_ebu_grps[] = {"ebu a23", "ebu a24",
+ "ebu a25", "ebu cs1",
+ "ebu wait", "ebu clk",
+ "nand ale", "nand cs1",
+ "nand cle", "nand rdy",
+ "nand rd"};
+static const char * const xrx_exin_grps[] = {"exin0", "exin1", "exin2",
+ "exin3", "exin4", "exin5"};
+static const char * const xrx_pci_grps[] = {"gnt1", "gnt2",
+ "gnt3", "gnt4",
+ "req1", "req2",
+ "req3", "req4"};
+
+/* ase */
+static const char * const ase_exin_grps[] = {"exin0", "exin1", "exin2"};
+static const char * const ase_gpt_grps[] = {"gpt1", "gpt2", "gpt3"};
+static const char * const ase_dfe_grps[] = {"dfe"};
+static const char * const ase_ephy_grps[] = {"ephy"};
+static const char * const ase_asc_grps[] = {"asc"};
+static const char * const ase_jtag_grps[] = {"jtag"};
+static const char * const ase_stp_grps[] = {"stp"};
+static const char * const ase_spi_grps[] = {"spi", "spi_cs1",
+ "spi_cs2", "spi_cs3"};
+
+static const struct ltq_pmx_func danube_funcs[] = {
+ {"spi", ARRAY_AND_SIZE(xway_spi_grps)},
+ {"asc", ARRAY_AND_SIZE(xway_asc_grps)},
+ {"cgu", ARRAY_AND_SIZE(xway_cgu_grps)},
+ {"jtag", ARRAY_AND_SIZE(xway_jtag_grps)},
+ {"exin", ARRAY_AND_SIZE(xway_exin_grps)},
+ {"stp", ARRAY_AND_SIZE(xway_stp_grps)},
+ {"gpt", ARRAY_AND_SIZE(xway_gpt_grps)},
+ {"nmi", ARRAY_AND_SIZE(xway_nmi_grps)},
+ {"pci", ARRAY_AND_SIZE(xway_pci_grps)},
+ {"ebu", ARRAY_AND_SIZE(xway_ebu_grps)},
+};
+
+static const struct ltq_pmx_func xrx_funcs[] = {
+ {"spi", ARRAY_AND_SIZE(xway_spi_grps)},
+ {"asc", ARRAY_AND_SIZE(xway_asc_grps)},
+ {"cgu", ARRAY_AND_SIZE(xway_cgu_grps)},
+ {"jtag", ARRAY_AND_SIZE(xway_jtag_grps)},
+ {"exin", ARRAY_AND_SIZE(xrx_exin_grps)},
+ {"stp", ARRAY_AND_SIZE(xway_stp_grps)},
+ {"gpt", ARRAY_AND_SIZE(xway_gpt_grps)},
+ {"nmi", ARRAY_AND_SIZE(xway_nmi_grps)},
+ {"pci", ARRAY_AND_SIZE(xrx_pci_grps)},
+ {"ebu", ARRAY_AND_SIZE(xrx_ebu_grps)},
+ {"mdio", ARRAY_AND_SIZE(xrx_mdio_grps)},
+};
+
+static const struct ltq_pmx_func ase_funcs[] = {
+ {"spi", ARRAY_AND_SIZE(ase_spi_grps)},
+ {"asc", ARRAY_AND_SIZE(ase_asc_grps)},
+ {"jtag", ARRAY_AND_SIZE(ase_jtag_grps)},
+ {"exin", ARRAY_AND_SIZE(ase_exin_grps)},
+ {"stp", ARRAY_AND_SIZE(ase_stp_grps)},
+ {"gpt", ARRAY_AND_SIZE(ase_gpt_grps)},
+ {"ephy", ARRAY_AND_SIZE(ase_ephy_grps)},
+ {"dfe", ARRAY_AND_SIZE(ase_dfe_grps)},
+};
+
+/* --------- pinconf related code --------- */
+static int xway_pinconf_get(struct pinctrl_dev *pctldev,
+ unsigned pin,
+ unsigned long *config)
+{
+ struct ltq_pinmux_info *info = pinctrl_dev_get_drvdata(pctldev);
+ enum ltq_pinconf_param param = LTQ_PINCONF_UNPACK_PARAM(*config);
+ int port = PORT(pin);
+ u32 reg;
+
+ switch (param) {
+ case LTQ_PINCONF_PARAM_OPEN_DRAIN:
+ if (port == PORT3)
+ reg = GPIO3_OD;
+ else
+ reg = GPIO_OD(pin);
+ *config = LTQ_PINCONF_PACK(param,
+ !!gpio_getbit(info->membase[0], reg, PORT_PIN(pin)));
+ break;
+
+ case LTQ_PINCONF_PARAM_PULL:
+ if (port == PORT3)
+ reg = GPIO3_PUDEN;
+ else
+ reg = GPIO_PUDEN(pin);
+ if (!gpio_getbit(info->membase[0], reg, PORT_PIN(pin))) {
+ *config = LTQ_PINCONF_PACK(param, 0);
+ break;
+ }
+
+ if (port == PORT3)
+ reg = GPIO3_PUDSEL;
+ else
+ reg = GPIO_PUDSEL(pin);
+ if (!gpio_getbit(info->membase[0], reg, PORT_PIN(pin)))
+ *config = LTQ_PINCONF_PACK(param, 2);
+ else
+ *config = LTQ_PINCONF_PACK(param, 1);
+ break;
+
+ default:
+ dev_err(pctldev->dev, "Invalid config param %04x\n", param);
+ return -ENOTSUPP;
+ }
+ return 0;
+}
+
+static int xway_pinconf_set(struct pinctrl_dev *pctldev,
+ unsigned pin,
+ unsigned long config)
+{
+ struct ltq_pinmux_info *info = pinctrl_dev_get_drvdata(pctldev);
+ enum ltq_pinconf_param param = LTQ_PINCONF_UNPACK_PARAM(config);
+ int arg = LTQ_PINCONF_UNPACK_ARG(config);
+ int port = PORT(pin);
+ u32 reg;
+
+ switch (param) {
+ case LTQ_PINCONF_PARAM_OPEN_DRAIN:
+ if (port == PORT3)
+ reg = GPIO3_OD;
+ else
+ reg = GPIO_OD(pin);
+ gpio_setbit(info->membase[0], reg, PORT_PIN(pin));
+ break;
+
+ case LTQ_PINCONF_PARAM_PULL:
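+ /* arg 0 disables the pull resistor entirely; arg 1 and 2
+ * choose the pull direction via PUDSEL */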
+ if (port == PORT3)
+ reg = GPIO3_PUDEN;
+ else
+ reg = GPIO_PUDEN(pin);
+ if (arg == 0) {
+ gpio_clearbit(info->membase[0], reg, PORT_PIN(pin));
+ break;
+ }
+ gpio_setbit(info->membase[0], reg, PORT_PIN(pin));
+
+ if (port == PORT3)
+ reg = GPIO3_PUDSEL;
+ else
+ reg = GPIO_PUDSEL(pin);
+ if (arg == 1)
+ gpio_clearbit(info->membase[0], reg, PORT_PIN(pin));
+ else if (arg == 2)
+ gpio_setbit(info->membase[0], reg, PORT_PIN(pin));
+ else
+ dev_err(pctldev->dev, "Invalid pull value %d\n", arg);
+ break;
+
+ default:
+ dev_err(pctldev->dev, "Invalid config param %04x\n", param);
+ return -ENOTSUPP;
+ }
+ return 0;
+}
+
+static struct pinconf_ops xway_pinconf_ops = {
+ .pin_config_get = xway_pinconf_get,
+ .pin_config_set = xway_pinconf_set,
+};
+
+static struct pinctrl_desc xway_pctrl_desc = {
+ .owner = THIS_MODULE,
+ .confops = &xway_pinconf_ops,
+};
+
+static inline int xway_mux_apply(struct pinctrl_dev *pctrldev,
+ int pin, int mux)
+{
+ struct ltq_pinmux_info *info = pinctrl_dev_get_drvdata(pctrldev);
+ int port = PORT(pin);
+ u32 alt1_reg = GPIO_ALT1(pin);
+
+ if (port == PORT3)
+ alt1_reg = GPIO3_ALT1;
+
+ if (mux & MUX_ALT0)
+ gpio_setbit(info->membase[0], GPIO_ALT0(pin), PORT_PIN(pin));
+ else
+ gpio_clearbit(info->membase[0], GPIO_ALT0(pin), PORT_PIN(pin));
+
+ if (mux & MUX_ALT1)
+ gpio_setbit(info->membase[0], alt1_reg, PORT_PIN(pin));
+ else
+ gpio_clearbit(info->membase[0], alt1_reg, PORT_PIN(pin));
+
+ return 0;
+}
+
+static const struct ltq_cfg_param xway_cfg_params[] = {
+ {"lantiq,pull", LTQ_PINCONF_PARAM_PULL},
+ {"lantiq,open-drain", LTQ_PINCONF_PARAM_OPEN_DRAIN},
+};
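+
+/*
+ * illustrative DT fragment consuming the params above (the node and pin
+ * names are hypothetical; the exact properties come from the board's
+ * pinctrl binding):
+ *
+ *	state_default {
+ *		lantiq,pins = "io5";
+ *		lantiq,open-drain = <1>;
+ *		lantiq,pull = <0>;
+ *	};
+ */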
+
+static struct ltq_pinmux_info xway_info = {
+ .desc = &xway_pctrl_desc,
+ .apply_mux = xway_mux_apply,
+ .params = xway_cfg_params,
+ .num_params = ARRAY_SIZE(xway_cfg_params),
+};
+
+/* --------- gpio_chip related code --------- */
+static void xway_gpio_set(struct gpio_chip *chip, unsigned int pin, int val)
+{
+ struct ltq_pinmux_info *info = dev_get_drvdata(chip->dev);
+
+ if (val)
+ gpio_setbit(info->membase[0], GPIO_OUT(pin), PORT_PIN(pin));
+ else
+ gpio_clearbit(info->membase[0], GPIO_OUT(pin), PORT_PIN(pin));
+}
+
+static int xway_gpio_get(struct gpio_chip *chip, unsigned int pin)
+{
+ struct ltq_pinmux_info *info = dev_get_drvdata(chip->dev);
+
+ return gpio_getbit(info->membase[0], GPIO_IN(pin), PORT_PIN(pin));
+}
+
+static int xway_gpio_dir_in(struct gpio_chip *chip, unsigned int pin)
+{
+ struct ltq_pinmux_info *info = dev_get_drvdata(chip->dev);
+
+ gpio_clearbit(info->membase[0], GPIO_DIR(pin), PORT_PIN(pin));
+
+ return 0;
+}
+
+static int xway_gpio_dir_out(struct gpio_chip *chip, unsigned int pin, int val)
+{
+ struct ltq_pinmux_info *info = dev_get_drvdata(chip->dev);
+
+ gpio_setbit(info->membase[0], GPIO_DIR(pin), PORT_PIN(pin));
+ xway_gpio_set(chip, pin, val);
+
+ return 0;
+}
+
+static int xway_gpio_req(struct gpio_chip *chip, unsigned offset)
+{
+ int gpio = chip->base + offset;
+
+ return pinctrl_request_gpio(gpio);
+}
+
+static void xway_gpio_free(struct gpio_chip *chip, unsigned offset)
+{
+ int gpio = chip->base + offset;
+
+ pinctrl_free_gpio(gpio);
+}
+
+static struct gpio_chip xway_chip = {
+ .label = "gpio-xway",
+ .direction_input = xway_gpio_dir_in,
+ .direction_output = xway_gpio_dir_out,
+ .get = xway_gpio_get,
+ .set = xway_gpio_set,
+ .request = xway_gpio_req,
+ .free = xway_gpio_free,
+ .base = -1,
+};
+
+
+/* --------- register the pinctrl layer --------- */
+static const unsigned xway_exin_pin_map[] = {GPIO0, GPIO1, GPIO2, GPIO39, GPIO46, GPIO9};
+static const unsigned ase_exin_pins_map[] = {GPIO6, GPIO29, GPIO0};
+
+static struct pinctrl_xway_soc {
+ int pin_count;
+ const struct ltq_mfp_pin *mfp;
+ const struct ltq_pin_group *grps;
+ unsigned int num_grps;
+ const struct ltq_pmx_func *funcs;
+ unsigned int num_funcs;
+ const unsigned *exin;
+ unsigned int num_exin;
+} soc_cfg[] = {
+ /* legacy xway */
+ {XWAY_MAX_PIN, xway_mfp,
+ xway_grps, ARRAY_SIZE(xway_grps),
+ danube_funcs, ARRAY_SIZE(danube_funcs),
+ xway_exin_pin_map, 3},
+ /* xway xr9 series */
+ {XR9_MAX_PIN, xway_mfp,
+ xway_grps, ARRAY_SIZE(xway_grps),
+ xrx_funcs, ARRAY_SIZE(xrx_funcs),
+ xway_exin_pin_map, 6},
+ /* xway ase series */
+ {XWAY_MAX_PIN, ase_mfp,
+ ase_grps, ARRAY_SIZE(ase_grps),
+ ase_funcs, ARRAY_SIZE(ase_funcs),
+ ase_exin_pins_map, 3},
+};
+
+static struct pinctrl_gpio_range xway_gpio_range = {
+ .name = "XWAY GPIO",
+ .gc = &xway_chip,
+};
+
+static const struct of_device_id xway_match[] = {
+ { .compatible = "lantiq,pinctrl-xway", .data = &soc_cfg[0]},
+ { .compatible = "lantiq,pinctrl-xr9", .data = &soc_cfg[1]},
+ { .compatible = "lantiq,pinctrl-ase", .data = &soc_cfg[2]},
+ {},
+};
+MODULE_DEVICE_TABLE(of, xway_match);
+
+static int __devinit pinmux_xway_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *match;
+ const struct pinctrl_xway_soc *xway_soc;
+ struct resource *res;
+ int ret, i;
+
+ /* get and remap our register range */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get resource\n");
+ return -ENOENT;
+ }
+ xway_info.membase[0] = devm_request_and_ioremap(&pdev->dev, res);
+ if (!xway_info.membase[0]) {
+ dev_err(&pdev->dev, "Failed to remap resource\n");
+ return -ENOMEM;
+ }
+
+ match = of_match_device(xway_match, &pdev->dev);
+ if (match)
+ xway_soc = (const struct pinctrl_xway_soc *) match->data;
+ else
+ xway_soc = &soc_cfg[0];
+
+ /* find out how many pads we have */
+ xway_chip.ngpio = xway_soc->pin_count;
+
+ /* load our pad descriptors */
+ xway_info.pads = devm_kzalloc(&pdev->dev,
+ sizeof(struct pinctrl_pin_desc) * xway_chip.ngpio,
+ GFP_KERNEL);
+ if (!xway_info.pads) {
+ dev_err(&pdev->dev, "Failed to allocate pads\n");
+ return -ENOMEM;
+ }
+ for (i = 0; i < xway_chip.ngpio; i++) {
+ /* strlen("ioXY") + 1 = 5 */
+ char *name = devm_kzalloc(&pdev->dev, 5, GFP_KERNEL);
+
+ if (!name) {
+ dev_err(&pdev->dev, "Failed to allocate pad name\n");
+ return -ENOMEM;
+ }
+ snprintf(name, 5, "io%d", i);
+ xway_info.pads[i].number = GPIO0 + i;
+ xway_info.pads[i].name = name;
+ }
+ xway_pctrl_desc.pins = xway_info.pads;
+
+ /* load the gpio chip */
+ xway_chip.dev = &pdev->dev;
+ of_gpiochip_add(&xway_chip);
+ ret = gpiochip_add(&xway_chip);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register gpio chip\n");
+ return ret;
+ }
+
+ /* setup the data needed by pinctrl */
+ xway_pctrl_desc.name = dev_name(&pdev->dev);
+ xway_pctrl_desc.npins = xway_chip.ngpio;
+
+ xway_info.num_pads = xway_chip.ngpio;
+ xway_info.num_mfp = xway_chip.ngpio;
+ xway_info.mfp = xway_soc->mfp;
+ xway_info.grps = xway_soc->grps;
+ xway_info.num_grps = xway_soc->num_grps;
+ xway_info.funcs = xway_soc->funcs;
+ xway_info.num_funcs = xway_soc->num_funcs;
+ xway_info.exin = xway_soc->exin;
+ xway_info.num_exin = xway_soc->num_exin;
+
+ /* register with the generic lantiq layer */
+ ret = ltq_pinctrl_register(pdev, &xway_info);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register pinctrl driver\n");
+ return ret;
+ }
+
+ /* finish with registering the gpio range in pinctrl */
+ xway_gpio_range.npins = xway_chip.ngpio;
+ xway_gpio_range.base = xway_chip.base;
+ pinctrl_add_gpio_range(xway_info.pctrl, &xway_gpio_range);
+ dev_info(&pdev->dev, "Init done\n");
+ return 0;
+}
+
+static struct platform_driver pinmux_xway_driver = {
+ .probe = pinmux_xway_probe,
+ .driver = {
+ .name = "pinctrl-xway",
+ .owner = THIS_MODULE,
+ .of_match_table = xway_match,
+ },
+};
+
+static int __init pinmux_xway_init(void)
+{
+ return platform_driver_register(&pinmux_xway_driver);
+}
+
+core_initcall_sync(pinmux_xway_init);
diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c
index 6b9af98..18d74f2 100644
--- a/drivers/platform/x86/hp_accel.c
+++ b/drivers/platform/x86/hp_accel.c
@@ -382,31 +382,8 @@ static struct acpi_driver lis3lv02d_driver = {
},
.drv.pm = HP_ACCEL_PM,
};
-
-static int __init lis3lv02d_init_module(void)
-{
- int ret;
-
- if (acpi_disabled)
- return -ENODEV;
-
- ret = acpi_bus_register_driver(&lis3lv02d_driver);
- if (ret < 0)
- return ret;
-
- pr_info("driver loaded\n");
-
- return 0;
-}
-
-static void __exit lis3lv02d_exit_module(void)
-{
- acpi_bus_unregister_driver(&lis3lv02d_driver);
-}
+module_acpi_driver(lis3lv02d_driver);
MODULE_DESCRIPTION("Glue between LIS3LV02Dx and HP ACPI BIOS and support for disk protection LED.");
MODULE_AUTHOR("Yan Burman, Eric Piel, Pavel Machek");
MODULE_LICENSE("GPL");
-
-module_init(lis3lv02d_init_module);
-module_exit(lis3lv02d_exit_module);
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index dae7abe..5ff4f2e 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -917,20 +917,8 @@ static struct acpi_driver ideapad_acpi_driver = {
.drv.pm = &ideapad_pm,
.owner = THIS_MODULE,
};
-
-static int __init ideapad_acpi_module_init(void)
-{
- return acpi_bus_register_driver(&ideapad_acpi_driver);
-}
-
-static void __exit ideapad_acpi_module_exit(void)
-{
- acpi_bus_unregister_driver(&ideapad_acpi_driver);
-}
+module_acpi_driver(ideapad_acpi_driver);
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_DESCRIPTION("IdeaPad ACPI Extras");
MODULE_LICENSE("GPL");
-
-module_init(ideapad_acpi_module_init);
-module_exit(ideapad_acpi_module_exit);
diff --git a/drivers/platform/x86/topstar-laptop.c b/drivers/platform/x86/topstar-laptop.c
index d528daa..d727bfe 100644
--- a/drivers/platform/x86/topstar-laptop.c
+++ b/drivers/platform/x86/topstar-laptop.c
@@ -186,27 +186,7 @@ static struct acpi_driver acpi_topstar_driver = {
.notify = acpi_topstar_notify,
},
};
-
-static int __init topstar_laptop_init(void)
-{
- int ret;
-
- ret = acpi_bus_register_driver(&acpi_topstar_driver);
- if (ret < 0)
- return ret;
-
- pr_info("ACPI extras driver loaded\n");
-
- return 0;
-}
-
-static void __exit topstar_laptop_exit(void)
-{
- acpi_bus_unregister_driver(&acpi_topstar_driver);
-}
-
-module_init(topstar_laptop_init);
-module_exit(topstar_laptop_exit);
+module_acpi_driver(acpi_topstar_driver);
MODULE_AUTHOR("Herton Ronaldo Krzesinski");
MODULE_DESCRIPTION("Topstar Laptop ACPI Extras driver");
diff --git a/drivers/platform/x86/toshiba_bluetooth.c b/drivers/platform/x86/toshiba_bluetooth.c
index 5e5d631..e95be0b 100644
--- a/drivers/platform/x86/toshiba_bluetooth.c
+++ b/drivers/platform/x86/toshiba_bluetooth.c
@@ -122,30 +122,10 @@ static int toshiba_bt_rfkill_add(struct acpi_device *device)
return result;
}
-static int __init toshiba_bt_rfkill_init(void)
-{
- int result;
-
- result = acpi_bus_register_driver(&toshiba_bt_rfkill_driver);
- if (result < 0) {
- ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
- "Error registering driver\n"));
- return result;
- }
-
- return 0;
-}
-
static int toshiba_bt_rfkill_remove(struct acpi_device *device, int type)
{
/* clean up */
return 0;
}
-static void __exit toshiba_bt_rfkill_exit(void)
-{
- acpi_bus_unregister_driver(&toshiba_bt_rfkill_driver);
-}
-
-module_init(toshiba_bt_rfkill_init);
-module_exit(toshiba_bt_rfkill_exit);
+module_acpi_driver(toshiba_bt_rfkill_driver);
diff --git a/drivers/platform/x86/xo15-ebook.c b/drivers/platform/x86/xo15-ebook.c
index 38ba39d..16d340c 100644
--- a/drivers/platform/x86/xo15-ebook.c
+++ b/drivers/platform/x86/xo15-ebook.c
@@ -170,16 +170,4 @@ static struct acpi_driver xo15_ebook_driver = {
},
.drv.pm = &ebook_switch_pm,
};
-
-static int __init xo15_ebook_init(void)
-{
- return acpi_bus_register_driver(&xo15_ebook_driver);
-}
-
-static void __exit xo15_ebook_exit(void)
-{
- acpi_bus_unregister_driver(&xo15_ebook_driver);
-}
-
-module_init(xo15_ebook_init);
-module_exit(xo15_ebook_exit);
+module_acpi_driver(xo15_ebook_driver);
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index 507a8e2..26b5d4b 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -321,14 +321,9 @@ static int __init acpi_pnp_match(struct device *dev, void *_pnp)
{
struct acpi_device *acpi = to_acpi_device(dev);
struct pnp_dev *pnp = _pnp;
- struct device *physical_device;
-
- physical_device = acpi_get_physical_device(acpi->handle);
- if (physical_device)
- put_device(physical_device);
/* true means it matched */
- return !physical_device
+ return !acpi->physical_node_count
&& compare_pnp_id(pnp->id, acpi_device_hid(acpi));
}
diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c
index 3541b44..e7a4780 100644
--- a/drivers/remoteproc/remoteproc_virtio.c
+++ b/drivers/remoteproc/remoteproc_virtio.c
@@ -84,6 +84,9 @@ static struct virtqueue *rp_find_vq(struct virtio_device *vdev,
if (id >= ARRAY_SIZE(rvdev->vring))
return ERR_PTR(-EINVAL);
+ if (!name)
+ return NULL;
+
ret = rproc_alloc_vring(rvdev, id);
if (ret)
return ERR_PTR(ret);
@@ -103,7 +106,7 @@ static struct virtqueue *rp_find_vq(struct virtio_device *vdev,
* Create the new vq, and tell virtio we're not interested in
* the 'weak' smp barriers, since we're talking with a real device.
*/
- vq = vring_new_virtqueue(len, rvring->align, vdev, false, addr,
+ vq = vring_new_virtqueue(id, len, rvring->align, vdev, false, addr,
rproc_virtio_notify, callback, name);
if (!vq) {
dev_err(dev, "vring_new_virtqueue %s failed\n", name);
diff --git a/drivers/rpmsg/Kconfig b/drivers/rpmsg/Kconfig
index 32aead6..2bd911f 100644
--- a/drivers/rpmsg/Kconfig
+++ b/drivers/rpmsg/Kconfig
@@ -4,7 +4,6 @@ menu "Rpmsg drivers (EXPERIMENTAL)"
config RPMSG
tristate
select VIRTIO
- select VIRTIO_RING
depends on EXPERIMENTAL
endmenu
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
index 47cccd5..7dabef6 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/kvm/kvm_virtio.c
@@ -190,6 +190,9 @@ static struct virtqueue *kvm_find_vq(struct virtio_device *vdev,
if (index >= kdev->desc->num_vq)
return ERR_PTR(-ENOENT);
+ if (!name)
+ return NULL;
+
config = kvm_vq_config(kdev->desc)+index;
err = vmem_add_mapping(config->address,
@@ -198,7 +201,7 @@ static struct virtqueue *kvm_find_vq(struct virtio_device *vdev,
if (err)
goto out;
- vq = vring_new_virtqueue(config->num, KVM_S390_VIRTIO_RING_ALIGN,
+ vq = vring_new_virtqueue(index, config->num, KVM_S390_VIRTIO_RING_ALIGN,
vdev, true, (void *) config->address,
kvm_notify, callback, name);
if (!vq) {
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 9c5c5f2..be2c9a6 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1257,7 +1257,7 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
}
sfp->mmap_called = 1;
- vma->vm_flags |= VM_RESERVED;
+ vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_private_data = sfp;
vma->vm_ops = &sg_mmap_vm_ops;
return 0;
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index ecc31a1..1acae35 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -134,6 +134,7 @@ config SPI_DAVINCI
tristate "Texas Instruments DaVinci/DA8x/OMAP-L/AM1x SoC SPI controller"
depends on ARCH_DAVINCI
select SPI_BITBANG
+ select TI_EDMA
help
SPI master controller for DaVinci/DA8x/OMAP-L/AM1x SPI modules.
@@ -237,6 +238,13 @@ config SPI_OC_TINY
help
This is the driver for OpenCores tiny SPI master controller.
+config SPI_OCTEON
+ tristate "Cavium OCTEON SPI controller"
+ depends on CPU_CAVIUM_OCTEON
+ help
+ SPI host driver for the hardware found on some Cavium OCTEON
+ SoCs.
+
config SPI_OMAP_UWIRE
tristate "OMAP1 MicroWire"
depends on ARCH_OMAP1
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 22fd3a7..c48df47 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -39,6 +39,7 @@ obj-$(CONFIG_SPI_MPC52xx) += spi-mpc52xx.o
obj-$(CONFIG_SPI_MXS) += spi-mxs.o
obj-$(CONFIG_SPI_NUC900) += spi-nuc900.o
obj-$(CONFIG_SPI_OC_TINY) += spi-oc-tiny.o
+obj-$(CONFIG_SPI_OCTEON) += spi-octeon.o
obj-$(CONFIG_SPI_OMAP_UWIRE) += spi-omap-uwire.o
obj-$(CONFIG_SPI_OMAP_100K) += spi-omap-100k.o
obj-$(CONFIG_SPI_OMAP24XX) += spi-omap2-mcspi.o
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index 3afe2f4f..147dfa8 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -25,13 +25,14 @@
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/clk.h>
+#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
+#include <linux/edma.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/slab.h>
#include <linux/platform_data/spi-davinci.h>
-#include <mach/edma.h>
#define SPI_NO_RESOURCE ((resource_size_t)-1)
@@ -113,14 +114,6 @@
#define SPIDEF 0x4c
#define SPIFMT0 0x50
-/* We have 2 DMA channels per CS, one for RX and one for TX */
-struct davinci_spi_dma {
- int tx_channel;
- int rx_channel;
- int dummy_param_slot;
- enum dma_event_q eventq;
-};
-
/* SPI Controller driver's private data. */
struct davinci_spi {
struct spi_bitbang bitbang;
@@ -134,11 +127,14 @@ struct davinci_spi {
const void *tx;
void *rx;
-#define SPI_TMP_BUFSZ (SMP_CACHE_BYTES + 1)
- u8 rx_tmp_buf[SPI_TMP_BUFSZ];
int rcount;
int wcount;
- struct davinci_spi_dma dma;
+
+ struct dma_chan *dma_rx;
+ struct dma_chan *dma_tx;
+ int dma_rx_chnum;
+ int dma_tx_chnum;
+
struct davinci_spi_platform_data *pdata;
void (*get_rx)(u32 rx_data, struct davinci_spi *);
@@ -496,21 +492,23 @@ out:
return errors;
}
-static void davinci_spi_dma_callback(unsigned lch, u16 status, void *data)
+static void davinci_spi_dma_rx_callback(void *data)
{
- struct davinci_spi *dspi = data;
- struct davinci_spi_dma *dma = &dspi->dma;
+ struct davinci_spi *dspi = (struct davinci_spi *)data;
- edma_stop(lch);
+ dspi->rcount = 0;
- if (status == DMA_COMPLETE) {
- if (lch == dma->rx_channel)
- dspi->rcount = 0;
- if (lch == dma->tx_channel)
- dspi->wcount = 0;
- }
+ if (!dspi->wcount && !dspi->rcount)
+ complete(&dspi->done);
+}
- if ((!dspi->wcount && !dspi->rcount) || (status != DMA_COMPLETE))
+static void davinci_spi_dma_tx_callback(void *data)
+{
+ struct davinci_spi *dspi = (struct davinci_spi *)data;
+
+ dspi->wcount = 0;
+
+ if (!dspi->wcount && !dspi->rcount)
complete(&dspi->done);
}
@@ -526,20 +524,20 @@ static void davinci_spi_dma_callback(unsigned lch, u16 status, void *data)
static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
{
struct davinci_spi *dspi;
- int data_type, ret;
+ int data_type, ret = -ENOMEM;
u32 tx_data, spidat1;
u32 errors = 0;
struct davinci_spi_config *spicfg;
struct davinci_spi_platform_data *pdata;
- unsigned uninitialized_var(rx_buf_count);
- struct device *sdev;
+ void *dummy_buf = NULL;
+ struct scatterlist sg_rx, sg_tx;
dspi = spi_master_get_devdata(spi->master);
pdata = dspi->pdata;
spicfg = (struct davinci_spi_config *)spi->controller_data;
if (!spicfg)
spicfg = &davinci_spi_default_cfg;
- sdev = dspi->bitbang.master->dev.parent;
/* convert len to words based on bits_per_word */
data_type = dspi->bytes_per_word[spi->chip_select];
@@ -567,112 +565,83 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
spidat1 |= tx_data & 0xFFFF;
iowrite32(spidat1, dspi->base + SPIDAT1);
} else {
- struct davinci_spi_dma *dma;
- unsigned long tx_reg, rx_reg;
- struct edmacc_param param;
- void *rx_buf;
- int b, c;
-
- dma = &dspi->dma;
-
- tx_reg = (unsigned long)dspi->pbase + SPIDAT1;
- rx_reg = (unsigned long)dspi->pbase + SPIBUF;
-
- /*
- * Transmit DMA setup
- *
- * If there is transmit data, map the transmit buffer, set it
- * as the source of data and set the source B index to data
- * size. If there is no transmit data, set the transmit register
- * as the source of data, and set the source B index to zero.
- *
- * The destination is always the transmit register itself. And
- * the destination never increments.
- */
-
- if (t->tx_buf) {
- t->tx_dma = dma_map_single(&spi->dev, (void *)t->tx_buf,
- t->len, DMA_TO_DEVICE);
- if (dma_mapping_error(&spi->dev, t->tx_dma)) {
- dev_dbg(sdev, "Unable to DMA map %d bytes"
- "TX buffer\n", t->len);
- return -ENOMEM;
- }
- }
-
- /*
- * If number of words is greater than 65535, then we need
- * to configure a 3 dimension transfer. Use the BCNTRLD
- * feature to allow for transfers that aren't even multiples
- * of 65535 (or any other possible b size) by first transferring
- * the remainder amount then grabbing the next N blocks of
- * 65535 words.
- */
-
- c = dspi->wcount / (SZ_64K - 1); /* N 65535 Blocks */
- b = dspi->wcount - c * (SZ_64K - 1); /* Remainder */
- if (b)
- c++;
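+ /* the controller paces DMA one word (data_type bytes) per
+ * request, hence addr_width = data_type and maxburst = 1 */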
+ struct dma_slave_config dma_rx_conf = {
+ .direction = DMA_DEV_TO_MEM,
+ .src_addr = (unsigned long)dspi->pbase + SPIBUF,
+ .src_addr_width = data_type,
+ .src_maxburst = 1,
+ };
+ struct dma_slave_config dma_tx_conf = {
+ .direction = DMA_MEM_TO_DEV,
+ .dst_addr = (unsigned long)dspi->pbase + SPIDAT1,
+ .dst_addr_width = data_type,
+ .dst_maxburst = 1,
+ };
+ struct dma_async_tx_descriptor *rxdesc;
+ struct dma_async_tx_descriptor *txdesc;
+ void *buf;
+
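+ /* bounce buffer standing in for whichever of the rx/tx
+ * buffers the caller did not supply */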
+ dummy_buf = kzalloc(t->len, GFP_KERNEL);
+ if (!dummy_buf)
+ goto err_alloc_dummy_buf;
+
+ dmaengine_slave_config(dspi->dma_rx, &dma_rx_conf);
+ dmaengine_slave_config(dspi->dma_tx, &dma_tx_conf);
+
+ sg_init_table(&sg_rx, 1);
+ if (!t->rx_buf)
+ buf = dummy_buf;
else
- b = SZ_64K - 1;
-
- param.opt = TCINTEN | EDMA_TCC(dma->tx_channel);
- param.src = t->tx_buf ? t->tx_dma : tx_reg;
- param.a_b_cnt = b << 16 | data_type;
- param.dst = tx_reg;
- param.src_dst_bidx = t->tx_buf ? data_type : 0;
- param.link_bcntrld = 0xffffffff;
- param.src_dst_cidx = t->tx_buf ? data_type : 0;
- param.ccnt = c;
- edma_write_slot(dma->tx_channel, &param);
- edma_link(dma->tx_channel, dma->dummy_param_slot);
-
- /*
- * Receive DMA setup
- *
- * If there is receive buffer, use it to receive data. If there
- * is none provided, use a temporary receive buffer. Set the
- * destination B index to 0 so effectively only one byte is used
- * in the temporary buffer (address does not increment).
- *
- * The source of receive data is the receive data register. The
- * source address never increments.
- */
-
- if (t->rx_buf) {
- rx_buf = t->rx_buf;
- rx_buf_count = t->len;
- } else {
- rx_buf = dspi->rx_tmp_buf;
- rx_buf_count = sizeof(dspi->rx_tmp_buf);
+ buf = t->rx_buf;
+ t->rx_dma = dma_map_single(&spi->dev, buf,
+ t->len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&spi->dev, t->rx_dma)) {
+ ret = -EFAULT;
+ goto err_rx_map;
}
+ sg_dma_address(&sg_rx) = t->rx_dma;
+ sg_dma_len(&sg_rx) = t->len;
- t->rx_dma = dma_map_single(&spi->dev, rx_buf, rx_buf_count,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(&spi->dev, t->rx_dma)) {
- dev_dbg(sdev, "Couldn't DMA map a %d bytes RX buffer\n",
- rx_buf_count);
- if (t->tx_buf)
- dma_unmap_single(&spi->dev, t->tx_dma, t->len,
- DMA_TO_DEVICE);
- return -ENOMEM;
+ sg_init_table(&sg_tx, 1);
+ if (!t->tx_buf)
+ buf = dummy_buf;
+ else
+ buf = (void *)t->tx_buf;
+ t->tx_dma = dma_map_single(&spi->dev, buf,
+ t->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&spi->dev, t->tx_dma)) {
+ ret = -EFAULT;
+ goto err_tx_map;
}
-
- param.opt = TCINTEN | EDMA_TCC(dma->rx_channel);
- param.src = rx_reg;
- param.a_b_cnt = b << 16 | data_type;
- param.dst = t->rx_dma;
- param.src_dst_bidx = (t->rx_buf ? data_type : 0) << 16;
- param.link_bcntrld = 0xffffffff;
- param.src_dst_cidx = (t->rx_buf ? data_type : 0) << 16;
- param.ccnt = c;
- edma_write_slot(dma->rx_channel, &param);
+ sg_dma_address(&sg_tx) = t->tx_dma;
+ sg_dma_len(&sg_tx) = t->len;
+
+ rxdesc = dmaengine_prep_slave_sg(dspi->dma_rx,
+ &sg_rx, 1, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!rxdesc)
+ goto err_desc;
+
+ txdesc = dmaengine_prep_slave_sg(dspi->dma_tx,
+ &sg_tx, 1, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!txdesc)
+ goto err_desc;
+
+ rxdesc->callback = davinci_spi_dma_rx_callback;
+ rxdesc->callback_param = (void *)dspi;
+ txdesc->callback = davinci_spi_dma_tx_callback;
+ txdesc->callback_param = (void *)dspi;
if (pdata->cshold_bug)
iowrite16(spidat1 >> 16, dspi->base + SPIDAT1 + 2);
- edma_start(dma->rx_channel);
- edma_start(dma->tx_channel);
+ dmaengine_submit(rxdesc);
+ dmaengine_submit(txdesc);
+
+ dma_async_issue_pending(dspi->dma_rx);
+ dma_async_issue_pending(dspi->dma_tx);
+
set_io_bits(dspi->base + SPIINT, SPIINT_DMA_REQ_EN);
}
@@ -690,15 +659,13 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
clear_io_bits(dspi->base + SPIINT, SPIINT_MASKALL);
if (spicfg->io_type == SPI_IO_TYPE_DMA) {
-
- if (t->tx_buf)
- dma_unmap_single(&spi->dev, t->tx_dma, t->len,
- DMA_TO_DEVICE);
-
- dma_unmap_single(&spi->dev, t->rx_dma, rx_buf_count,
- DMA_FROM_DEVICE);
-
clear_io_bits(dspi->base + SPIINT, SPIINT_DMA_REQ_EN);
+
+ dma_unmap_single(&spi->dev, t->rx_dma,
+ t->len, DMA_FROM_DEVICE);
+ dma_unmap_single(&spi->dev, t->tx_dma,
+ t->len, DMA_TO_DEVICE);
+ kfree(dummy_buf);
}
clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
@@ -716,11 +683,20 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
}
if (dspi->rcount != 0 || dspi->wcount != 0) {
- dev_err(sdev, "SPI data transfer error\n");
+ dev_err(&spi->dev, "SPI data transfer error\n");
return -EIO;
}
return t->len;
+
+err_desc:
+ dma_unmap_single(&spi->dev, t->tx_dma, t->len, DMA_TO_DEVICE);
+err_tx_map:
+ dma_unmap_single(&spi->dev, t->rx_dma, t->len, DMA_FROM_DEVICE);
+err_rx_map:
+ kfree(dummy_buf);
+err_alloc_dummy_buf:
+ return ret;
}
/**
@@ -751,39 +727,33 @@ static irqreturn_t davinci_spi_irq(s32 irq, void *data)
static int davinci_spi_request_dma(struct davinci_spi *dspi)
{
+ dma_cap_mask_t mask;
+ struct device *sdev = dspi->bitbang.master->dev.parent;
int r;
- struct davinci_spi_dma *dma = &dspi->dma;
- r = edma_alloc_channel(dma->rx_channel, davinci_spi_dma_callback, dspi,
- dma->eventq);
- if (r < 0) {
- pr_err("Unable to request DMA channel for SPI RX\n");
- r = -EAGAIN;
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ dspi->dma_rx = dma_request_channel(mask, edma_filter_fn,
+ &dspi->dma_rx_chnum);
+ if (!dspi->dma_rx) {
+ dev_err(sdev, "request RX DMA channel failed\n");
+ r = -ENODEV;
goto rx_dma_failed;
}
- r = edma_alloc_channel(dma->tx_channel, davinci_spi_dma_callback, dspi,
- dma->eventq);
- if (r < 0) {
- pr_err("Unable to request DMA channel for SPI TX\n");
- r = -EAGAIN;
+ dspi->dma_tx = dma_request_channel(mask, edma_filter_fn,
+ &dspi->dma_tx_chnum);
+ if (!dspi->dma_tx) {
+ dev_err(sdev, "request TX DMA channel failed\n");
+ r = -ENODEV;
goto tx_dma_failed;
}
- r = edma_alloc_slot(EDMA_CTLR(dma->tx_channel), EDMA_SLOT_ANY);
- if (r < 0) {
- pr_err("Unable to request SPI TX DMA param slot\n");
- r = -EAGAIN;
- goto param_failed;
- }
- dma->dummy_param_slot = r;
- edma_link(dma->dummy_param_slot, dma->dummy_param_slot);
-
return 0;
-param_failed:
- edma_free_channel(dma->tx_channel);
+
tx_dma_failed:
- edma_free_channel(dma->rx_channel);
+ dma_release_channel(dspi->dma_rx);
rx_dma_failed:
return r;
}
@@ -898,9 +868,8 @@ static int __devinit davinci_spi_probe(struct platform_device *pdev)
dspi->bitbang.txrx_bufs = davinci_spi_bufs;
if (dma_rx_chan != SPI_NO_RESOURCE &&
dma_tx_chan != SPI_NO_RESOURCE) {
- dspi->dma.rx_channel = dma_rx_chan;
- dspi->dma.tx_channel = dma_tx_chan;
- dspi->dma.eventq = pdata->dma_event_q;
+ dspi->dma_rx_chnum = dma_rx_chan;
+ dspi->dma_tx_chnum = dma_tx_chan;
ret = davinci_spi_request_dma(dspi);
if (ret)
@@ -955,9 +924,8 @@ static int __devinit davinci_spi_probe(struct platform_device *pdev)
return ret;
free_dma:
- edma_free_channel(dspi->dma.tx_channel);
- edma_free_channel(dspi->dma.rx_channel);
- edma_free_slot(dspi->dma.dummy_param_slot);
+ dma_release_channel(dspi->dma_rx);
+ dma_release_channel(dspi->dma_tx);
free_clk:
clk_disable(dspi->clk);
clk_put(dspi->clk);
diff --git a/drivers/spi/spi-octeon.c b/drivers/spi/spi-octeon.c
new file mode 100644
index 0000000..ea8fb2e
--- /dev/null
+++ b/drivers/spi/spi-octeon.c
@@ -0,0 +1,362 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2011, 2012 Cavium, Inc.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/spi/spi.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/of.h>
+
+#include <asm/octeon/octeon.h>
+#include <asm/octeon/cvmx-mpi-defs.h>
+
+#define OCTEON_SPI_CFG 0
+#define OCTEON_SPI_STS 0x08
+#define OCTEON_SPI_TX 0x10
+#define OCTEON_SPI_DAT0 0x80
+
+#define OCTEON_SPI_MAX_BYTES 9
+
+#define OCTEON_SPI_MAX_CLOCK_HZ 16000000
+
+struct octeon_spi {
+ struct spi_master *my_master;
+ u64 register_base;
+ u64 last_cfg;
+ u64 cs_enax;
+};
+
+struct octeon_spi_setup {
+ u32 max_speed_hz;
+ u8 chip_select;
+ u8 mode;
+ u8 bits_per_word;
+};
+
+static void octeon_spi_wait_ready(struct octeon_spi *p)
+{
+ union cvmx_mpi_sts mpi_sts;
+ unsigned int loops = 0;
+
+ do {
+ if (loops++)
+ __delay(500);
+ mpi_sts.u64 = cvmx_read_csr(p->register_base + OCTEON_SPI_STS);
+ } while (mpi_sts.s.busy);
+}
+
+static int octeon_spi_do_transfer(struct octeon_spi *p,
+ struct spi_message *msg,
+ struct spi_transfer *xfer,
+ bool last_xfer)
+{
+ union cvmx_mpi_cfg mpi_cfg;
+ union cvmx_mpi_tx mpi_tx;
+ unsigned int clkdiv;
+ unsigned int speed_hz;
+ int mode;
+ bool cpha, cpol;
+ int bits_per_word;
+ const u8 *tx_buf;
+ u8 *rx_buf;
+ int len;
+ int i;
+
+ struct octeon_spi_setup *msg_setup = spi_get_ctldata(msg->spi);
+
+ speed_hz = msg_setup->max_speed_hz;
+ mode = msg_setup->mode;
+ cpha = mode & SPI_CPHA;
+ cpol = mode & SPI_CPOL;
+ bits_per_word = msg_setup->bits_per_word;
+
+ if (xfer->speed_hz)
+ speed_hz = xfer->speed_hz;
+ if (xfer->bits_per_word)
+ bits_per_word = xfer->bits_per_word;
+
+ if (speed_hz > OCTEON_SPI_MAX_CLOCK_HZ)
+ speed_hz = OCTEON_SPI_MAX_CLOCK_HZ;
+
+ clkdiv = octeon_get_io_clock_rate() / (2 * speed_hz);
+
+ mpi_cfg.u64 = 0;
+
+ mpi_cfg.s.clkdiv = clkdiv;
+ mpi_cfg.s.cshi = (mode & SPI_CS_HIGH) ? 1 : 0;
+ mpi_cfg.s.lsbfirst = (mode & SPI_LSB_FIRST) ? 1 : 0;
+ mpi_cfg.s.wireor = (mode & SPI_3WIRE) ? 1 : 0;
+ mpi_cfg.s.idlelo = cpha != cpol;
+ mpi_cfg.s.cslate = cpha ? 1 : 0;
+ mpi_cfg.s.enable = 1;
+
+ if (msg_setup->chip_select < 4)
+ p->cs_enax |= 1ull << (12 + msg_setup->chip_select);
+ mpi_cfg.u64 |= p->cs_enax;
+
+ if (mpi_cfg.u64 != p->last_cfg) {
+ p->last_cfg = mpi_cfg.u64;
+ cvmx_write_csr(p->register_base + OCTEON_SPI_CFG, mpi_cfg.u64);
+ }
+ tx_buf = xfer->tx_buf;
+ rx_buf = xfer->rx_buf;
+ len = xfer->len;
+ while (len > OCTEON_SPI_MAX_BYTES) {
+ for (i = 0; i < OCTEON_SPI_MAX_BYTES; i++) {
+ u8 d;
+ if (tx_buf)
+ d = *tx_buf++;
+ else
+ d = 0;
+ cvmx_write_csr(p->register_base + OCTEON_SPI_DAT0 + (8 * i), d);
+ }
+ mpi_tx.u64 = 0;
+ mpi_tx.s.csid = msg_setup->chip_select;
+ mpi_tx.s.leavecs = 1;
+ mpi_tx.s.txnum = tx_buf ? OCTEON_SPI_MAX_BYTES : 0;
+ mpi_tx.s.totnum = OCTEON_SPI_MAX_BYTES;
+ cvmx_write_csr(p->register_base + OCTEON_SPI_TX, mpi_tx.u64);
+
+ octeon_spi_wait_ready(p);
+ if (rx_buf)
+ for (i = 0; i < OCTEON_SPI_MAX_BYTES; i++) {
+ u64 v = cvmx_read_csr(p->register_base + OCTEON_SPI_DAT0 + (8 * i));
+ *rx_buf++ = (u8)v;
+ }
+ len -= OCTEON_SPI_MAX_BYTES;
+ }
+
+ for (i = 0; i < len; i++) {
+ u8 d;
+ if (tx_buf)
+ d = *tx_buf++;
+ else
+ d = 0;
+ cvmx_write_csr(p->register_base + OCTEON_SPI_DAT0 + (8 * i), d);
+ }
+
+ mpi_tx.u64 = 0;
+ mpi_tx.s.csid = msg_setup->chip_select;
+ if (last_xfer)
+ mpi_tx.s.leavecs = xfer->cs_change;
+ else
+ mpi_tx.s.leavecs = !xfer->cs_change;
+ mpi_tx.s.txnum = tx_buf ? len : 0;
+ mpi_tx.s.totnum = len;
+ cvmx_write_csr(p->register_base + OCTEON_SPI_TX, mpi_tx.u64);
+
+ octeon_spi_wait_ready(p);
+ if (rx_buf)
+ for (i = 0; i < len; i++) {
+ u64 v = cvmx_read_csr(p->register_base + OCTEON_SPI_DAT0 + (8 * i));
+ *rx_buf++ = (u8)v;
+ }
+
+ if (xfer->delay_usecs)
+ udelay(xfer->delay_usecs);
+
+ return xfer->len;
+}
+
+static int octeon_spi_validate_bpw(struct spi_device *spi, u32 bits_per_word)
+{
+ switch (bits_per_word) {
+ case 8:
+ break;
+ default:
+ dev_err(&spi->dev, "%u bits per word not supported\n",
+ bits_per_word);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int octeon_spi_transfer_one_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct octeon_spi *p = spi_master_get_devdata(master);
+ unsigned int total_len = 0;
+ int status = 0;
+ struct spi_transfer *xfer;
+
+ /*
+ * We'd better have set the configuration via a call to .setup
+ * before we get here.
+ */
+ if (spi_get_ctldata(msg->spi) == NULL) {
+ status = -EINVAL;
+ goto err;
+ }
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ if (xfer->bits_per_word) {
+ status = octeon_spi_validate_bpw(msg->spi,
+ xfer->bits_per_word);
+ if (status)
+ goto err;
+ }
+ }
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ bool last_xfer = &xfer->transfer_list == msg->transfers.prev;
+ int r = octeon_spi_do_transfer(p, msg, xfer, last_xfer);
+ if (r < 0) {
+ status = r;
+ goto err;
+ }
+ total_len += r;
+ }
+err:
+ msg->status = status;
+ msg->actual_length = total_len;
+ spi_finalize_current_message(master);
+ return status;
+}
+
+static struct octeon_spi_setup *octeon_spi_new_setup(struct spi_device *spi)
+{
+ struct octeon_spi_setup *setup = kzalloc(sizeof(*setup), GFP_KERNEL);
+ if (!setup)
+ return NULL;
+
+ setup->max_speed_hz = spi->max_speed_hz;
+ setup->chip_select = spi->chip_select;
+ setup->mode = spi->mode;
+ setup->bits_per_word = spi->bits_per_word;
+ return setup;
+}
+
+static int octeon_spi_setup(struct spi_device *spi)
+{
+ int r;
+ struct octeon_spi_setup *new_setup;
+ struct octeon_spi_setup *old_setup = spi_get_ctldata(spi);
+
+ r = octeon_spi_validate_bpw(spi, spi->bits_per_word);
+ if (r)
+ return r;
+
+ new_setup = octeon_spi_new_setup(spi);
+ if (!new_setup)
+ return -ENOMEM;
+
+ spi_set_ctldata(spi, new_setup);
+ kfree(old_setup);
+
+ return 0;
+}
+
+static void octeon_spi_cleanup(struct spi_device *spi)
+{
+ struct octeon_spi_setup *old_setup = spi_get_ctldata(spi);
+ spi_set_ctldata(spi, NULL);
+ kfree(old_setup);
+}
+
+static int octeon_spi_nop_transfer_hardware(struct spi_master *master)
+{
+ return 0;
+}
+
+static int __devinit octeon_spi_probe(struct platform_device *pdev)
+{
+ struct resource *res_mem;
+ struct spi_master *master;
+ struct octeon_spi *p;
+ int err = -ENOENT;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(struct octeon_spi));
+ if (!master)
+ return -ENOMEM;
+ p = spi_master_get_devdata(master);
+ platform_set_drvdata(pdev, p);
+ p->my_master = master;
+
+ res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ if (res_mem == NULL) {
+ dev_err(&pdev->dev, "found no memory resource\n");
+ err = -ENXIO;
+ goto fail;
+ }
+ if (!devm_request_mem_region(&pdev->dev, res_mem->start,
+ resource_size(res_mem), res_mem->name)) {
+ dev_err(&pdev->dev, "request_mem_region failed\n");
+ goto fail;
+ }
+ p->register_base = (u64)devm_ioremap(&pdev->dev, res_mem->start,
+ resource_size(res_mem));
+ if (!p->register_base) {
+ err = -ENOMEM;
+ goto fail;
+ }
+
+ /* Dynamic bus numbering */
+ master->bus_num = -1;
+ master->num_chipselect = 4;
+ master->mode_bits = SPI_CPHA |
+ SPI_CPOL |
+ SPI_CS_HIGH |
+ SPI_LSB_FIRST |
+ SPI_3WIRE;
+
+ master->setup = octeon_spi_setup;
+ master->cleanup = octeon_spi_cleanup;
+ master->prepare_transfer_hardware = octeon_spi_nop_transfer_hardware;
+ master->transfer_one_message = octeon_spi_transfer_one_message;
+ master->unprepare_transfer_hardware = octeon_spi_nop_transfer_hardware;
+
+ master->dev.of_node = pdev->dev.of_node;
+ err = spi_register_master(master);
+ if (err) {
+ dev_err(&pdev->dev, "register master failed: %d\n", err);
+ goto fail;
+ }
+
+ dev_info(&pdev->dev, "OCTEON SPI bus driver\n");
+
+ return 0;
+fail:
+ spi_master_put(master);
+ return err;
+}
+
+static int __devexit octeon_spi_remove(struct platform_device *pdev)
+{
+ struct octeon_spi *p = platform_get_drvdata(pdev);
+ u64 register_base = p->register_base;
+
+ spi_unregister_master(p->my_master);
+
+ /* Clear the CSENA* and put everything in a known state. */
+ cvmx_write_csr(register_base + OCTEON_SPI_CFG, 0);
+
+ return 0;
+}
+
+static struct of_device_id octeon_spi_match[] = {
+ { .compatible = "cavium,octeon-3010-spi", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, octeon_spi_match);
+
+static struct platform_driver octeon_spi_driver = {
+ .driver = {
+ .name = "spi-octeon",
+ .owner = THIS_MODULE,
+ .of_match_table = octeon_spi_match,
+ },
+ .probe = octeon_spi_probe,
+ .remove = __devexit_p(octeon_spi_remove),
+};
+
+module_platform_driver(octeon_spi_driver);
+
+MODULE_DESCRIPTION("Cavium, Inc. OCTEON SPI bus driver");
+MODULE_AUTHOR("David Daney");
+MODULE_LICENSE("GPL");
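
For reference, a client transfer exercising the new driver goes through the usual SPI core entry points; a hypothetical demo_read() helper (not part of the patch) is sketched below. Transfers longer than OCTEON_SPI_MAX_BYTES (9) are split by octeon_spi_do_transfer() into 9-byte bursts, with chip select held asserted between bursts:

static int demo_read(struct spi_device *spi, u8 *buf, size_t len)
{
	struct spi_transfer t = {
		.rx_buf = buf,
		.len = len,
	};
	struct spi_message m;

	spi_message_init(&m);
	spi_message_add_tail(&t, &m);
	return spi_sync(spi, &m);	/* reaches octeon_spi_transfer_one_message() */
}
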
diff --git a/drivers/spi/spi-omap-100k.c b/drivers/spi/spi-omap-100k.c
index 9bd1c92..dfb4b7f 100644
--- a/drivers/spi/spi-omap-100k.c
+++ b/drivers/spi/spi-omap-100k.c
@@ -37,8 +37,6 @@
#include <linux/spi/spi.h>
-#include <plat/clock.h>
-
#define OMAP1_SPI100K_MAX_FREQ 48000000
#define ICR_SPITAS (OMAP7XX_ICR_BASE + 0x12)
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 474e217..3542fdc 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -43,7 +43,6 @@
#include <linux/spi/spi.h>
-#include <plat/clock.h>
#include <linux/platform_data/spi-omap2-mcspi.h>
#define OMAP2_MCSPI_MAX_FREQ 48000000
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 94a740d..634b9ae 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -332,7 +332,6 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
if (vma->vm_file)
fput(vma->vm_file);
vma->vm_file = asma->file;
- vma->vm_flags |= VM_CAN_NONLINEAR;
out:
mutex_unlock(&ashmem_mutex);
diff --git a/drivers/staging/omapdrm/omap_gem_dmabuf.c b/drivers/staging/omapdrm/omap_gem_dmabuf.c
index 42728e0..c6f3ef6 100644
--- a/drivers/staging/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/staging/omapdrm/omap_gem_dmabuf.c
@@ -160,7 +160,7 @@ static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
goto out_unlock;
}
- vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
+ vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_ops = obj->dev->driver->gem_vm_ops;
vma->vm_private_data = obj;
vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
diff --git a/drivers/staging/tidspbridge/rmgr/drv_interface.c b/drivers/staging/tidspbridge/rmgr/drv_interface.c
index bddea1d..701a11a 100644
--- a/drivers/staging/tidspbridge/rmgr/drv_interface.c
+++ b/drivers/staging/tidspbridge/rmgr/drv_interface.c
@@ -261,7 +261,7 @@ static int bridge_mmap(struct file *filp, struct vm_area_struct *vma)
{
u32 status;
- vma->vm_flags |= VM_RESERVED | VM_IO;
+ /* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
dev_dbg(bridge, "%s: vm filp %p start %lx end %lx page_prot %ulx "
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index a783d53..5110f36 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -653,8 +653,6 @@ static int uio_mmap_physical(struct vm_area_struct *vma)
if (mi < 0)
return -EINVAL;
- vma->vm_flags |= VM_IO | VM_RESERVED;
-
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
return remap_pfn_range(vma,
@@ -666,7 +664,7 @@ static int uio_mmap_physical(struct vm_area_struct *vma)
static int uio_mmap_logical(struct vm_area_struct *vma)
{
- vma->vm_flags |= VM_RESERVED;
+ vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_ops = &uio_vm_ops;
uio_vma_open(vma);
return 0;
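
The mmap conversions in the remainder of this series all follow the same rule: VM_RESERVED is gone; drivers that install their own vm_ops set VM_DONTEXPAND | VM_DONTDUMP explicitly, while drivers that call remap_pfn_range() or io_remap_pfn_range() rely on those helpers, which now set VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP themselves. A minimal sketch of the fault-based case, with a hypothetical demo_vm_ops:

static const struct vm_operations_struct demo_vm_ops = {
	/* .fault populates pages on demand */
};

static int demo_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;	/* was VM_RESERVED */
	vma->vm_ops = &demo_vm_ops;
	return 0;
}
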
diff --git a/drivers/usb/core/usb-acpi.c b/drivers/usb/core/usb-acpi.c
index 0ef7d42..cef4252 100644
--- a/drivers/usb/core/usb-acpi.c
+++ b/drivers/usb/core/usb-acpi.c
@@ -87,7 +87,7 @@ static int usb_acpi_check_port_connect_type(struct usb_device *hdev,
acpi_status status;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *upc;
- struct acpi_pld pld;
+ struct acpi_pld_info *pld;
int ret = 0;
/*
@@ -111,16 +111,17 @@ static int usb_acpi_check_port_connect_type(struct usb_device *hdev,
}
if (upc->package.elements[0].integer.value)
- if (pld.user_visible)
+ if (pld->user_visible)
usb_set_hub_port_connect_type(hdev, port1,
USB_PORT_CONNECT_TYPE_HOT_PLUG);
else
usb_set_hub_port_connect_type(hdev, port1,
USB_PORT_CONNECT_TYPE_HARD_WIRED);
- else if (!pld.user_visible)
+ else if (!pld->user_visible)
usb_set_hub_port_connect_type(hdev, port1, USB_PORT_NOT_USED);
out:
+ ACPI_FREE(pld);
kfree(upc);
return ret;
}
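
The usb-acpi hunk tracks an ACPICA change: _PLD data now comes back as a heap-allocated struct acpi_pld_info instead of being copied into a caller-provided struct acpi_pld, so the caller must free it. A sketch of the assumed call pattern; acpi_get_physical_device_location() is the helper this series appears to rely on, so treat the exact call as an assumption:

	struct acpi_pld_info *pld;

	if (!acpi_get_physical_device_location(handle, &pld))
		return 0;		/* no _PLD for this port */
	if (pld->user_visible)
		; /* visible connector: hot-pluggable */
	ACPI_FREE(pld);			/* caller owns the ACPICA buffer */
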
diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
index 91cd850..9a62e89 100644
--- a/drivers/usb/mon/mon_bin.c
+++ b/drivers/usb/mon/mon_bin.c
@@ -1247,7 +1247,7 @@ static int mon_bin_mmap(struct file *filp, struct vm_area_struct *vma)
{
/* don't do anything here: "fault" will set up page table entries */
vma->vm_ops = &mon_bin_vm_ops;
- vma->vm_flags |= VM_RESERVED;
+ vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_private_data = filp->private_data;
mon_bin_vma_open(vma);
return 0;
diff --git a/drivers/usb/musb/musb_io.h b/drivers/usb/musb/musb_io.h
index f7c1c8e..565ad16 100644
--- a/drivers/usb/musb/musb_io.h
+++ b/drivers/usb/musb/musb_io.h
@@ -40,7 +40,8 @@
#if !defined(CONFIG_ARM) && !defined(CONFIG_SUPERH) \
&& !defined(CONFIG_AVR32) && !defined(CONFIG_PPC32) \
&& !defined(CONFIG_PPC64) && !defined(CONFIG_BLACKFIN) \
- && !defined(CONFIG_MIPS) && !defined(CONFIG_M68K)
+ && !defined(CONFIG_MIPS) && !defined(CONFIG_M68K) \
+ && !defined(CONFIG_XTENSA)
static inline void readsl(const void __iomem *addr, void *buf, int len)
{ insl((unsigned long)addr, buf, len); }
static inline void readsw(const void __iomem *addr, void *buf, int len)
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 6968b72..6d369fe 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -461,7 +461,7 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
}
vma->vm_private_data = vdev;
- vma->vm_flags |= (VM_IO | VM_RESERVED);
+ vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
phys = (pci_resource_start(pdev, index) >> PAGE_SHIFT) + pgoff;
diff --git a/drivers/video/68328fb.c b/drivers/video/68328fb.c
index a425d65..fa44fbe 100644
--- a/drivers/video/68328fb.c
+++ b/drivers/video/68328fb.c
@@ -400,7 +400,7 @@ static int mc68x328fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
#ifndef MMU
/* this is uClinux (no MMU) specific code */
- vma->vm_flags |= VM_RESERVED;
+ vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_start = videomemory;
return 0;
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index 3f2e8c1..868932f 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -1942,8 +1942,7 @@ static int atyfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
off = vma->vm_pgoff << PAGE_SHIFT;
size = vma->vm_end - vma->vm_start;
- /* To stop the swapper from even considering these pages. */
- vma->vm_flags |= (VM_IO | VM_RESERVED);
+ /* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */
if (((vma->vm_pgoff == 0) && (size == info->fix.smem_len)) ||
((off == info->fix.smem_len) && (size == PAGE_SIZE)))
diff --git a/drivers/video/fb-puv3.c b/drivers/video/fb-puv3.c
index 60a787f..7d106f1f 100644
--- a/drivers/video/fb-puv3.c
+++ b/drivers/video/fb-puv3.c
@@ -653,9 +653,8 @@ int unifb_mmap(struct fb_info *info,
vma->vm_page_prot))
return -EAGAIN;
- vma->vm_flags |= VM_RESERVED; /* avoid to swap out this VMA */
+ /* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */
return 0;
-
}
static struct fb_ops unifb_ops = {
diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c
index 64cda56..88cad6b 100644
--- a/drivers/video/fb_defio.c
+++ b/drivers/video/fb_defio.c
@@ -166,7 +166,7 @@ static const struct address_space_operations fb_deferred_io_aops = {
static int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
vma->vm_ops = &fb_deferred_io_vm_ops;
- vma->vm_flags |= ( VM_RESERVED | VM_DONTEXPAND );
+ vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
if (!(info->flags & FBINFO_VIRTFB))
vma->vm_flags |= VM_IO;
vma->vm_private_data = info;
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index 0dff12a..3ff0105 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -1410,8 +1410,7 @@ fb_mmap(struct file *file, struct vm_area_struct * vma)
return -EINVAL;
off += start;
vma->vm_pgoff = off >> PAGE_SHIFT;
- /* This is an IO map - tell maydump to skip this VMA */
- vma->vm_flags |= VM_IO | VM_RESERVED;
+ /* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by io_remap_pfn_range() */
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
fb_pgprotect(file, vma, off);
if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
diff --git a/drivers/video/gbefb.c b/drivers/video/gbefb.c
index 7e7b7a9..05e2a8a 100644
--- a/drivers/video/gbefb.c
+++ b/drivers/video/gbefb.c
@@ -1024,7 +1024,7 @@ static int gbefb_mmap(struct fb_info *info,
pgprot_val(vma->vm_page_prot) =
pgprot_fb(pgprot_val(vma->vm_page_prot));
- vma->vm_flags |= VM_IO | VM_RESERVED;
+ /* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */
/* look for the starting tile */
tile = &gbe_tiles.cpu[offset >> TILE_SHIFT];
diff --git a/drivers/video/omap2/omapfb/omapfb-main.c b/drivers/video/omap2/omapfb/omapfb-main.c
index 3c39aa8..15373f4 100644
--- a/drivers/video/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/omap2/omapfb/omapfb-main.c
@@ -1128,7 +1128,7 @@ static int omapfb_mmap(struct fb_info *fbi, struct vm_area_struct *vma)
DBG("user mmap region start %lx, len %d, off %lx\n", start, len, off);
vma->vm_pgoff = off >> PAGE_SHIFT;
- vma->vm_flags |= VM_IO | VM_RESERVED;
+ /* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
vma->vm_ops = &mmap_user_ops;
vma->vm_private_data = rg;
diff --git a/drivers/video/sbuslib.c b/drivers/video/sbuslib.c
index 3c1de98..296afae 100644
--- a/drivers/video/sbuslib.c
+++ b/drivers/video/sbuslib.c
@@ -57,9 +57,8 @@ int sbusfb_mmap_helper(struct sbus_mmap_map *map,
off = vma->vm_pgoff << PAGE_SHIFT;
- /* To stop the swapper from even considering these pages */
- vma->vm_flags |= (VM_IO | VM_RESERVED);
-
+ /* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */
+
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
/* Each page, see which map applies */
diff --git a/drivers/video/smscufx.c b/drivers/video/smscufx.c
index 5533a32..97bd662 100644
--- a/drivers/video/smscufx.c
+++ b/drivers/video/smscufx.c
@@ -803,7 +803,6 @@ static int ufx_ops_mmap(struct fb_info *info, struct vm_area_struct *vma)
size = 0;
}
- vma->vm_flags |= VM_RESERVED; /* avoid to swap out this VMA */
return 0;
}
diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
index 8af6414..f45eba3 100644
--- a/drivers/video/udlfb.c
+++ b/drivers/video/udlfb.c
@@ -345,7 +345,6 @@ static int dlfb_ops_mmap(struct fb_info *info, struct vm_area_struct *vma)
size = 0;
}
- vma->vm_flags |= VM_RESERVED; /* avoid to swap out this VMA */
return 0;
}
diff --git a/drivers/video/vermilion/vermilion.c b/drivers/video/vermilion/vermilion.c
index 970e43d..89aef34 100644
--- a/drivers/video/vermilion/vermilion.c
+++ b/drivers/video/vermilion/vermilion.c
@@ -1018,7 +1018,6 @@ static int vmlfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
offset += vinfo->vram_start;
pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT;
- vma->vm_flags |= VM_RESERVED | VM_IO;
if (remap_pfn_range(vma, vma->vm_start, offset >> PAGE_SHIFT,
size, vma->vm_page_prot))
return -EAGAIN;
diff --git a/drivers/video/vfb.c b/drivers/video/vfb.c
index 501a922..c7f69252 100644
--- a/drivers/video/vfb.c
+++ b/drivers/video/vfb.c
@@ -439,7 +439,6 @@ static int vfb_mmap(struct fb_info *info,
size = 0;
}
- vma->vm_flags |= VM_RESERVED; /* avoid to swap out this VMA */
return 0;
}
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
index f38b17a..8d5bddb 100644
--- a/drivers/virtio/Kconfig
+++ b/drivers/virtio/Kconfig
@@ -1,11 +1,9 @@
-# Virtio always gets selected by whoever wants it.
config VIRTIO
tristate
-
-# Similarly the virtio ring implementation.
-config VIRTIO_RING
- tristate
- depends on VIRTIO
+ ---help---
+ This option is selected by any driver which implements the virtio
+ bus, such as CONFIG_VIRTIO_PCI, CONFIG_VIRTIO_MMIO, CONFIG_LGUEST,
+ CONFIG_RPMSG or CONFIG_S390_GUEST.
menu "Virtio drivers"
@@ -13,7 +11,6 @@ config VIRTIO_PCI
tristate "PCI driver for virtio devices (EXPERIMENTAL)"
depends on PCI && EXPERIMENTAL
select VIRTIO
- select VIRTIO_RING
---help---
This driver provides support for virtio based paravirtual device
drivers over PCI. This requires that your VMM has appropriate PCI
@@ -26,9 +23,8 @@ config VIRTIO_PCI
If unsure, say M.
config VIRTIO_BALLOON
- tristate "Virtio balloon driver (EXPERIMENTAL)"
- select VIRTIO
- select VIRTIO_RING
+ tristate "Virtio balloon driver"
+ depends on VIRTIO
---help---
This driver supports increasing and decreasing the amount
of memory within a KVM guest.
@@ -39,7 +35,6 @@ config VIRTIO_BALLOON
tristate "Platform bus driver for memory mapped virtio devices (EXPERIMENTAL)"
depends on HAS_IOMEM && EXPERIMENTAL
select VIRTIO
- select VIRTIO_RING
---help---
This driver provides support for memory mapped virtio
platform devices.
diff --git a/drivers/virtio/Makefile b/drivers/virtio/Makefile
index 5a4c63c..9076635 100644
--- a/drivers/virtio/Makefile
+++ b/drivers/virtio/Makefile
@@ -1,5 +1,4 @@
-obj-$(CONFIG_VIRTIO) += virtio.o
-obj-$(CONFIG_VIRTIO_RING) += virtio_ring.o
+obj-$(CONFIG_VIRTIO) += virtio.o virtio_ring.o
obj-$(CONFIG_VIRTIO_MMIO) += virtio_mmio.o
obj-$(CONFIG_VIRTIO_PCI) += virtio_pci.o
obj-$(CONFIG_VIRTIO_BALLOON) += virtio_balloon.o
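
With CONFIG_VIRTIO_RING folded into CONFIG_VIRTIO, the ring code is always built with the core, and a virtio driver's Kconfig entry needs only the one symbol. A sketch of the new convention for a hypothetical driver:

config VIRTIO_DEMO
	tristate "Demo virtio driver"
	depends on VIRTIO
	---help---
	  A separate "select VIRTIO_RING" is no longer needed; the ring
	  implementation is built whenever CONFIG_VIRTIO is enabled.
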
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index c3b3f7f..1e8659c 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -159,7 +159,7 @@ static int virtio_dev_remove(struct device *_d)
drv->remove(dev);
/* Driver should have reset device. */
- BUG_ON(dev->config->get_status(dev));
+ WARN_ON_ONCE(dev->config->get_status(dev));
/* Acknowledge the device's existence again. */
add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index 453db0c..6b1b7e1 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -131,9 +131,6 @@ struct virtio_mmio_vq_info {
/* the number of entries in the queue */
unsigned int num;
- /* the index of the queue */
- int queue_index;
-
/* the virtual address of the ring queue */
void *queue;
@@ -225,11 +222,10 @@ static void vm_reset(struct virtio_device *vdev)
static void vm_notify(struct virtqueue *vq)
{
struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);
- struct virtio_mmio_vq_info *info = vq->priv;
/* We write the queue's selector into the notification register to
* signal the other end */
- writel(info->queue_index, vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY);
+ writel(virtqueue_get_queue_index(vq), vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY);
}
/* Notify all virtqueues on an interrupt. */
@@ -270,6 +266,7 @@ static void vm_del_vq(struct virtqueue *vq)
struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);
struct virtio_mmio_vq_info *info = vq->priv;
unsigned long flags, size;
+ unsigned int index = virtqueue_get_queue_index(vq);
spin_lock_irqsave(&vm_dev->lock, flags);
list_del(&info->node);
@@ -278,7 +275,7 @@ static void vm_del_vq(struct virtqueue *vq)
vring_del_virtqueue(vq);
/* Select and deactivate the queue */
- writel(info->queue_index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL);
+ writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL);
writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
size = PAGE_ALIGN(vring_size(info->num, VIRTIO_MMIO_VRING_ALIGN));
@@ -309,6 +306,9 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index,
unsigned long flags, size;
int err;
+ if (!name)
+ return NULL;
+
/* Select the queue we're interested in */
writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL);
@@ -324,7 +324,6 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index,
err = -ENOMEM;
goto error_kmalloc;
}
- info->queue_index = index;
/* Allocate pages for the queue - start with a queue as big as
* possible (limited by maximum size allowed by device), drop down
@@ -332,11 +331,21 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index,
* and two rings (which makes it "alignment_size * 2")
*/
info->num = readl(vm_dev->base + VIRTIO_MMIO_QUEUE_NUM_MAX);
+
+ /* If the device reports a 0 entry queue, we won't be able to
+ * use it to perform I/O, and vring_new_virtqueue() can't create
+ * empty queues anyway, so don't bother to set up the device.
+ */
+ if (info->num == 0) {
+ err = -ENOENT;
+ goto error_alloc_pages;
+ }
+
while (1) {
size = PAGE_ALIGN(vring_size(info->num,
VIRTIO_MMIO_VRING_ALIGN));
- /* Already smallest possible allocation? */
- if (size <= VIRTIO_MMIO_VRING_ALIGN * 2) {
+ /* Did the last iteration shrink the queue below the minimum size? */
+ if (size < VIRTIO_MMIO_VRING_ALIGN * 2) {
err = -ENOMEM;
goto error_alloc_pages;
}
@@ -356,7 +365,7 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index,
vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
/* Create the vring */
- vq = vring_new_virtqueue(info->num, VIRTIO_MMIO_VRING_ALIGN, vdev,
+ vq = vring_new_virtqueue(index, info->num, VIRTIO_MMIO_VRING_ALIGN, vdev,
true, info->queue, vm_notify, callback, name);
if (!vq) {
err = -ENOMEM;
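
The relaxed size check above matches the vring layout: vring_size() covers the descriptor table plus the avail ring, rounded up to the alignment, followed by the used ring. A worked example assuming 4096-byte pages (VIRTIO_MMIO_VRING_ALIGN == PAGE_SIZE):

	/*
	 * num = 1, align = 4096:
	 *   part 1: 1 desc (16 bytes) + avail ring (8 bytes) -> rounds up to 4096
	 *   part 2: used ring (14 bytes)                     -> 4110 total
	 *   PAGE_ALIGN(4110) = 8192 = 2 * align
	 * The smallest legal ring is therefore exactly 2 * align; the old
	 * "size <= align * 2" test rejected it, the new "size < align * 2"
	 * test accepts it.
	 */
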
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index 2e03d41..c33aea3 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -48,6 +48,7 @@ struct virtio_pci_device
int msix_enabled;
int intx_enabled;
struct msix_entry *msix_entries;
+ cpumask_var_t *msix_affinity_masks;
/* Name strings for interrupts. This size should be enough,
* and I'm too lazy to allocate each name separately. */
char (*msix_names)[256];
@@ -79,9 +80,6 @@ struct virtio_pci_vq_info
/* the number of entries in the queue */
int num;
- /* the index of the queue */
- int queue_index;
-
/* the virtual address of the ring queue */
void *queue;
@@ -202,11 +200,11 @@ static void vp_reset(struct virtio_device *vdev)
static void vp_notify(struct virtqueue *vq)
{
struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
- struct virtio_pci_vq_info *info = vq->priv;
/* we write the queue's selector into the notification register to
* signal the other end */
- iowrite16(info->queue_index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY);
+ iowrite16(virtqueue_get_queue_index(vq),
+ vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY);
}
/* Handle a configuration change: Tell driver if it wants to know. */
@@ -279,6 +277,10 @@ static void vp_free_vectors(struct virtio_device *vdev)
for (i = 0; i < vp_dev->msix_used_vectors; ++i)
free_irq(vp_dev->msix_entries[i].vector, vp_dev);
+ for (i = 0; i < vp_dev->msix_vectors; i++)
+ if (vp_dev->msix_affinity_masks[i])
+ free_cpumask_var(vp_dev->msix_affinity_masks[i]);
+
if (vp_dev->msix_enabled) {
/* Disable the vector used for configuration */
iowrite16(VIRTIO_MSI_NO_VECTOR,
@@ -296,6 +298,8 @@ static void vp_free_vectors(struct virtio_device *vdev)
vp_dev->msix_names = NULL;
kfree(vp_dev->msix_entries);
vp_dev->msix_entries = NULL;
+ kfree(vp_dev->msix_affinity_masks);
+ vp_dev->msix_affinity_masks = NULL;
}
static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
@@ -314,6 +318,15 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
GFP_KERNEL);
if (!vp_dev->msix_names)
goto error;
+ vp_dev->msix_affinity_masks
+ = kzalloc(nvectors * sizeof *vp_dev->msix_affinity_masks,
+ GFP_KERNEL);
+ if (!vp_dev->msix_affinity_masks)
+ goto error;
+ for (i = 0; i < nvectors; ++i)
+ if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i],
+ GFP_KERNEL))
+ goto error;
for (i = 0; i < nvectors; ++i)
vp_dev->msix_entries[i].entry = i;
@@ -402,7 +415,6 @@ static struct virtqueue *setup_vq(struct virtio_device *vdev, unsigned index,
if (!info)
return ERR_PTR(-ENOMEM);
- info->queue_index = index;
info->num = num;
info->msix_vector = msix_vec;
@@ -418,7 +430,7 @@ static struct virtqueue *setup_vq(struct virtio_device *vdev, unsigned index,
vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
/* create the vring */
- vq = vring_new_virtqueue(info->num, VIRTIO_PCI_VRING_ALIGN, vdev,
+ vq = vring_new_virtqueue(index, info->num, VIRTIO_PCI_VRING_ALIGN, vdev,
true, info->queue, vp_notify, callback, name);
if (!vq) {
err = -ENOMEM;
@@ -467,7 +479,8 @@ static void vp_del_vq(struct virtqueue *vq)
list_del(&info->node);
spin_unlock_irqrestore(&vp_dev->lock, flags);
- iowrite16(info->queue_index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
+ iowrite16(virtqueue_get_queue_index(vq),
+ vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
if (vp_dev->msix_enabled) {
iowrite16(VIRTIO_MSI_NO_VECTOR,
@@ -542,7 +555,10 @@ static int vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs,
vp_dev->per_vq_vectors = per_vq_vectors;
allocated_vectors = vp_dev->msix_used_vectors;
for (i = 0; i < nvqs; ++i) {
- if (!callbacks[i] || !vp_dev->msix_enabled)
+ if (!names[i]) {
+ vqs[i] = NULL;
+ continue;
+ } else if (!callbacks[i] || !vp_dev->msix_enabled)
msix_vec = VIRTIO_MSI_NO_VECTOR;
else if (vp_dev->per_vq_vectors)
msix_vec = allocated_vectors++;
@@ -609,6 +625,35 @@ static const char *vp_bus_name(struct virtio_device *vdev)
return pci_name(vp_dev->pci_dev);
}
+/* Set up the affinity for a virtqueue:
+ * - force the affinity for a per-vq vector
+ * - OR over all affinities for shared MSI
+ * - ignore the affinity request if we're using INTx
+ */
+static int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
+{
+ struct virtio_device *vdev = vq->vdev;
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ struct virtio_pci_vq_info *info = vq->priv;
+ struct cpumask *mask;
+ unsigned int irq;
+
+ if (!vq->callback)
+ return -EINVAL;
+
+ if (vp_dev->msix_enabled) {
+ mask = vp_dev->msix_affinity_masks[info->msix_vector];
+ irq = vp_dev->msix_entries[info->msix_vector].vector;
+ if (cpu == -1)
+ irq_set_affinity_hint(irq, NULL);
+ else {
+ cpumask_set_cpu(cpu, mask);
+ irq_set_affinity_hint(irq, mask);
+ }
+ }
+ return 0;
+}
+
static struct virtio_config_ops virtio_pci_config_ops = {
.get = vp_get,
.set = vp_set,
@@ -620,6 +665,7 @@ static struct virtio_config_ops virtio_pci_config_ops = {
.get_features = vp_get_features,
.finalize_features = vp_finalize_features,
.bus_name = vp_bus_name,
+ .set_vq_affinity = vp_set_vq_affinity,
};
static void virtio_pci_release_dev(struct device *_d)
@@ -673,8 +719,10 @@ static int __devinit virtio_pci_probe(struct pci_dev *pci_dev,
goto out_enable_device;
vp_dev->ioaddr = pci_iomap(pci_dev, 0, 0);
- if (vp_dev->ioaddr == NULL)
+ if (vp_dev->ioaddr == NULL) {
+ err = -ENOMEM;
goto out_req_regions;
+ }
pci_set_drvdata(pci_dev, vp_dev);
pci_set_master(pci_dev);
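
The new set_vq_affinity op lets a multiqueue driver steer each queue's MSI-X vector to the CPU that services it. A sketch of the intended use, assuming the virtqueue_set_affinity() wrapper that accompanies this op in the core, and hypothetical vqs[]/nvqs from the driver:

	int cpu, i = 0;

	for_each_online_cpu(cpu) {
		if (i == nvqs)
			break;
		virtqueue_set_affinity(vqs[i++], cpu);	/* pin vq i to this cpu */
	}
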
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 5aa43c3..e639584 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -106,6 +106,9 @@ struct vring_virtqueue
/* How to notify other side. FIXME: commonalize hcalls! */
void (*notify)(struct virtqueue *vq);
+ /* Index of the queue */
+ int queue_index;
+
#ifdef DEBUG
/* They're supposed to lock for us. */
unsigned int in_use;
@@ -171,6 +174,13 @@ static int vring_add_indirect(struct vring_virtqueue *vq,
return head;
}
+int virtqueue_get_queue_index(struct virtqueue *_vq)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+ return vq->queue_index;
+}
+EXPORT_SYMBOL_GPL(virtqueue_get_queue_index);
+
/**
* virtqueue_add_buf - expose buffer to other end
* @vq: the struct virtqueue we're talking about.
@@ -616,7 +626,8 @@ irqreturn_t vring_interrupt(int irq, void *_vq)
}
EXPORT_SYMBOL_GPL(vring_interrupt);
-struct virtqueue *vring_new_virtqueue(unsigned int num,
+struct virtqueue *vring_new_virtqueue(unsigned int index,
+ unsigned int num,
unsigned int vring_align,
struct virtio_device *vdev,
bool weak_barriers,
@@ -647,6 +658,7 @@ struct virtqueue *vring_new_virtqueue(unsigned int num,
vq->broken = false;
vq->last_used_idx = 0;
vq->num_added = 0;
+ vq->queue_index = index;
list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
vq->in_use = false;
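
With the index stored in the vring core, transports no longer keep a per-queue queue_index and instead derive the selector from the virtqueue itself, as the mmio and PCI notify hooks above now do. The pattern for a hypothetical memory-mapped transport (demo_base and DEMO_QUEUE_NOTIFY are stand-ins):

static void __iomem *demo_base;
#define DEMO_QUEUE_NOTIFY 0x50

static void demo_notify(struct virtqueue *vq)
{
	/* write the queue selector to kick the device */
	writel(virtqueue_get_queue_index(vq), demo_base + DEMO_QUEUE_NOTIFY);
}
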
diff --git a/drivers/watchdog/m54xx_wdt.c b/drivers/watchdog/m54xx_wdt.c
index 663cad8..173494a 100644
--- a/drivers/watchdog/m54xx_wdt.c
+++ b/drivers/watchdog/m54xx_wdt.c
@@ -46,17 +46,17 @@ static void wdt_enable(void)
unsigned int gms0;
/* preserve GPIO usage, if any */
- gms0 = __raw_readl(MCF_MBAR + MCF_GPT_GMS0);
+ gms0 = __raw_readl(MCF_GPT_GMS0);
if (gms0 & MCF_GPT_GMS_TMS_GPIO)
gms0 &= (MCF_GPT_GMS_TMS_GPIO | MCF_GPT_GMS_GPIO_MASK
| MCF_GPT_GMS_OD);
else
gms0 = MCF_GPT_GMS_TMS_GPIO | MCF_GPT_GMS_OD;
- __raw_writel(gms0, MCF_MBAR + MCF_GPT_GMS0);
+ __raw_writel(gms0, MCF_GPT_GMS0);
__raw_writel(MCF_GPT_GCIR_PRE(heartbeat*(MCF_BUSCLK/0xffff)) |
- MCF_GPT_GCIR_CNT(0xffff), MCF_MBAR + MCF_GPT_GCIR0);
+ MCF_GPT_GCIR_CNT(0xffff), MCF_GPT_GCIR0);
gms0 |= MCF_GPT_GMS_OCPW(0xA5) | MCF_GPT_GMS_WDEN | MCF_GPT_GMS_CE;
- __raw_writel(gms0, MCF_MBAR + MCF_GPT_GMS0);
+ __raw_writel(gms0, MCF_GPT_GMS0);
}
static void wdt_disable(void)
@@ -64,18 +64,18 @@ static void wdt_disable(void)
unsigned int gms0;
/* disable watchdog */
- gms0 = __raw_readl(MCF_MBAR + MCF_GPT_GMS0);
+ gms0 = __raw_readl(MCF_GPT_GMS0);
gms0 &= ~(MCF_GPT_GMS_WDEN | MCF_GPT_GMS_CE);
- __raw_writel(gms0, MCF_MBAR + MCF_GPT_GMS0);
+ __raw_writel(gms0, MCF_GPT_GMS0);
}
static void wdt_keepalive(void)
{
unsigned int gms0;
- gms0 = __raw_readl(MCF_MBAR + MCF_GPT_GMS0);
+ gms0 = __raw_readl(MCF_GPT_GMS0);
gms0 |= MCF_GPT_GMS_OCPW(0xA5);
- __raw_writel(gms0, MCF_MBAR + MCF_GPT_GMS0);
+ __raw_writel(gms0, MCF_GPT_GMS0);
}
static int m54xx_wdt_open(struct inode *inode, struct file *file)
@@ -195,8 +195,7 @@ static struct miscdevice m54xx_wdt_miscdev = {
static int __init m54xx_wdt_init(void)
{
- if (!request_mem_region(MCF_MBAR + MCF_GPT_GCIR0, 4,
- "Coldfire M54xx Watchdog")) {
+ if (!request_mem_region(MCF_GPT_GCIR0, 4, "Coldfire M54xx Watchdog")) {
pr_warn("I/O region busy\n");
return -EBUSY;
}
@@ -208,7 +207,7 @@ static int __init m54xx_wdt_init(void)
static void __exit m54xx_wdt_exit(void)
{
misc_deregister(&m54xx_wdt_miscdev);
- release_mem_region(MCF_MBAR + MCF_GPT_GCIR0, 4);
+ release_mem_region(MCF_GPT_GCIR0, 4);
}
module_init(m54xx_wdt_init);
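
The watchdog hunks assume the m54xx platform headers now fold the MBAR base into the MCF_GPT_* register macros, so callers stop adding MCF_MBAR by hand. The header-side counterpart would look roughly like this; the offsets here are illustrative only:

#define MCF_GPT_GMS0	(MCF_MBAR + 0x000800)	/* hypothetical offset */
#define MCF_GPT_GCIR0	(MCF_MBAR + 0x000808)	/* hypothetical offset */
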
diff --git a/drivers/xen/gntalloc.c b/drivers/xen/gntalloc.c
index 934985d..4097987 100644
--- a/drivers/xen/gntalloc.c
+++ b/drivers/xen/gntalloc.c
@@ -535,7 +535,7 @@ static int gntalloc_mmap(struct file *filp, struct vm_area_struct *vma)
vma->vm_private_data = vm_priv;
- vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
+ vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_ops = &gntalloc_vmops;
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 5df9fd8..610bfc6be 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -720,7 +720,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
vma->vm_ops = &gntdev_vmops;
- vma->vm_flags |= VM_RESERVED|VM_DONTEXPAND;
+ vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
if (use_ptemod)
vma->vm_flags |= VM_DONTCOPY;
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index ef63895..8adb9cc 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -455,7 +455,8 @@ static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
{
/* DONTCOPY is essential for Xen because copy_page_range doesn't know
* how to recreate these mappings */
- vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY | VM_PFNMAP;
+ vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTCOPY |
+ VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_ops = &privcmd_vm_ops;
vma->vm_private_data = NULL;