From 8c99268431a117207a89be5167ecd69429fd4bda Mon Sep 17 00:00:00 2001 From: Anatolij Gustschin Date: Wed, 1 Jun 2011 18:36:49 +0200 Subject: spi/bitbang: initialize bits_per_word as specified by spi message SPI protocol drivers can submit messages specifying needed bits_per_word parameter for a message transfer. The bitbang driver currently ignores bits_per_word given by a singe message and always uses master's bits_per_word parameter. Only use master's bits_per_word when a message didn't specify needed bits_per_word for ongoing transfer. Signed-off-by: Anatolij Gustschin Signed-off-by: Grant Likely diff --git a/drivers/spi/spi_bitbang.c b/drivers/spi/spi_bitbang.c index 14a63f6..bb38c83 100644 --- a/drivers/spi/spi_bitbang.c +++ b/drivers/spi/spi_bitbang.c @@ -68,7 +68,7 @@ static unsigned bitbang_txrx_8( unsigned ns, struct spi_transfer *t ) { - unsigned bits = spi->bits_per_word; + unsigned bits = t->bits_per_word ? : spi->bits_per_word; unsigned count = t->len; const u8 *tx = t->tx_buf; u8 *rx = t->rx_buf; @@ -94,7 +94,7 @@ static unsigned bitbang_txrx_16( unsigned ns, struct spi_transfer *t ) { - unsigned bits = spi->bits_per_word; + unsigned bits = t->bits_per_word ? : spi->bits_per_word; unsigned count = t->len; const u16 *tx = t->tx_buf; u16 *rx = t->rx_buf; @@ -120,7 +120,7 @@ static unsigned bitbang_txrx_32( unsigned ns, struct spi_transfer *t ) { - unsigned bits = spi->bits_per_word; + unsigned bits = t->bits_per_word ? : spi->bits_per_word; unsigned count = t->len; const u32 *tx = t->tx_buf; u32 *rx = t->rx_buf; -- cgit v0.10.2 From ca632f556697d45d67ed5cada7cedf3ddfe0db4b Mon Sep 17 00:00:00 2001 From: Grant Likely Date: Mon, 6 Jun 2011 01:16:30 -0600 Subject: spi: reorganize drivers Sort the SPI makefile and enforce the naming convention spi_*.c for spi drivers. This change also rolls the contents of atmel_spi.h into the .c file since there is only one user of that particular include file. v2: - Use 'spi-' prefix instead of 'spi_' to match what seems to be be the predominant pattern for subsystem prefixes. - Clean up filenames in Kconfig and header comment blocks Signed-off-by: Grant Likely Acked-by: Wolfram Sang Acked-by: Linus Walleij diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index de35c3a..9578a84 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -86,9 +86,6 @@ config SPI_BFIN_SPORT help Enable support for a SPI bus via the Blackfin SPORT peripheral. - This driver can also be built as a module. If so, the module - will be called spi_bfin_sport. - config SPI_AU1550 tristate "Au1550/Au12x0 SPI Controller" depends on (SOC_AU1550 || SOC_AU1200) && EXPERIMENTAL @@ -97,9 +94,6 @@ config SPI_AU1550 If you say yes to this option, support will be included for the Au1550 SPI controller (may also work with Au1200,Au1210,Au1250). - This driver can also be built as a module. If so, the module - will be called au1550_spi. - config SPI_BITBANG tristate "Utilities for Bitbanging SPI masters" help @@ -130,9 +124,6 @@ config SPI_COLDFIRE_QSPI This enables support for the Coldfire QSPI controller in master mode. - This driver can also be built as a module. If so, the module - will be called coldfire_qspi. - config SPI_DAVINCI tristate "Texas Instruments DaVinci/DA8x/OMAP-L/AM1x SoC SPI controller" depends on SPI_MASTER && ARCH_DAVINCI @@ -140,9 +131,6 @@ config SPI_DAVINCI help SPI master controller for DaVinci/DA8x/OMAP-L/AM1x SPI modules. - This driver can also be built as a module. The module will be called - davinci_spi. 
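For readers unfamiliar with the "x ? : y" form used in the bitbang patch above: it is a GNU C extension that evaluates to x when x is non-zero and to y otherwise, so a zero (unset) per-transfer bits_per_word falls back to the device's default. Below is a minimal userspace sketch of that fallback; the struct names dev_cfg and xfer_cfg are invented stand-ins for struct spi_device and struct spi_transfer, not kernel types.

#include <stdio.h>

/* Simplified stand-ins for struct spi_device / struct spi_transfer. */
struct dev_cfg  { unsigned bits_per_word; };  /* device (master) default */
struct xfer_cfg { unsigned bits_per_word; };  /* 0 means "not specified" */

static unsigned effective_bits(const struct dev_cfg *dev,
			       const struct xfer_cfg *t)
{
	/* GNU extension: "a ? : b" is "a ? a : b" without re-evaluating a. */
	return t->bits_per_word ? : dev->bits_per_word;
}

int main(void)
{
	struct dev_cfg dev = { .bits_per_word = 8 };
	struct xfer_cfg plain = { 0 };                  /* inherits the default */
	struct xfer_cfg wide  = { .bits_per_word = 16 };

	printf("plain transfer: %u bits/word\n", effective_bits(&dev, &plain));
	printf("wide transfer:  %u bits/word\n", effective_bits(&dev, &wide));
	return 0;
}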
- config SPI_EP93XX tristate "Cirrus Logic EP93xx SPI controller" depends on ARCH_EP93XX @@ -150,9 +138,6 @@ config SPI_EP93XX This enables using the Cirrus EP93xx SPI controller in master mode. - To compile this driver as a module, choose M here. The module will be - called ep93xx_spi. - config SPI_GPIO tristate "GPIO-based bitbanging SPI Master" depends on GENERIC_GPIO @@ -385,9 +370,6 @@ config SPI_TI_SSP This selects an SPI master implementation using a TI sequencer serial port. - To compile this driver as a module, choose M here: the - module will be called ti-ssp-spi. - config SPI_TOPCLIFF_PCH tristate "Topcliff PCH SPI Controller" depends on PCI diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index 0f8c69b..b60b04b 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile @@ -7,68 +7,56 @@ ccflags-$(CONFIG_SPI_DEBUG) := -DDEBUG # small core, mostly translating board-specific # config declarations into driver model code obj-$(CONFIG_SPI_MASTER) += spi.o +obj-$(CONFIG_SPI_SPIDEV) += spidev.o # SPI master controller drivers (bus) -obj-$(CONFIG_SPI_ALTERA) += spi_altera.o -obj-$(CONFIG_SPI_ATMEL) += atmel_spi.o -obj-$(CONFIG_SPI_ATH79) += ath79_spi.o -obj-$(CONFIG_SPI_BFIN) += spi_bfin5xx.o -obj-$(CONFIG_SPI_BFIN_SPORT) += spi_bfin_sport.o -obj-$(CONFIG_SPI_BITBANG) += spi_bitbang.o -obj-$(CONFIG_SPI_AU1550) += au1550_spi.o -obj-$(CONFIG_SPI_BUTTERFLY) += spi_butterfly.o -obj-$(CONFIG_SPI_COLDFIRE_QSPI) += coldfire_qspi.o -obj-$(CONFIG_SPI_DAVINCI) += davinci_spi.o -obj-$(CONFIG_SPI_DESIGNWARE) += dw_spi.o -obj-$(CONFIG_SPI_DW_PCI) += dw_spi_midpci.o -dw_spi_midpci-objs := dw_spi_pci.o dw_spi_mid.o -obj-$(CONFIG_SPI_DW_MMIO) += dw_spi_mmio.o -obj-$(CONFIG_SPI_EP93XX) += ep93xx_spi.o -obj-$(CONFIG_SPI_GPIO) += spi_gpio.o -obj-$(CONFIG_SPI_IMX) += spi_imx.o -obj-$(CONFIG_SPI_LM70_LLP) += spi_lm70llp.o -obj-$(CONFIG_SPI_PXA2XX) += pxa2xx_spi.o -obj-$(CONFIG_SPI_PXA2XX_PCI) += pxa2xx_spi_pci.o -obj-$(CONFIG_SPI_OC_TINY) += spi_oc_tiny.o -obj-$(CONFIG_SPI_OMAP_UWIRE) += omap_uwire.o -obj-$(CONFIG_SPI_OMAP24XX) += omap2_mcspi.o -obj-$(CONFIG_SPI_OMAP_100K) += omap_spi_100k.o -obj-$(CONFIG_SPI_ORION) += orion_spi.o -obj-$(CONFIG_SPI_PL022) += amba-pl022.o -obj-$(CONFIG_SPI_MPC512x_PSC) += mpc512x_psc_spi.o -obj-$(CONFIG_SPI_MPC52xx_PSC) += mpc52xx_psc_spi.o -obj-$(CONFIG_SPI_MPC52xx) += mpc52xx_spi.o -obj-$(CONFIG_SPI_FSL_LIB) += spi_fsl_lib.o -obj-$(CONFIG_SPI_FSL_ESPI) += spi_fsl_espi.o -obj-$(CONFIG_SPI_FSL_SPI) += spi_fsl_spi.o -obj-$(CONFIG_SPI_PPC4xx) += spi_ppc4xx.o -obj-$(CONFIG_SPI_S3C24XX_GPIO) += spi_s3c24xx_gpio.o -obj-$(CONFIG_SPI_S3C24XX) += spi_s3c24xx_hw.o -obj-$(CONFIG_SPI_S3C64XX) += spi_s3c64xx.o -obj-$(CONFIG_SPI_TEGRA) += spi_tegra.o -obj-$(CONFIG_SPI_TI_SSP) += ti-ssp-spi.o -obj-$(CONFIG_SPI_TOPCLIFF_PCH) += spi_topcliff_pch.o -obj-$(CONFIG_SPI_TXX9) += spi_txx9.o -obj-$(CONFIG_SPI_XILINX) += xilinx_spi.o -obj-$(CONFIG_SPI_SH) += spi_sh.o -obj-$(CONFIG_SPI_SH_SCI) += spi_sh_sci.o -obj-$(CONFIG_SPI_SH_MSIOF) += spi_sh_msiof.o -obj-$(CONFIG_SPI_STMP3XXX) += spi_stmp.o -obj-$(CONFIG_SPI_NUC900) += spi_nuc900.o +obj-$(CONFIG_SPI_ALTERA) += spi-altera.o +obj-$(CONFIG_SPI_ATMEL) += spi-atmel.o +obj-$(CONFIG_SPI_ATH79) += spi-ath79.o +obj-$(CONFIG_SPI_AU1550) += spi-au1550.o +obj-$(CONFIG_SPI_BFIN) += spi-bfin5xx.o +obj-$(CONFIG_SPI_BFIN_SPORT) += spi-bfin-sport.o +obj-$(CONFIG_SPI_BITBANG) += spi-bitbang.o +obj-$(CONFIG_SPI_BUTTERFLY) += spi-butterfly.o +obj-$(CONFIG_SPI_COLDFIRE_QSPI) += spi-coldfire-qspi.o +obj-$(CONFIG_SPI_DAVINCI) += spi-davinci.o 
+obj-$(CONFIG_SPI_DESIGNWARE) += spi-dw.o +obj-$(CONFIG_SPI_DW_MMIO) += spi-dw-mmio.o +obj-$(CONFIG_SPI_DW_PCI) += spi-dw-midpci.o +spi-dw-midpci-objs := spi-dw-pci.o spi-dw-mid.o +obj-$(CONFIG_SPI_EP93XX) += spi-ep93xx.o +obj-$(CONFIG_SPI_FSL_LIB) += spi-fsl-lib.o +obj-$(CONFIG_SPI_FSL_ESPI) += spi-fsl-espi.o +obj-$(CONFIG_SPI_FSL_SPI) += spi-fsl-spi.o +obj-$(CONFIG_SPI_GPIO) += spi-gpio.o +obj-$(CONFIG_SPI_IMX) += spi-imx.o +obj-$(CONFIG_SPI_LM70_LLP) += spi-lm70llp.o +obj-$(CONFIG_SPI_MPC512x_PSC) += spi-mpc512x-psc.o +obj-$(CONFIG_SPI_MPC52xx_PSC) += spi-mpc52xx-psc.o +obj-$(CONFIG_SPI_MPC52xx) += spi-mpc52xx.o +obj-$(CONFIG_SPI_NUC900) += spi-nuc900.o +obj-$(CONFIG_SPI_OC_TINY) += spi-oc-tiny.o +obj-$(CONFIG_SPI_OMAP_UWIRE) += spi-omap-uwire.o +obj-$(CONFIG_SPI_OMAP_100K) += spi-omap-100k.o +obj-$(CONFIG_SPI_OMAP24XX) += spi-omap2-mcspi.o +obj-$(CONFIG_SPI_ORION) += spi-orion.o +obj-$(CONFIG_SPI_PL022) += spi-pl022.o +obj-$(CONFIG_SPI_PPC4xx) += spi-ppc4xx.o +obj-$(CONFIG_SPI_PXA2XX) += spi-pxa2xx.o +obj-$(CONFIG_SPI_PXA2XX_PCI) += spi-pxa2xx-pci.o +obj-$(CONFIG_SPI_S3C24XX_GPIO) += spi-s3c24xx-gpio.o +obj-$(CONFIG_SPI_S3C24XX) += spi-s3c24xx-hw.o +spi-s3c24xx-hw-y := spi-s3c24xx.o +spi-s3c24xx-hw-$(CONFIG_SPI_S3C24XX_FIQ) += spi-s3c24xx-fiq.o +obj-$(CONFIG_SPI_S3C64XX) += spi-s3c64xx.o +obj-$(CONFIG_SPI_SH) += spi-sh.o +obj-$(CONFIG_SPI_SH_MSIOF) += spi-sh-msiof.o +obj-$(CONFIG_SPI_SH_SCI) += spi-sh-sci.o +obj-$(CONFIG_SPI_STMP3XXX) += spi-stmp.o +obj-$(CONFIG_SPI_TEGRA) += spi-tegra.o +obj-$(CONFIG_SPI_TI_SSP) += spi-ti-ssp.o +obj-$(CONFIG_SPI_TLE62X0) += spi-tle62x0.o +obj-$(CONFIG_SPI_TOPCLIFF_PCH) += spi-topcliff-pch.o +obj-$(CONFIG_SPI_TXX9) += spi-txx9.o +obj-$(CONFIG_SPI_XILINX) += spi-xilinx.o -# special build for s3c24xx spi driver with fiq support -spi_s3c24xx_hw-y := spi_s3c24xx.o -spi_s3c24xx_hw-$(CONFIG_SPI_S3C24XX_FIQ) += spi_s3c24xx_fiq.o - -# ... add above this line ... - -# SPI protocol drivers (device/link on bus) -obj-$(CONFIG_SPI_SPIDEV) += spidev.o -obj-$(CONFIG_SPI_TLE62X0) += tle62x0.o -# ... add above this line ... - -# SPI slave controller drivers (upstream link) -# ... add above this line ... - -# SPI slave drivers (protocol for that link) -# ... add above this line ... diff --git a/drivers/spi/amba-pl022.c b/drivers/spi/amba-pl022.c deleted file mode 100644 index 6a9e58d..0000000 --- a/drivers/spi/amba-pl022.c +++ /dev/null @@ -1,2344 +0,0 @@ -/* - * drivers/spi/amba-pl022.c - * - * A driver for the ARM PL022 PrimeCell SSP/SPI bus master. - * - * Copyright (C) 2008-2009 ST-Ericsson AB - * Copyright (C) 2006 STMicroelectronics Pvt. Ltd. - * - * Author: Linus Walleij - * - * Initial version inspired by: - * linux-2.6.17-rc3-mm1/drivers/spi/pxa2xx_spi.c - * Initial adoption to PL022 by: - * Sachin Verma - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* - * This macro is used to define some register default values. 
- * reg is masked with mask, the OR:ed with an (again masked) - * val shifted sb steps to the left. - */ -#define SSP_WRITE_BITS(reg, val, mask, sb) \ - ((reg) = (((reg) & ~(mask)) | (((val)<<(sb)) & (mask)))) - -/* - * This macro is also used to define some default values. - * It will just shift val by sb steps to the left and mask - * the result with mask. - */ -#define GEN_MASK_BITS(val, mask, sb) \ - (((val)<<(sb)) & (mask)) - -#define DRIVE_TX 0 -#define DO_NOT_DRIVE_TX 1 - -#define DO_NOT_QUEUE_DMA 0 -#define QUEUE_DMA 1 - -#define RX_TRANSFER 1 -#define TX_TRANSFER 2 - -/* - * Macros to access SSP Registers with their offsets - */ -#define SSP_CR0(r) (r + 0x000) -#define SSP_CR1(r) (r + 0x004) -#define SSP_DR(r) (r + 0x008) -#define SSP_SR(r) (r + 0x00C) -#define SSP_CPSR(r) (r + 0x010) -#define SSP_IMSC(r) (r + 0x014) -#define SSP_RIS(r) (r + 0x018) -#define SSP_MIS(r) (r + 0x01C) -#define SSP_ICR(r) (r + 0x020) -#define SSP_DMACR(r) (r + 0x024) -#define SSP_ITCR(r) (r + 0x080) -#define SSP_ITIP(r) (r + 0x084) -#define SSP_ITOP(r) (r + 0x088) -#define SSP_TDR(r) (r + 0x08C) - -#define SSP_PID0(r) (r + 0xFE0) -#define SSP_PID1(r) (r + 0xFE4) -#define SSP_PID2(r) (r + 0xFE8) -#define SSP_PID3(r) (r + 0xFEC) - -#define SSP_CID0(r) (r + 0xFF0) -#define SSP_CID1(r) (r + 0xFF4) -#define SSP_CID2(r) (r + 0xFF8) -#define SSP_CID3(r) (r + 0xFFC) - -/* - * SSP Control Register 0 - SSP_CR0 - */ -#define SSP_CR0_MASK_DSS (0x0FUL << 0) -#define SSP_CR0_MASK_FRF (0x3UL << 4) -#define SSP_CR0_MASK_SPO (0x1UL << 6) -#define SSP_CR0_MASK_SPH (0x1UL << 7) -#define SSP_CR0_MASK_SCR (0xFFUL << 8) - -/* - * The ST version of this block moves som bits - * in SSP_CR0 and extends it to 32 bits - */ -#define SSP_CR0_MASK_DSS_ST (0x1FUL << 0) -#define SSP_CR0_MASK_HALFDUP_ST (0x1UL << 5) -#define SSP_CR0_MASK_CSS_ST (0x1FUL << 16) -#define SSP_CR0_MASK_FRF_ST (0x3UL << 21) - - -/* - * SSP Control Register 0 - SSP_CR1 - */ -#define SSP_CR1_MASK_LBM (0x1UL << 0) -#define SSP_CR1_MASK_SSE (0x1UL << 1) -#define SSP_CR1_MASK_MS (0x1UL << 2) -#define SSP_CR1_MASK_SOD (0x1UL << 3) - -/* - * The ST version of this block adds some bits - * in SSP_CR1 - */ -#define SSP_CR1_MASK_RENDN_ST (0x1UL << 4) -#define SSP_CR1_MASK_TENDN_ST (0x1UL << 5) -#define SSP_CR1_MASK_MWAIT_ST (0x1UL << 6) -#define SSP_CR1_MASK_RXIFLSEL_ST (0x7UL << 7) -#define SSP_CR1_MASK_TXIFLSEL_ST (0x7UL << 10) -/* This one is only in the PL023 variant */ -#define SSP_CR1_MASK_FBCLKDEL_ST (0x7UL << 13) - -/* - * SSP Status Register - SSP_SR - */ -#define SSP_SR_MASK_TFE (0x1UL << 0) /* Transmit FIFO empty */ -#define SSP_SR_MASK_TNF (0x1UL << 1) /* Transmit FIFO not full */ -#define SSP_SR_MASK_RNE (0x1UL << 2) /* Receive FIFO not empty */ -#define SSP_SR_MASK_RFF (0x1UL << 3) /* Receive FIFO full */ -#define SSP_SR_MASK_BSY (0x1UL << 4) /* Busy Flag */ - -/* - * SSP Clock Prescale Register - SSP_CPSR - */ -#define SSP_CPSR_MASK_CPSDVSR (0xFFUL << 0) - -/* - * SSP Interrupt Mask Set/Clear Register - SSP_IMSC - */ -#define SSP_IMSC_MASK_RORIM (0x1UL << 0) /* Receive Overrun Interrupt mask */ -#define SSP_IMSC_MASK_RTIM (0x1UL << 1) /* Receive timeout Interrupt mask */ -#define SSP_IMSC_MASK_RXIM (0x1UL << 2) /* Receive FIFO Interrupt mask */ -#define SSP_IMSC_MASK_TXIM (0x1UL << 3) /* Transmit FIFO Interrupt mask */ - -/* - * SSP Raw Interrupt Status Register - SSP_RIS - */ -/* Receive Overrun Raw Interrupt status */ -#define SSP_RIS_MASK_RORRIS (0x1UL << 0) -/* Receive Timeout Raw Interrupt status */ -#define SSP_RIS_MASK_RTRIS (0x1UL << 1) -/* 
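The two helper macros above do all of the register-field packing in this driver: GEN_MASK_BITS builds a field value in place, and SSP_WRITE_BITS performs a read-modify-write of one field without disturbing its neighbours. The following standalone program copies the same definitions and exercises them on an ordinary variable instead of a device register; the CR0_MASK_* names and the example field values are only illustrative.

#include <stdio.h>
#include <stdint.h>

/* Same definitions as in the driver above. */
#define SSP_WRITE_BITS(reg, val, mask, sb) \
	((reg) = (((reg) & ~(mask)) | (((val)<<(sb)) & (mask))))
#define GEN_MASK_BITS(val, mask, sb) \
	(((val)<<(sb)) & (mask))

#define CR0_MASK_DSS (0x0FUL << 0)   /* data size select, bits 3:0   */
#define CR0_MASK_SCR (0xFFUL << 8)   /* serial clock rate, bits 15:8 */

int main(void)
{
	uint32_t cr0 = 0;

	/* Build an initial value from individual fields. */
	cr0 = GEN_MASK_BITS(0x7, CR0_MASK_DSS, 0) |  /* DSS = bits - 1, 8-bit words */
	      GEN_MASK_BITS(0x02, CR0_MASK_SCR, 8);  /* SCR = 2                      */
	printf("initial  cr0 = 0x%04x\n", (unsigned)cr0);

	/* Read-modify-write one field, leaving SCR untouched. */
	SSP_WRITE_BITS(cr0, 0xF, CR0_MASK_DSS, 0);   /* switch to 16-bit words */
	printf("modified cr0 = 0x%04x\n", (unsigned)cr0);
	return 0;
}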
Receive FIFO Raw Interrupt status */ -#define SSP_RIS_MASK_RXRIS (0x1UL << 2) -/* Transmit FIFO Raw Interrupt status */ -#define SSP_RIS_MASK_TXRIS (0x1UL << 3) - -/* - * SSP Masked Interrupt Status Register - SSP_MIS - */ -/* Receive Overrun Masked Interrupt status */ -#define SSP_MIS_MASK_RORMIS (0x1UL << 0) -/* Receive Timeout Masked Interrupt status */ -#define SSP_MIS_MASK_RTMIS (0x1UL << 1) -/* Receive FIFO Masked Interrupt status */ -#define SSP_MIS_MASK_RXMIS (0x1UL << 2) -/* Transmit FIFO Masked Interrupt status */ -#define SSP_MIS_MASK_TXMIS (0x1UL << 3) - -/* - * SSP Interrupt Clear Register - SSP_ICR - */ -/* Receive Overrun Raw Clear Interrupt bit */ -#define SSP_ICR_MASK_RORIC (0x1UL << 0) -/* Receive Timeout Clear Interrupt bit */ -#define SSP_ICR_MASK_RTIC (0x1UL << 1) - -/* - * SSP DMA Control Register - SSP_DMACR - */ -/* Receive DMA Enable bit */ -#define SSP_DMACR_MASK_RXDMAE (0x1UL << 0) -/* Transmit DMA Enable bit */ -#define SSP_DMACR_MASK_TXDMAE (0x1UL << 1) - -/* - * SSP Integration Test control Register - SSP_ITCR - */ -#define SSP_ITCR_MASK_ITEN (0x1UL << 0) -#define SSP_ITCR_MASK_TESTFIFO (0x1UL << 1) - -/* - * SSP Integration Test Input Register - SSP_ITIP - */ -#define ITIP_MASK_SSPRXD (0x1UL << 0) -#define ITIP_MASK_SSPFSSIN (0x1UL << 1) -#define ITIP_MASK_SSPCLKIN (0x1UL << 2) -#define ITIP_MASK_RXDMAC (0x1UL << 3) -#define ITIP_MASK_TXDMAC (0x1UL << 4) -#define ITIP_MASK_SSPTXDIN (0x1UL << 5) - -/* - * SSP Integration Test output Register - SSP_ITOP - */ -#define ITOP_MASK_SSPTXD (0x1UL << 0) -#define ITOP_MASK_SSPFSSOUT (0x1UL << 1) -#define ITOP_MASK_SSPCLKOUT (0x1UL << 2) -#define ITOP_MASK_SSPOEn (0x1UL << 3) -#define ITOP_MASK_SSPCTLOEn (0x1UL << 4) -#define ITOP_MASK_RORINTR (0x1UL << 5) -#define ITOP_MASK_RTINTR (0x1UL << 6) -#define ITOP_MASK_RXINTR (0x1UL << 7) -#define ITOP_MASK_TXINTR (0x1UL << 8) -#define ITOP_MASK_INTR (0x1UL << 9) -#define ITOP_MASK_RXDMABREQ (0x1UL << 10) -#define ITOP_MASK_RXDMASREQ (0x1UL << 11) -#define ITOP_MASK_TXDMABREQ (0x1UL << 12) -#define ITOP_MASK_TXDMASREQ (0x1UL << 13) - -/* - * SSP Test Data Register - SSP_TDR - */ -#define TDR_MASK_TESTDATA (0xFFFFFFFF) - -/* - * Message State - * we use the spi_message.state (void *) pointer to - * hold a single state value, that's why all this - * (void *) casting is done here. 
- */ -#define STATE_START ((void *) 0) -#define STATE_RUNNING ((void *) 1) -#define STATE_DONE ((void *) 2) -#define STATE_ERROR ((void *) -1) - -/* - * SSP State - Whether Enabled or Disabled - */ -#define SSP_DISABLED (0) -#define SSP_ENABLED (1) - -/* - * SSP DMA State - Whether DMA Enabled or Disabled - */ -#define SSP_DMA_DISABLED (0) -#define SSP_DMA_ENABLED (1) - -/* - * SSP Clock Defaults - */ -#define SSP_DEFAULT_CLKRATE 0x2 -#define SSP_DEFAULT_PRESCALE 0x40 - -/* - * SSP Clock Parameter ranges - */ -#define CPSDVR_MIN 0x02 -#define CPSDVR_MAX 0xFE -#define SCR_MIN 0x00 -#define SCR_MAX 0xFF - -/* - * SSP Interrupt related Macros - */ -#define DEFAULT_SSP_REG_IMSC 0x0UL -#define DISABLE_ALL_INTERRUPTS DEFAULT_SSP_REG_IMSC -#define ENABLE_ALL_INTERRUPTS (~DEFAULT_SSP_REG_IMSC) - -#define CLEAR_ALL_INTERRUPTS 0x3 - -#define SPI_POLLING_TIMEOUT 1000 - - -/* - * The type of reading going on on this chip - */ -enum ssp_reading { - READING_NULL, - READING_U8, - READING_U16, - READING_U32 -}; - -/** - * The type of writing going on on this chip - */ -enum ssp_writing { - WRITING_NULL, - WRITING_U8, - WRITING_U16, - WRITING_U32 -}; - -/** - * struct vendor_data - vendor-specific config parameters - * for PL022 derivates - * @fifodepth: depth of FIFOs (both) - * @max_bpw: maximum number of bits per word - * @unidir: supports unidirection transfers - * @extended_cr: 32 bit wide control register 0 with extra - * features and extra features in CR1 as found in the ST variants - * @pl023: supports a subset of the ST extensions called "PL023" - */ -struct vendor_data { - int fifodepth; - int max_bpw; - bool unidir; - bool extended_cr; - bool pl023; - bool loopback; -}; - -/** - * struct pl022 - This is the private SSP driver data structure - * @adev: AMBA device model hookup - * @vendor: vendor data for the IP block - * @phybase: the physical memory where the SSP device resides - * @virtbase: the virtual memory where the SSP is mapped - * @clk: outgoing clock "SPICLK" for the SPI bus - * @master: SPI framework hookup - * @master_info: controller-specific data from machine setup - * @workqueue: a workqueue on which any spi_message request is queued - * @pump_messages: work struct for scheduling work to the workqueue - * @queue_lock: spinlock to syncronise access to message queue - * @queue: message queue - * @busy: workqueue is busy - * @running: workqueue is running - * @pump_transfers: Tasklet used in Interrupt Transfer mode - * @cur_msg: Pointer to current spi_message being processed - * @cur_transfer: Pointer to current spi_transfer - * @cur_chip: pointer to current clients chip(assigned from controller_state) - * @tx: current position in TX buffer to be read - * @tx_end: end position in TX buffer to be read - * @rx: current position in RX buffer to be written - * @rx_end: end position in RX buffer to be written - * @read: the type of read currently going on - * @write: the type of write currently going on - * @exp_fifo_level: expected FIFO level - * @dma_rx_channel: optional channel for RX DMA - * @dma_tx_channel: optional channel for TX DMA - * @sgt_rx: scattertable for the RX transfer - * @sgt_tx: scattertable for the TX transfer - * @dummypage: a dummy page used for driving data on the bus with DMA - */ -struct pl022 { - struct amba_device *adev; - struct vendor_data *vendor; - resource_size_t phybase; - void __iomem *virtbase; - struct clk *clk; - struct spi_master *master; - struct pl022_ssp_controller *master_info; - /* Driver message queue */ - struct workqueue_struct *workqueue; - 
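As the comment above notes, the per-message state machine value is stored directly in the spi_message.state void pointer by casting small integer constants. A tiny standalone sketch of the same trick follows; fake_message and state_name() are invented for the demo, and the cast of small constants to void * is relied on exactly as the driver does.

#include <stdio.h>

/* Same trick as the driver: the state lives in a void * field,
 * encoded as small integer constants. */
#define STATE_START   ((void *) 0)
#define STATE_RUNNING ((void *) 1)
#define STATE_DONE    ((void *) 2)

struct fake_message { void *state; };

static const char *state_name(void *s)
{
	if (s == STATE_START)   return "start";
	if (s == STATE_RUNNING) return "running";
	if (s == STATE_DONE)    return "done";
	return "error";
}

int main(void)
{
	struct fake_message msg = { .state = STATE_START };

	printf("state: %s\n", state_name(msg.state));
	msg.state = STATE_RUNNING;
	printf("state: %s\n", state_name(msg.state));
	msg.state = STATE_DONE;
	printf("state: %s\n", state_name(msg.state));
	return 0;
}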
struct work_struct pump_messages; - spinlock_t queue_lock; - struct list_head queue; - bool busy; - bool running; - /* Message transfer pump */ - struct tasklet_struct pump_transfers; - struct spi_message *cur_msg; - struct spi_transfer *cur_transfer; - struct chip_data *cur_chip; - void *tx; - void *tx_end; - void *rx; - void *rx_end; - enum ssp_reading read; - enum ssp_writing write; - u32 exp_fifo_level; - /* DMA settings */ -#ifdef CONFIG_DMA_ENGINE - struct dma_chan *dma_rx_channel; - struct dma_chan *dma_tx_channel; - struct sg_table sgt_rx; - struct sg_table sgt_tx; - char *dummypage; -#endif -}; - -/** - * struct chip_data - To maintain runtime state of SSP for each client chip - * @cr0: Value of control register CR0 of SSP - on later ST variants this - * register is 32 bits wide rather than just 16 - * @cr1: Value of control register CR1 of SSP - * @dmacr: Value of DMA control Register of SSP - * @cpsr: Value of Clock prescale register - * @n_bytes: how many bytes(power of 2) reqd for a given data width of client - * @enable_dma: Whether to enable DMA or not - * @read: function ptr to be used to read when doing xfer for this chip - * @write: function ptr to be used to write when doing xfer for this chip - * @cs_control: chip select callback provided by chip - * @xfer_type: polling/interrupt/DMA - * - * Runtime state of the SSP controller, maintained per chip, - * This would be set according to the current message that would be served - */ -struct chip_data { - u32 cr0; - u16 cr1; - u16 dmacr; - u16 cpsr; - u8 n_bytes; - bool enable_dma; - enum ssp_reading read; - enum ssp_writing write; - void (*cs_control) (u32 command); - int xfer_type; -}; - -/** - * null_cs_control - Dummy chip select function - * @command: select/delect the chip - * - * If no chip select function is provided by client this is used as dummy - * chip select - */ -static void null_cs_control(u32 command) -{ - pr_debug("pl022: dummy chip select control, CS=0x%x\n", command); -} - -/** - * giveback - current spi_message is over, schedule next message and call - * callback of this message. Assumes that caller already - * set message->status; dma and pio irqs are blocked - * @pl022: SSP driver private data structure - */ -static void giveback(struct pl022 *pl022) -{ - struct spi_transfer *last_transfer; - unsigned long flags; - struct spi_message *msg; - void (*curr_cs_control) (u32 command); - - /* - * This local reference to the chip select function - * is needed because we set curr_chip to NULL - * as a step toward termininating the message. - */ - curr_cs_control = pl022->cur_chip->cs_control; - spin_lock_irqsave(&pl022->queue_lock, flags); - msg = pl022->cur_msg; - pl022->cur_msg = NULL; - pl022->cur_transfer = NULL; - pl022->cur_chip = NULL; - queue_work(pl022->workqueue, &pl022->pump_messages); - spin_unlock_irqrestore(&pl022->queue_lock, flags); - - last_transfer = list_entry(msg->transfers.prev, - struct spi_transfer, - transfer_list); - - /* Delay if requested before any change in chip select */ - if (last_transfer->delay_usecs) - /* - * FIXME: This runs in interrupt context. - * Is this really smart? - */ - udelay(last_transfer->delay_usecs); - - /* - * Drop chip select UNLESS cs_change is true or we are returning - * a message with an error, or next message is for another chip - */ - if (!last_transfer->cs_change) - curr_cs_control(SSP_CHIP_DESELECT); - else { - struct spi_message *next_msg; - - /* Holding of cs was hinted, but we need to make sure - * the next message is for the same chip. 
Don't waste - * time with the following tests unless this was hinted. - * - * We cannot postpone this until pump_messages, because - * after calling msg->complete (below) the driver that - * sent the current message could be unloaded, which - * could invalidate the cs_control() callback... - */ - - /* get a pointer to the next message, if any */ - spin_lock_irqsave(&pl022->queue_lock, flags); - if (list_empty(&pl022->queue)) - next_msg = NULL; - else - next_msg = list_entry(pl022->queue.next, - struct spi_message, queue); - spin_unlock_irqrestore(&pl022->queue_lock, flags); - - /* see if the next and current messages point - * to the same chip - */ - if (next_msg && next_msg->spi != msg->spi) - next_msg = NULL; - if (!next_msg || msg->state == STATE_ERROR) - curr_cs_control(SSP_CHIP_DESELECT); - } - msg->state = NULL; - if (msg->complete) - msg->complete(msg->context); - /* This message is completed, so let's turn off the clocks & power */ - clk_disable(pl022->clk); - amba_pclk_disable(pl022->adev); - amba_vcore_disable(pl022->adev); -} - -/** - * flush - flush the FIFO to reach a clean state - * @pl022: SSP driver private data structure - */ -static int flush(struct pl022 *pl022) -{ - unsigned long limit = loops_per_jiffy << 1; - - dev_dbg(&pl022->adev->dev, "flush\n"); - do { - while (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE) - readw(SSP_DR(pl022->virtbase)); - } while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_BSY) && limit--); - - pl022->exp_fifo_level = 0; - - return limit; -} - -/** - * restore_state - Load configuration of current chip - * @pl022: SSP driver private data structure - */ -static void restore_state(struct pl022 *pl022) -{ - struct chip_data *chip = pl022->cur_chip; - - if (pl022->vendor->extended_cr) - writel(chip->cr0, SSP_CR0(pl022->virtbase)); - else - writew(chip->cr0, SSP_CR0(pl022->virtbase)); - writew(chip->cr1, SSP_CR1(pl022->virtbase)); - writew(chip->dmacr, SSP_DMACR(pl022->virtbase)); - writew(chip->cpsr, SSP_CPSR(pl022->virtbase)); - writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase)); - writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase)); -} - -/* - * Default SSP Register Values - */ -#define DEFAULT_SSP_REG_CR0 ( \ - GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS, 0) | \ - GEN_MASK_BITS(SSP_INTERFACE_MOTOROLA_SPI, SSP_CR0_MASK_FRF, 4) | \ - GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \ - GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \ - GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) \ -) - -/* ST versions have slightly different bit layout */ -#define DEFAULT_SSP_REG_CR0_ST ( \ - GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS_ST, 0) | \ - GEN_MASK_BITS(SSP_MICROWIRE_CHANNEL_FULL_DUPLEX, SSP_CR0_MASK_HALFDUP_ST, 5) | \ - GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \ - GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \ - GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) | \ - GEN_MASK_BITS(SSP_BITS_8, SSP_CR0_MASK_CSS_ST, 16) | \ - GEN_MASK_BITS(SSP_INTERFACE_MOTOROLA_SPI, SSP_CR0_MASK_FRF_ST, 21) \ -) - -/* The PL023 version is slightly different again */ -#define DEFAULT_SSP_REG_CR0_ST_PL023 ( \ - GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS_ST, 0) | \ - GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \ - GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \ - GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) \ -) - -#define DEFAULT_SSP_REG_CR1 ( \ - GEN_MASK_BITS(LOOPBACK_DISABLED, SSP_CR1_MASK_LBM, 0) | \ - GEN_MASK_BITS(SSP_DISABLED, 
SSP_CR1_MASK_SSE, 1) | \ - GEN_MASK_BITS(SSP_MASTER, SSP_CR1_MASK_MS, 2) | \ - GEN_MASK_BITS(DO_NOT_DRIVE_TX, SSP_CR1_MASK_SOD, 3) \ -) - -/* ST versions extend this register to use all 16 bits */ -#define DEFAULT_SSP_REG_CR1_ST ( \ - DEFAULT_SSP_REG_CR1 | \ - GEN_MASK_BITS(SSP_RX_MSB, SSP_CR1_MASK_RENDN_ST, 4) | \ - GEN_MASK_BITS(SSP_TX_MSB, SSP_CR1_MASK_TENDN_ST, 5) | \ - GEN_MASK_BITS(SSP_MWIRE_WAIT_ZERO, SSP_CR1_MASK_MWAIT_ST, 6) |\ - GEN_MASK_BITS(SSP_RX_1_OR_MORE_ELEM, SSP_CR1_MASK_RXIFLSEL_ST, 7) | \ - GEN_MASK_BITS(SSP_TX_1_OR_MORE_EMPTY_LOC, SSP_CR1_MASK_TXIFLSEL_ST, 10) \ -) - -/* - * The PL023 variant has further differences: no loopback mode, no microwire - * support, and a new clock feedback delay setting. - */ -#define DEFAULT_SSP_REG_CR1_ST_PL023 ( \ - GEN_MASK_BITS(SSP_DISABLED, SSP_CR1_MASK_SSE, 1) | \ - GEN_MASK_BITS(SSP_MASTER, SSP_CR1_MASK_MS, 2) | \ - GEN_MASK_BITS(DO_NOT_DRIVE_TX, SSP_CR1_MASK_SOD, 3) | \ - GEN_MASK_BITS(SSP_RX_MSB, SSP_CR1_MASK_RENDN_ST, 4) | \ - GEN_MASK_BITS(SSP_TX_MSB, SSP_CR1_MASK_TENDN_ST, 5) | \ - GEN_MASK_BITS(SSP_RX_1_OR_MORE_ELEM, SSP_CR1_MASK_RXIFLSEL_ST, 7) | \ - GEN_MASK_BITS(SSP_TX_1_OR_MORE_EMPTY_LOC, SSP_CR1_MASK_TXIFLSEL_ST, 10) | \ - GEN_MASK_BITS(SSP_FEEDBACK_CLK_DELAY_NONE, SSP_CR1_MASK_FBCLKDEL_ST, 13) \ -) - -#define DEFAULT_SSP_REG_CPSR ( \ - GEN_MASK_BITS(SSP_DEFAULT_PRESCALE, SSP_CPSR_MASK_CPSDVSR, 0) \ -) - -#define DEFAULT_SSP_REG_DMACR (\ - GEN_MASK_BITS(SSP_DMA_DISABLED, SSP_DMACR_MASK_RXDMAE, 0) | \ - GEN_MASK_BITS(SSP_DMA_DISABLED, SSP_DMACR_MASK_TXDMAE, 1) \ -) - -/** - * load_ssp_default_config - Load default configuration for SSP - * @pl022: SSP driver private data structure - */ -static void load_ssp_default_config(struct pl022 *pl022) -{ - if (pl022->vendor->pl023) { - writel(DEFAULT_SSP_REG_CR0_ST_PL023, SSP_CR0(pl022->virtbase)); - writew(DEFAULT_SSP_REG_CR1_ST_PL023, SSP_CR1(pl022->virtbase)); - } else if (pl022->vendor->extended_cr) { - writel(DEFAULT_SSP_REG_CR0_ST, SSP_CR0(pl022->virtbase)); - writew(DEFAULT_SSP_REG_CR1_ST, SSP_CR1(pl022->virtbase)); - } else { - writew(DEFAULT_SSP_REG_CR0, SSP_CR0(pl022->virtbase)); - writew(DEFAULT_SSP_REG_CR1, SSP_CR1(pl022->virtbase)); - } - writew(DEFAULT_SSP_REG_DMACR, SSP_DMACR(pl022->virtbase)); - writew(DEFAULT_SSP_REG_CPSR, SSP_CPSR(pl022->virtbase)); - writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase)); - writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase)); -} - -/** - * This will write to TX and read from RX according to the parameters - * set in pl022. - */ -static void readwriter(struct pl022 *pl022) -{ - - /* - * The FIFO depth is different between primecell variants. - * I believe filling in too much in the FIFO might cause - * errons in 8bit wide transfers on ARM variants (just 8 words - * FIFO, means only 8x8 = 64 bits in FIFO) at least. - * - * To prevent this issue, the TX FIFO is only filled to the - * unused RX FIFO fill length, regardless of what the TX - * FIFO status flag indicates. 
- */ - dev_dbg(&pl022->adev->dev, - "%s, rx: %p, rxend: %p, tx: %p, txend: %p\n", - __func__, pl022->rx, pl022->rx_end, pl022->tx, pl022->tx_end); - - /* Read as much as you can */ - while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE) - && (pl022->rx < pl022->rx_end)) { - switch (pl022->read) { - case READING_NULL: - readw(SSP_DR(pl022->virtbase)); - break; - case READING_U8: - *(u8 *) (pl022->rx) = - readw(SSP_DR(pl022->virtbase)) & 0xFFU; - break; - case READING_U16: - *(u16 *) (pl022->rx) = - (u16) readw(SSP_DR(pl022->virtbase)); - break; - case READING_U32: - *(u32 *) (pl022->rx) = - readl(SSP_DR(pl022->virtbase)); - break; - } - pl022->rx += (pl022->cur_chip->n_bytes); - pl022->exp_fifo_level--; - } - /* - * Write as much as possible up to the RX FIFO size - */ - while ((pl022->exp_fifo_level < pl022->vendor->fifodepth) - && (pl022->tx < pl022->tx_end)) { - switch (pl022->write) { - case WRITING_NULL: - writew(0x0, SSP_DR(pl022->virtbase)); - break; - case WRITING_U8: - writew(*(u8 *) (pl022->tx), SSP_DR(pl022->virtbase)); - break; - case WRITING_U16: - writew((*(u16 *) (pl022->tx)), SSP_DR(pl022->virtbase)); - break; - case WRITING_U32: - writel(*(u32 *) (pl022->tx), SSP_DR(pl022->virtbase)); - break; - } - pl022->tx += (pl022->cur_chip->n_bytes); - pl022->exp_fifo_level++; - /* - * This inner reader takes care of things appearing in the RX - * FIFO as we're transmitting. This will happen a lot since the - * clock starts running when you put things into the TX FIFO, - * and then things are continuously clocked into the RX FIFO. - */ - while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE) - && (pl022->rx < pl022->rx_end)) { - switch (pl022->read) { - case READING_NULL: - readw(SSP_DR(pl022->virtbase)); - break; - case READING_U8: - *(u8 *) (pl022->rx) = - readw(SSP_DR(pl022->virtbase)) & 0xFFU; - break; - case READING_U16: - *(u16 *) (pl022->rx) = - (u16) readw(SSP_DR(pl022->virtbase)); - break; - case READING_U32: - *(u32 *) (pl022->rx) = - readl(SSP_DR(pl022->virtbase)); - break; - } - pl022->rx += (pl022->cur_chip->n_bytes); - pl022->exp_fifo_level--; - } - } - /* - * When we exit here the TX FIFO should be full and the RX FIFO - * should be empty - */ -} - - -/** - * next_transfer - Move to the Next transfer in the current spi message - * @pl022: SSP driver private data structure - * - * This function moves though the linked list of spi transfers in the - * current spi message and returns with the state of current spi - * message i.e whether its last transfer is done(STATE_DONE) or - * Next transfer is ready(STATE_RUNNING) - */ -static void *next_transfer(struct pl022 *pl022) -{ - struct spi_message *msg = pl022->cur_msg; - struct spi_transfer *trans = pl022->cur_transfer; - - /* Move to next transfer */ - if (trans->transfer_list.next != &msg->transfers) { - pl022->cur_transfer = - list_entry(trans->transfer_list.next, - struct spi_transfer, transfer_list); - return STATE_RUNNING; - } - return STATE_DONE; -} - -/* - * This DMA functionality is only compiled in if we have - * access to the generic DMA devices/DMA engine. 
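The key invariant in readwriter() above is that the driver never lets more than the FIFO depth worth of words be "in flight" (written to TX but not yet drained from RX), tracked in exp_fifo_level, which is what protects the RX FIFO from overrunning. A rough userspace model of that bookkeeping follows; the 8-entry depth and the 20-word transfer are made-up example numbers, and the "hardware" is simulated rather than read from registers.

#include <stdio.h>

#define FIFO_DEPTH 8   /* assumed 8-entry FIFOs, as on the ARM variant */

int main(void)
{
	unsigned len = 20;            /* words to transfer                */
	unsigned tx = 0, rx = 0;      /* words written / words read back  */
	unsigned exp_fifo_level = 0;  /* words in flight (TX'd, not RX'd) */

	while (rx < len) {
		/* Fill TX only up to the unused RX FIFO space, as in
		 * readwriter(): RX can then never overrun. */
		while (exp_fifo_level < FIFO_DEPTH && tx < len) {
			tx++;
			exp_fifo_level++;
		}
		/* Pretend the hardware clocked everything through and the
		 * same number of words now waits in the RX FIFO. */
		while (exp_fifo_level > 0) {
			rx++;
			exp_fifo_level--;
		}
		printf("burst done: tx=%u rx=%u in-flight=%u\n",
		       tx, rx, exp_fifo_level);
	}
	return 0;
}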
- */ -#ifdef CONFIG_DMA_ENGINE -static void unmap_free_dma_scatter(struct pl022 *pl022) -{ - /* Unmap and free the SG tables */ - dma_unmap_sg(pl022->dma_tx_channel->device->dev, pl022->sgt_tx.sgl, - pl022->sgt_tx.nents, DMA_TO_DEVICE); - dma_unmap_sg(pl022->dma_rx_channel->device->dev, pl022->sgt_rx.sgl, - pl022->sgt_rx.nents, DMA_FROM_DEVICE); - sg_free_table(&pl022->sgt_rx); - sg_free_table(&pl022->sgt_tx); -} - -static void dma_callback(void *data) -{ - struct pl022 *pl022 = data; - struct spi_message *msg = pl022->cur_msg; - - BUG_ON(!pl022->sgt_rx.sgl); - -#ifdef VERBOSE_DEBUG - /* - * Optionally dump out buffers to inspect contents, this is - * good if you want to convince yourself that the loopback - * read/write contents are the same, when adopting to a new - * DMA engine. - */ - { - struct scatterlist *sg; - unsigned int i; - - dma_sync_sg_for_cpu(&pl022->adev->dev, - pl022->sgt_rx.sgl, - pl022->sgt_rx.nents, - DMA_FROM_DEVICE); - - for_each_sg(pl022->sgt_rx.sgl, sg, pl022->sgt_rx.nents, i) { - dev_dbg(&pl022->adev->dev, "SPI RX SG ENTRY: %d", i); - print_hex_dump(KERN_ERR, "SPI RX: ", - DUMP_PREFIX_OFFSET, - 16, - 1, - sg_virt(sg), - sg_dma_len(sg), - 1); - } - for_each_sg(pl022->sgt_tx.sgl, sg, pl022->sgt_tx.nents, i) { - dev_dbg(&pl022->adev->dev, "SPI TX SG ENTRY: %d", i); - print_hex_dump(KERN_ERR, "SPI TX: ", - DUMP_PREFIX_OFFSET, - 16, - 1, - sg_virt(sg), - sg_dma_len(sg), - 1); - } - } -#endif - - unmap_free_dma_scatter(pl022); - - /* Update total bytes transferred */ - msg->actual_length += pl022->cur_transfer->len; - if (pl022->cur_transfer->cs_change) - pl022->cur_chip-> - cs_control(SSP_CHIP_DESELECT); - - /* Move to next transfer */ - msg->state = next_transfer(pl022); - tasklet_schedule(&pl022->pump_transfers); -} - -static void setup_dma_scatter(struct pl022 *pl022, - void *buffer, - unsigned int length, - struct sg_table *sgtab) -{ - struct scatterlist *sg; - int bytesleft = length; - void *bufp = buffer; - int mapbytes; - int i; - - if (buffer) { - for_each_sg(sgtab->sgl, sg, sgtab->nents, i) { - /* - * If there are less bytes left than what fits - * in the current page (plus page alignment offset) - * we just feed in this, else we stuff in as much - * as we can. 
- */ - if (bytesleft < (PAGE_SIZE - offset_in_page(bufp))) - mapbytes = bytesleft; - else - mapbytes = PAGE_SIZE - offset_in_page(bufp); - sg_set_page(sg, virt_to_page(bufp), - mapbytes, offset_in_page(bufp)); - bufp += mapbytes; - bytesleft -= mapbytes; - dev_dbg(&pl022->adev->dev, - "set RX/TX target page @ %p, %d bytes, %d left\n", - bufp, mapbytes, bytesleft); - } - } else { - /* Map the dummy buffer on every page */ - for_each_sg(sgtab->sgl, sg, sgtab->nents, i) { - if (bytesleft < PAGE_SIZE) - mapbytes = bytesleft; - else - mapbytes = PAGE_SIZE; - sg_set_page(sg, virt_to_page(pl022->dummypage), - mapbytes, 0); - bytesleft -= mapbytes; - dev_dbg(&pl022->adev->dev, - "set RX/TX to dummy page %d bytes, %d left\n", - mapbytes, bytesleft); - - } - } - BUG_ON(bytesleft); -} - -/** - * configure_dma - configures the channels for the next transfer - * @pl022: SSP driver's private data structure - */ -static int configure_dma(struct pl022 *pl022) -{ - struct dma_slave_config rx_conf = { - .src_addr = SSP_DR(pl022->phybase), - .direction = DMA_FROM_DEVICE, - .src_maxburst = pl022->vendor->fifodepth >> 1, - }; - struct dma_slave_config tx_conf = { - .dst_addr = SSP_DR(pl022->phybase), - .direction = DMA_TO_DEVICE, - .dst_maxburst = pl022->vendor->fifodepth >> 1, - }; - unsigned int pages; - int ret; - int rx_sglen, tx_sglen; - struct dma_chan *rxchan = pl022->dma_rx_channel; - struct dma_chan *txchan = pl022->dma_tx_channel; - struct dma_async_tx_descriptor *rxdesc; - struct dma_async_tx_descriptor *txdesc; - - /* Check that the channels are available */ - if (!rxchan || !txchan) - return -ENODEV; - - switch (pl022->read) { - case READING_NULL: - /* Use the same as for writing */ - rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_UNDEFINED; - break; - case READING_U8: - rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; - break; - case READING_U16: - rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; - break; - case READING_U32: - rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; - break; - } - - switch (pl022->write) { - case WRITING_NULL: - /* Use the same as for reading */ - tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_UNDEFINED; - break; - case WRITING_U8: - tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; - break; - case WRITING_U16: - tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; - break; - case WRITING_U32: - tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; - break; - } - - /* SPI pecularity: we need to read and write the same width */ - if (rx_conf.src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) - rx_conf.src_addr_width = tx_conf.dst_addr_width; - if (tx_conf.dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) - tx_conf.dst_addr_width = rx_conf.src_addr_width; - BUG_ON(rx_conf.src_addr_width != tx_conf.dst_addr_width); - - dmaengine_slave_config(rxchan, &rx_conf); - dmaengine_slave_config(txchan, &tx_conf); - - /* Create sglists for the transfers */ - pages = (pl022->cur_transfer->len >> PAGE_SHIFT) + 1; - dev_dbg(&pl022->adev->dev, "using %d pages for transfer\n", pages); - - ret = sg_alloc_table(&pl022->sgt_rx, pages, GFP_KERNEL); - if (ret) - goto err_alloc_rx_sg; - - ret = sg_alloc_table(&pl022->sgt_tx, pages, GFP_KERNEL); - if (ret) - goto err_alloc_tx_sg; - - /* Fill in the scatterlists for the RX+TX buffers */ - setup_dma_scatter(pl022, pl022->rx, - pl022->cur_transfer->len, &pl022->sgt_rx); - setup_dma_scatter(pl022, pl022->tx, - pl022->cur_transfer->len, &pl022->sgt_tx); - - /* Map DMA buffers */ - rx_sglen = dma_map_sg(rxchan->device->dev, pl022->sgt_rx.sgl, - 
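setup_dma_scatter() above splits the transfer buffer into scatterlist entries that never cross a page boundary: each chunk is either the remainder of the buffer or whatever fits up to the end of the current page. The sketch below shows only that chunk-size arithmetic in userspace, with PAGE_SIZE assumed to be 4096 and offset_in_page() modelled as a simple mask; the buffer offset and length are arbitrary example values.

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL
#define offset_in_page(p) ((uintptr_t)(p) & (PAGE_SIZE - 1))

int main(void)
{
	/* A deliberately misaligned start so the first chunk is short. */
	char *base = malloc(3 * PAGE_SIZE);
	char *bufp = base + 100;
	long bytesleft = 2 * PAGE_SIZE + 500;
	int i = 0;

	while (bytesleft > 0) {
		long mapbytes;

		/* Same decision as setup_dma_scatter(): either the rest of
		 * the buffer, or up to the end of the current page. */
		if (bytesleft < (long)(PAGE_SIZE - offset_in_page(bufp)))
			mapbytes = bytesleft;
		else
			mapbytes = PAGE_SIZE - offset_in_page(bufp);

		printf("sg[%d]: page offset %4lu, %4ld bytes\n",
		       i++, (unsigned long)offset_in_page(bufp), mapbytes);
		bufp += mapbytes;
		bytesleft -= mapbytes;
	}
	free(base);
	return 0;
}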
pl022->sgt_rx.nents, DMA_FROM_DEVICE); - if (!rx_sglen) - goto err_rx_sgmap; - - tx_sglen = dma_map_sg(txchan->device->dev, pl022->sgt_tx.sgl, - pl022->sgt_tx.nents, DMA_TO_DEVICE); - if (!tx_sglen) - goto err_tx_sgmap; - - /* Send both scatterlists */ - rxdesc = rxchan->device->device_prep_slave_sg(rxchan, - pl022->sgt_rx.sgl, - rx_sglen, - DMA_FROM_DEVICE, - DMA_PREP_INTERRUPT | DMA_CTRL_ACK); - if (!rxdesc) - goto err_rxdesc; - - txdesc = txchan->device->device_prep_slave_sg(txchan, - pl022->sgt_tx.sgl, - tx_sglen, - DMA_TO_DEVICE, - DMA_PREP_INTERRUPT | DMA_CTRL_ACK); - if (!txdesc) - goto err_txdesc; - - /* Put the callback on the RX transfer only, that should finish last */ - rxdesc->callback = dma_callback; - rxdesc->callback_param = pl022; - - /* Submit and fire RX and TX with TX last so we're ready to read! */ - dmaengine_submit(rxdesc); - dmaengine_submit(txdesc); - dma_async_issue_pending(rxchan); - dma_async_issue_pending(txchan); - - return 0; - -err_txdesc: - dmaengine_terminate_all(txchan); -err_rxdesc: - dmaengine_terminate_all(rxchan); - dma_unmap_sg(txchan->device->dev, pl022->sgt_tx.sgl, - pl022->sgt_tx.nents, DMA_TO_DEVICE); -err_tx_sgmap: - dma_unmap_sg(rxchan->device->dev, pl022->sgt_rx.sgl, - pl022->sgt_tx.nents, DMA_FROM_DEVICE); -err_rx_sgmap: - sg_free_table(&pl022->sgt_tx); -err_alloc_tx_sg: - sg_free_table(&pl022->sgt_rx); -err_alloc_rx_sg: - return -ENOMEM; -} - -static int __init pl022_dma_probe(struct pl022 *pl022) -{ - dma_cap_mask_t mask; - - /* Try to acquire a generic DMA engine slave channel */ - dma_cap_zero(mask); - dma_cap_set(DMA_SLAVE, mask); - /* - * We need both RX and TX channels to do DMA, else do none - * of them. - */ - pl022->dma_rx_channel = dma_request_channel(mask, - pl022->master_info->dma_filter, - pl022->master_info->dma_rx_param); - if (!pl022->dma_rx_channel) { - dev_dbg(&pl022->adev->dev, "no RX DMA channel!\n"); - goto err_no_rxchan; - } - - pl022->dma_tx_channel = dma_request_channel(mask, - pl022->master_info->dma_filter, - pl022->master_info->dma_tx_param); - if (!pl022->dma_tx_channel) { - dev_dbg(&pl022->adev->dev, "no TX DMA channel!\n"); - goto err_no_txchan; - } - - pl022->dummypage = kmalloc(PAGE_SIZE, GFP_KERNEL); - if (!pl022->dummypage) { - dev_dbg(&pl022->adev->dev, "no DMA dummypage!\n"); - goto err_no_dummypage; - } - - dev_info(&pl022->adev->dev, "setup for DMA on RX %s, TX %s\n", - dma_chan_name(pl022->dma_rx_channel), - dma_chan_name(pl022->dma_tx_channel)); - - return 0; - -err_no_dummypage: - dma_release_channel(pl022->dma_tx_channel); -err_no_txchan: - dma_release_channel(pl022->dma_rx_channel); - pl022->dma_rx_channel = NULL; -err_no_rxchan: - dev_err(&pl022->adev->dev, - "Failed to work in dma mode, work without dma!\n"); - return -ENODEV; -} - -static void terminate_dma(struct pl022 *pl022) -{ - struct dma_chan *rxchan = pl022->dma_rx_channel; - struct dma_chan *txchan = pl022->dma_tx_channel; - - dmaengine_terminate_all(rxchan); - dmaengine_terminate_all(txchan); - unmap_free_dma_scatter(pl022); -} - -static void pl022_dma_remove(struct pl022 *pl022) -{ - if (pl022->busy) - terminate_dma(pl022); - if (pl022->dma_tx_channel) - dma_release_channel(pl022->dma_tx_channel); - if (pl022->dma_rx_channel) - dma_release_channel(pl022->dma_rx_channel); - kfree(pl022->dummypage); -} - -#else -static inline int configure_dma(struct pl022 *pl022) -{ - return -ENODEV; -} - -static inline int pl022_dma_probe(struct pl022 *pl022) -{ - return 0; -} - -static inline void pl022_dma_remove(struct pl022 *pl022) -{ -} -#endif - 
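configure_dma() and pl022_dma_probe() above both use the common kernel error-handling shape: one goto label per acquired resource, unwinding in reverse order so that only what was actually acquired gets released. A generic userspace illustration of that pattern follows, with malloc standing in for DMA channels and scatter tables; none of the names come from the driver.

#include <stdio.h>
#include <stdlib.h>

/* Acquire three resources; on failure release, in reverse order, only
 * what was actually acquired -- the same shape as configure_dma()'s
 * err_txdesc/err_rxdesc/... unwind above. */
static int setup(size_t a_sz, size_t b_sz, size_t c_sz)
{
	void *a, *b, *c;

	a = malloc(a_sz);
	if (!a)
		goto err_a;
	b = malloc(b_sz);
	if (!b)
		goto err_b;
	c = malloc(c_sz);
	if (!c)
		goto err_c;

	printf("all resources acquired\n");
	free(c);
	free(b);
	free(a);
	return 0;

err_c:
	free(b);
err_b:
	free(a);
err_a:
	return -1;
}

int main(void)
{
	return setup(64, 128, 256) ? EXIT_FAILURE : EXIT_SUCCESS;
}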
-/** - * pl022_interrupt_handler - Interrupt handler for SSP controller - * - * This function handles interrupts generated for an interrupt based transfer. - * If a receive overrun (ROR) interrupt is there then we disable SSP, flag the - * current message's state as STATE_ERROR and schedule the tasklet - * pump_transfers which will do the postprocessing of the current message by - * calling giveback(). Otherwise it reads data from RX FIFO till there is no - * more data, and writes data in TX FIFO till it is not full. If we complete - * the transfer we move to the next transfer and schedule the tasklet. - */ -static irqreturn_t pl022_interrupt_handler(int irq, void *dev_id) -{ - struct pl022 *pl022 = dev_id; - struct spi_message *msg = pl022->cur_msg; - u16 irq_status = 0; - u16 flag = 0; - - if (unlikely(!msg)) { - dev_err(&pl022->adev->dev, - "bad message state in interrupt handler"); - /* Never fail */ - return IRQ_HANDLED; - } - - /* Read the Interrupt Status Register */ - irq_status = readw(SSP_MIS(pl022->virtbase)); - - if (unlikely(!irq_status)) - return IRQ_NONE; - - /* - * This handles the FIFO interrupts, the timeout - * interrupts are flatly ignored, they cannot be - * trusted. - */ - if (unlikely(irq_status & SSP_MIS_MASK_RORMIS)) { - /* - * Overrun interrupt - bail out since our Data has been - * corrupted - */ - dev_err(&pl022->adev->dev, "FIFO overrun\n"); - if (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RFF) - dev_err(&pl022->adev->dev, - "RXFIFO is full\n"); - if (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_TNF) - dev_err(&pl022->adev->dev, - "TXFIFO is full\n"); - - /* - * Disable and clear interrupts, disable SSP, - * mark message with bad status so it can be - * retried. - */ - writew(DISABLE_ALL_INTERRUPTS, - SSP_IMSC(pl022->virtbase)); - writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase)); - writew((readw(SSP_CR1(pl022->virtbase)) & - (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase)); - msg->state = STATE_ERROR; - - /* Schedule message queue handler */ - tasklet_schedule(&pl022->pump_transfers); - return IRQ_HANDLED; - } - - readwriter(pl022); - - if ((pl022->tx == pl022->tx_end) && (flag == 0)) { - flag = 1; - /* Disable Transmit interrupt */ - writew(readw(SSP_IMSC(pl022->virtbase)) & - (~SSP_IMSC_MASK_TXIM), - SSP_IMSC(pl022->virtbase)); - } - - /* - * Since all transactions must write as much as shall be read, - * we can conclude the entire transaction once RX is complete. - * At this point, all TX will always be finished. - */ - if (pl022->rx >= pl022->rx_end) { - writew(DISABLE_ALL_INTERRUPTS, - SSP_IMSC(pl022->virtbase)); - writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase)); - if (unlikely(pl022->rx > pl022->rx_end)) { - dev_warn(&pl022->adev->dev, "read %u surplus " - "bytes (did you request an odd " - "number of bytes on a 16bit bus?)\n", - (u32) (pl022->rx - pl022->rx_end)); - } - /* Update total bytes transferred */ - msg->actual_length += pl022->cur_transfer->len; - if (pl022->cur_transfer->cs_change) - pl022->cur_chip-> - cs_control(SSP_CHIP_DESELECT); - /* Move to next transfer */ - msg->state = next_transfer(pl022); - tasklet_schedule(&pl022->pump_transfers); - return IRQ_HANDLED; - } - - return IRQ_HANDLED; -} - -/** - * This sets up the pointers to memory for the next message to - * send out on the SPI bus. 
- */ -static int set_up_next_transfer(struct pl022 *pl022, - struct spi_transfer *transfer) -{ - int residue; - - /* Sanity check the message for this bus width */ - residue = pl022->cur_transfer->len % pl022->cur_chip->n_bytes; - if (unlikely(residue != 0)) { - dev_err(&pl022->adev->dev, - "message of %u bytes to transmit but the current " - "chip bus has a data width of %u bytes!\n", - pl022->cur_transfer->len, - pl022->cur_chip->n_bytes); - dev_err(&pl022->adev->dev, "skipping this message\n"); - return -EIO; - } - pl022->tx = (void *)transfer->tx_buf; - pl022->tx_end = pl022->tx + pl022->cur_transfer->len; - pl022->rx = (void *)transfer->rx_buf; - pl022->rx_end = pl022->rx + pl022->cur_transfer->len; - pl022->write = - pl022->tx ? pl022->cur_chip->write : WRITING_NULL; - pl022->read = pl022->rx ? pl022->cur_chip->read : READING_NULL; - return 0; -} - -/** - * pump_transfers - Tasklet function which schedules next transfer - * when running in interrupt or DMA transfer mode. - * @data: SSP driver private data structure - * - */ -static void pump_transfers(unsigned long data) -{ - struct pl022 *pl022 = (struct pl022 *) data; - struct spi_message *message = NULL; - struct spi_transfer *transfer = NULL; - struct spi_transfer *previous = NULL; - - /* Get current state information */ - message = pl022->cur_msg; - transfer = pl022->cur_transfer; - - /* Handle for abort */ - if (message->state == STATE_ERROR) { - message->status = -EIO; - giveback(pl022); - return; - } - - /* Handle end of message */ - if (message->state == STATE_DONE) { - message->status = 0; - giveback(pl022); - return; - } - - /* Delay if requested at end of transfer before CS change */ - if (message->state == STATE_RUNNING) { - previous = list_entry(transfer->transfer_list.prev, - struct spi_transfer, - transfer_list); - if (previous->delay_usecs) - /* - * FIXME: This runs in interrupt context. - * Is this really smart? - */ - udelay(previous->delay_usecs); - - /* Drop chip select only if cs_change is requested */ - if (previous->cs_change) - pl022->cur_chip->cs_control(SSP_CHIP_SELECT); - } else { - /* STATE_START */ - message->state = STATE_RUNNING; - } - - if (set_up_next_transfer(pl022, transfer)) { - message->state = STATE_ERROR; - message->status = -EIO; - giveback(pl022); - return; - } - /* Flush the FIFOs and let's go! 
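The first thing set_up_next_transfer() does is reject a transfer whose byte length does not divide evenly into the current word size (1, 2 or 4 bytes depending on bits_per_word). The small program below mirrors just that residue check; transfer_len_ok() is an invented helper name, not part of the driver.

#include <stdio.h>

/* Mirror of the sanity check in set_up_next_transfer(): a transfer of
 * `len` bytes only makes sense if it divides evenly into words of
 * `n_bytes` each. */
static int transfer_len_ok(unsigned len, unsigned n_bytes)
{
	return (len % n_bytes) == 0;
}

int main(void)
{
	printf("7 bytes on an 8-bit bus:  %s\n",
	       transfer_len_ok(7, 1) ? "ok" : "rejected");
	printf("7 bytes on a 16-bit bus:  %s\n",
	       transfer_len_ok(7, 2) ? "ok" : "rejected");
	printf("8 bytes on a 16-bit bus:  %s\n",
	       transfer_len_ok(8, 2) ? "ok" : "rejected");
	return 0;
}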
*/ - flush(pl022); - - if (pl022->cur_chip->enable_dma) { - if (configure_dma(pl022)) { - dev_dbg(&pl022->adev->dev, - "configuration of DMA failed, fall back to interrupt mode\n"); - goto err_config_dma; - } - return; - } - -err_config_dma: - writew(ENABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase)); -} - -static void do_interrupt_dma_transfer(struct pl022 *pl022) -{ - u32 irqflags = ENABLE_ALL_INTERRUPTS; - - /* Enable target chip */ - pl022->cur_chip->cs_control(SSP_CHIP_SELECT); - if (set_up_next_transfer(pl022, pl022->cur_transfer)) { - /* Error path */ - pl022->cur_msg->state = STATE_ERROR; - pl022->cur_msg->status = -EIO; - giveback(pl022); - return; - } - /* If we're using DMA, set up DMA here */ - if (pl022->cur_chip->enable_dma) { - /* Configure DMA transfer */ - if (configure_dma(pl022)) { - dev_dbg(&pl022->adev->dev, - "configuration of DMA failed, fall back to interrupt mode\n"); - goto err_config_dma; - } - /* Disable interrupts in DMA mode, IRQ from DMA controller */ - irqflags = DISABLE_ALL_INTERRUPTS; - } -err_config_dma: - /* Enable SSP, turn on interrupts */ - writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE), - SSP_CR1(pl022->virtbase)); - writew(irqflags, SSP_IMSC(pl022->virtbase)); -} - -static void do_polling_transfer(struct pl022 *pl022) -{ - struct spi_message *message = NULL; - struct spi_transfer *transfer = NULL; - struct spi_transfer *previous = NULL; - struct chip_data *chip; - unsigned long time, timeout; - - chip = pl022->cur_chip; - message = pl022->cur_msg; - - while (message->state != STATE_DONE) { - /* Handle for abort */ - if (message->state == STATE_ERROR) - break; - transfer = pl022->cur_transfer; - - /* Delay if requested at end of transfer */ - if (message->state == STATE_RUNNING) { - previous = - list_entry(transfer->transfer_list.prev, - struct spi_transfer, transfer_list); - if (previous->delay_usecs) - udelay(previous->delay_usecs); - if (previous->cs_change) - pl022->cur_chip->cs_control(SSP_CHIP_SELECT); - } else { - /* STATE_START */ - message->state = STATE_RUNNING; - pl022->cur_chip->cs_control(SSP_CHIP_SELECT); - } - - /* Configuration Changing Per Transfer */ - if (set_up_next_transfer(pl022, transfer)) { - /* Error path */ - message->state = STATE_ERROR; - break; - } - /* Flush FIFOs and enable SSP */ - flush(pl022); - writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE), - SSP_CR1(pl022->virtbase)); - - dev_dbg(&pl022->adev->dev, "polling transfer ongoing ...\n"); - - timeout = jiffies + msecs_to_jiffies(SPI_POLLING_TIMEOUT); - while (pl022->tx < pl022->tx_end || pl022->rx < pl022->rx_end) { - time = jiffies; - readwriter(pl022); - if (time_after(time, timeout)) { - dev_warn(&pl022->adev->dev, - "%s: timeout!\n", __func__); - message->state = STATE_ERROR; - goto out; - } - cpu_relax(); - } - - /* Update total byte transferred */ - message->actual_length += pl022->cur_transfer->len; - if (pl022->cur_transfer->cs_change) - pl022->cur_chip->cs_control(SSP_CHIP_DESELECT); - /* Move to next transfer */ - message->state = next_transfer(pl022); - } -out: - /* Handle end of message */ - if (message->state == STATE_DONE) - message->status = 0; - else - message->status = -EIO; - - giveback(pl022); - return; -} - -/** - * pump_messages - Workqueue function which processes spi message queue - * @data: pointer to private data of SSP driver - * - * This function checks if there is any spi message in the queue that - * needs processing and delegate control to appropriate function - * do_polling_transfer()/do_interrupt_dma_transfer() - 
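do_polling_transfer() above keeps calling readwriter() until the transfer completes, but gives up with STATE_ERROR once a jiffies deadline passes. A userspace sketch of that "make progress or hit the deadline" loop follows, using wall-clock time() instead of jiffies and a fake progress counter instead of real FIFO traffic; the one-second timeout and the totals are arbitrary.

#include <stdio.h>
#include <time.h>

#define POLL_TIMEOUT_SEC 1   /* stand-in for SPI_POLLING_TIMEOUT jiffies */

int main(void)
{
	time_t deadline = time(NULL) + POLL_TIMEOUT_SEC;
	unsigned done = 0, total = 1000000;

	/* Same shape as the do_polling_transfer() inner loop: keep making
	 * progress, but bail out with an error if the deadline passes. */
	while (done < total) {
		done += 1000;   /* pretend readwriter() moved some data */
		if (time(NULL) > deadline) {
			fprintf(stderr, "polling transfer timed out\n");
			return 1;
		}
	}
	printf("transfer complete: %u units\n", done);
	return 0;
}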
* based on the kind of the transfer - * - */ -static void pump_messages(struct work_struct *work) -{ - struct pl022 *pl022 = - container_of(work, struct pl022, pump_messages); - unsigned long flags; - - /* Lock queue and check for queue work */ - spin_lock_irqsave(&pl022->queue_lock, flags); - if (list_empty(&pl022->queue) || !pl022->running) { - pl022->busy = false; - spin_unlock_irqrestore(&pl022->queue_lock, flags); - return; - } - /* Make sure we are not already running a message */ - if (pl022->cur_msg) { - spin_unlock_irqrestore(&pl022->queue_lock, flags); - return; - } - /* Extract head of queue */ - pl022->cur_msg = - list_entry(pl022->queue.next, struct spi_message, queue); - - list_del_init(&pl022->cur_msg->queue); - pl022->busy = true; - spin_unlock_irqrestore(&pl022->queue_lock, flags); - - /* Initial message state */ - pl022->cur_msg->state = STATE_START; - pl022->cur_transfer = list_entry(pl022->cur_msg->transfers.next, - struct spi_transfer, - transfer_list); - - /* Setup the SPI using the per chip configuration */ - pl022->cur_chip = spi_get_ctldata(pl022->cur_msg->spi); - /* - * We enable the core voltage and clocks here, then the clocks - * and core will be disabled when giveback() is called in each method - * (poll/interrupt/DMA) - */ - amba_vcore_enable(pl022->adev); - amba_pclk_enable(pl022->adev); - clk_enable(pl022->clk); - restore_state(pl022); - flush(pl022); - - if (pl022->cur_chip->xfer_type == POLLING_TRANSFER) - do_polling_transfer(pl022); - else - do_interrupt_dma_transfer(pl022); -} - - -static int __init init_queue(struct pl022 *pl022) -{ - INIT_LIST_HEAD(&pl022->queue); - spin_lock_init(&pl022->queue_lock); - - pl022->running = false; - pl022->busy = false; - - tasklet_init(&pl022->pump_transfers, - pump_transfers, (unsigned long)pl022); - - INIT_WORK(&pl022->pump_messages, pump_messages); - pl022->workqueue = create_singlethread_workqueue( - dev_name(pl022->master->dev.parent)); - if (pl022->workqueue == NULL) - return -EBUSY; - - return 0; -} - - -static int start_queue(struct pl022 *pl022) -{ - unsigned long flags; - - spin_lock_irqsave(&pl022->queue_lock, flags); - - if (pl022->running || pl022->busy) { - spin_unlock_irqrestore(&pl022->queue_lock, flags); - return -EBUSY; - } - - pl022->running = true; - pl022->cur_msg = NULL; - pl022->cur_transfer = NULL; - pl022->cur_chip = NULL; - spin_unlock_irqrestore(&pl022->queue_lock, flags); - - queue_work(pl022->workqueue, &pl022->pump_messages); - - return 0; -} - - -static int stop_queue(struct pl022 *pl022) -{ - unsigned long flags; - unsigned limit = 500; - int status = 0; - - spin_lock_irqsave(&pl022->queue_lock, flags); - - /* This is a bit lame, but is optimized for the common execution path. - * A wait_queue on the pl022->busy could be used, but then the common - * execution path (pump_messages) would be required to call wake_up or - * friends on every SPI message. Do this instead */ - while ((!list_empty(&pl022->queue) || pl022->busy) && limit--) { - spin_unlock_irqrestore(&pl022->queue_lock, flags); - msleep(10); - spin_lock_irqsave(&pl022->queue_lock, flags); - } - - if (!list_empty(&pl022->queue) || pl022->busy) - status = -EBUSY; - else - pl022->running = false; - - spin_unlock_irqrestore(&pl022->queue_lock, flags); - - return status; -} - -static int destroy_queue(struct pl022 *pl022) -{ - int status; - - status = stop_queue(pl022); - /* we are unloading the module or failing to load (only two calls - * to this routine), and neither call can handle a return value. 
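stop_queue() above deliberately avoids a wait queue and instead polls the busy flag with a sleep in between, giving up after a fixed number of attempts, exactly as its comment explains. The following standalone sketch shows that bounded poll-and-sleep pattern; the busy counter that decrements by itself is a stand-in for work completing elsewhere.

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned limit = 500;
	int busy = 3;   /* pretend three messages are still in flight */

	/* Same idea as stop_queue(): poll and sleep instead of a wait
	 * queue, giving up after a fixed number of attempts. */
	while (busy && limit--) {
		usleep(10000);   /* the driver uses msleep(10) */
		busy--;          /* in real life, work completes elsewhere */
	}

	if (busy)
		printf("queue did not drain: -EBUSY\n");
	else
		printf("queue drained, safe to stop\n");
	return 0;
}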
- * However, destroy_workqueue calls flush_workqueue, and that will - * block until all work is done. If the reason that stop_queue - * timed out is that the work will never finish, then it does no - * good to call destroy_workqueue, so return anyway. */ - if (status != 0) - return status; - - destroy_workqueue(pl022->workqueue); - - return 0; -} - -static int verify_controller_parameters(struct pl022 *pl022, - struct pl022_config_chip const *chip_info) -{ - if ((chip_info->iface < SSP_INTERFACE_MOTOROLA_SPI) - || (chip_info->iface > SSP_INTERFACE_UNIDIRECTIONAL)) { - dev_err(&pl022->adev->dev, - "interface is configured incorrectly\n"); - return -EINVAL; - } - if ((chip_info->iface == SSP_INTERFACE_UNIDIRECTIONAL) && - (!pl022->vendor->unidir)) { - dev_err(&pl022->adev->dev, - "unidirectional mode not supported in this " - "hardware version\n"); - return -EINVAL; - } - if ((chip_info->hierarchy != SSP_MASTER) - && (chip_info->hierarchy != SSP_SLAVE)) { - dev_err(&pl022->adev->dev, - "hierarchy is configured incorrectly\n"); - return -EINVAL; - } - if ((chip_info->com_mode != INTERRUPT_TRANSFER) - && (chip_info->com_mode != DMA_TRANSFER) - && (chip_info->com_mode != POLLING_TRANSFER)) { - dev_err(&pl022->adev->dev, - "Communication mode is configured incorrectly\n"); - return -EINVAL; - } - if ((chip_info->rx_lev_trig < SSP_RX_1_OR_MORE_ELEM) - || (chip_info->rx_lev_trig > SSP_RX_32_OR_MORE_ELEM)) { - dev_err(&pl022->adev->dev, - "RX FIFO Trigger Level is configured incorrectly\n"); - return -EINVAL; - } - if ((chip_info->tx_lev_trig < SSP_TX_1_OR_MORE_EMPTY_LOC) - || (chip_info->tx_lev_trig > SSP_TX_32_OR_MORE_EMPTY_LOC)) { - dev_err(&pl022->adev->dev, - "TX FIFO Trigger Level is configured incorrectly\n"); - return -EINVAL; - } - if (chip_info->iface == SSP_INTERFACE_NATIONAL_MICROWIRE) { - if ((chip_info->ctrl_len < SSP_BITS_4) - || (chip_info->ctrl_len > SSP_BITS_32)) { - dev_err(&pl022->adev->dev, - "CTRL LEN is configured incorrectly\n"); - return -EINVAL; - } - if ((chip_info->wait_state != SSP_MWIRE_WAIT_ZERO) - && (chip_info->wait_state != SSP_MWIRE_WAIT_ONE)) { - dev_err(&pl022->adev->dev, - "Wait State is configured incorrectly\n"); - return -EINVAL; - } - /* Half duplex is only available in the ST Micro version */ - if (pl022->vendor->extended_cr) { - if ((chip_info->duplex != - SSP_MICROWIRE_CHANNEL_FULL_DUPLEX) - && (chip_info->duplex != - SSP_MICROWIRE_CHANNEL_HALF_DUPLEX)) { - dev_err(&pl022->adev->dev, - "Microwire duplex mode is configured incorrectly\n"); - return -EINVAL; - } - } else { - if (chip_info->duplex != SSP_MICROWIRE_CHANNEL_FULL_DUPLEX) - dev_err(&pl022->adev->dev, - "Microwire half duplex mode requested," - " but this is only available in the" - " ST version of PL022\n"); - return -EINVAL; - } - } - return 0; -} - -/** - * pl022_transfer - transfer function registered to SPI master framework - * @spi: spi device which is requesting transfer - * @msg: spi message which is to handled is queued to driver queue - * - * This function is registered to the SPI framework for this SPI master - * controller. It will queue the spi_message in the queue of driver if - * the queue is not stopped and return. 
- */ -static int pl022_transfer(struct spi_device *spi, struct spi_message *msg) -{ - struct pl022 *pl022 = spi_master_get_devdata(spi->master); - unsigned long flags; - - spin_lock_irqsave(&pl022->queue_lock, flags); - - if (!pl022->running) { - spin_unlock_irqrestore(&pl022->queue_lock, flags); - return -ESHUTDOWN; - } - msg->actual_length = 0; - msg->status = -EINPROGRESS; - msg->state = STATE_START; - - list_add_tail(&msg->queue, &pl022->queue); - if (pl022->running && !pl022->busy) - queue_work(pl022->workqueue, &pl022->pump_messages); - - spin_unlock_irqrestore(&pl022->queue_lock, flags); - return 0; -} - -static int calculate_effective_freq(struct pl022 *pl022, - int freq, - struct ssp_clock_params *clk_freq) -{ - /* Lets calculate the frequency parameters */ - u16 cpsdvsr = 2; - u16 scr = 0; - bool freq_found = false; - u32 rate; - u32 max_tclk; - u32 min_tclk; - - rate = clk_get_rate(pl022->clk); - /* cpsdvscr = 2 & scr 0 */ - max_tclk = (rate / (CPSDVR_MIN * (1 + SCR_MIN))); - /* cpsdvsr = 254 & scr = 255 */ - min_tclk = (rate / (CPSDVR_MAX * (1 + SCR_MAX))); - - if ((freq <= max_tclk) && (freq >= min_tclk)) { - while (cpsdvsr <= CPSDVR_MAX && !freq_found) { - while (scr <= SCR_MAX && !freq_found) { - if ((rate / - (cpsdvsr * (1 + scr))) > freq) - scr += 1; - else { - /* - * This bool is made true when - * effective frequency >= - * target frequency is found - */ - freq_found = true; - if ((rate / - (cpsdvsr * (1 + scr))) != freq) { - if (scr == SCR_MIN) { - cpsdvsr -= 2; - scr = SCR_MAX; - } else - scr -= 1; - } - } - } - if (!freq_found) { - cpsdvsr += 2; - scr = SCR_MIN; - } - } - if (cpsdvsr != 0) { - dev_dbg(&pl022->adev->dev, - "SSP Effective Frequency is %u\n", - (rate / (cpsdvsr * (1 + scr)))); - clk_freq->cpsdvsr = (u8) (cpsdvsr & 0xFF); - clk_freq->scr = (u8) (scr & 0xFF); - dev_dbg(&pl022->adev->dev, - "SSP cpsdvsr = %d, scr = %d\n", - clk_freq->cpsdvsr, clk_freq->scr); - } - } else { - dev_err(&pl022->adev->dev, - "controller data is incorrect: out of range frequency"); - return -EINVAL; - } - return 0; -} - - -/* - * A piece of default chip info unless the platform - * supplies it. - */ -static const struct pl022_config_chip pl022_default_chip_info = { - .com_mode = POLLING_TRANSFER, - .iface = SSP_INTERFACE_MOTOROLA_SPI, - .hierarchy = SSP_SLAVE, - .slave_tx_disable = DO_NOT_DRIVE_TX, - .rx_lev_trig = SSP_RX_1_OR_MORE_ELEM, - .tx_lev_trig = SSP_TX_1_OR_MORE_EMPTY_LOC, - .ctrl_len = SSP_BITS_8, - .wait_state = SSP_MWIRE_WAIT_ZERO, - .duplex = SSP_MICROWIRE_CHANNEL_FULL_DUPLEX, - .cs_control = null_cs_control, -}; - - -/** - * pl022_setup - setup function registered to SPI master framework - * @spi: spi device which is requesting setup - * - * This function is registered to the SPI framework for this SPI master - * controller. If it is the first time when setup is called by this device, - * this function will initialize the runtime state for this chip and save - * the same in the device structure. Else it will update the runtime info - * with the updated chip info. Nothing is really being written to the - * controller hardware here, that is not done until the actual transfer - * commence. 
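For reference, the divider search in calculate_effective_freq() above reduces to picking CPSDVSR (even, 2..254) and SCR (0..255) such that SSPCLK / (CPSDVSR * (1 + SCR)) lands near the requested rate. The following is a minimal standalone sketch of that relationship, not driver code: it brute-forces the fastest setting that does not exceed the requested maximum, whereas the driver walks the two counters with a stepping search.

#include <stdint.h>
#include <stdio.h>

struct ssp_div {
	unsigned int cpsdvsr;	/* even prescaler, 2..254 */
	unsigned int scr;	/* serial clock rate field, 0..255 */
};

/* brute-force the fastest effective rate that does not exceed max_hz */
static int pick_ssp_divisors(uint32_t sspclk, uint32_t max_hz, struct ssp_div *out)
{
	uint32_t best = 0;
	unsigned int cpsdvsr, scr;

	for (cpsdvsr = 2; cpsdvsr <= 254; cpsdvsr += 2) {
		for (scr = 0; scr <= 255; scr++) {
			uint32_t rate = sspclk / (cpsdvsr * (1 + scr));

			if (rate <= max_hz && rate > best) {
				best = rate;
				out->cpsdvsr = cpsdvsr;
				out->scr = scr;
			}
		}
	}
	return best ? 0 : -1;	/* -1: even 254 * 256 is not a large enough divider */
}

int main(void)
{
	struct ssp_div d;
	uint32_t sspclk = 48000000, max_hz = 1000000;

	if (pick_ssp_divisors(sspclk, max_hz, &d) == 0)
		printf("cpsdvsr=%u scr=%u -> %u Hz\n", d.cpsdvsr, d.scr,
		       sspclk / (d.cpsdvsr * (1 + d.scr)));
	return 0;
}

With the illustrative 48 MHz SSPCLK above this finds cpsdvsr=2, scr=23, i.e. exactly 1 MHz.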
- */ -static int pl022_setup(struct spi_device *spi) -{ - struct pl022_config_chip const *chip_info; - struct chip_data *chip; - struct ssp_clock_params clk_freq = {0, }; - int status = 0; - struct pl022 *pl022 = spi_master_get_devdata(spi->master); - unsigned int bits = spi->bits_per_word; - u32 tmp; - - if (!spi->max_speed_hz) - return -EINVAL; - - /* Get controller_state if one is supplied */ - chip = spi_get_ctldata(spi); - - if (chip == NULL) { - chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL); - if (!chip) { - dev_err(&spi->dev, - "cannot allocate controller state\n"); - return -ENOMEM; - } - dev_dbg(&spi->dev, - "allocated memory for controller's runtime state\n"); - } - - /* Get controller data if one is supplied */ - chip_info = spi->controller_data; - - if (chip_info == NULL) { - chip_info = &pl022_default_chip_info; - /* spi_board_info.controller_data not is supplied */ - dev_dbg(&spi->dev, - "using default controller_data settings\n"); - } else - dev_dbg(&spi->dev, - "using user supplied controller_data settings\n"); - - /* - * We can override with custom divisors, else we use the board - * frequency setting - */ - if ((0 == chip_info->clk_freq.cpsdvsr) - && (0 == chip_info->clk_freq.scr)) { - status = calculate_effective_freq(pl022, - spi->max_speed_hz, - &clk_freq); - if (status < 0) - goto err_config_params; - } else { - memcpy(&clk_freq, &chip_info->clk_freq, sizeof(clk_freq)); - if ((clk_freq.cpsdvsr % 2) != 0) - clk_freq.cpsdvsr = - clk_freq.cpsdvsr - 1; - } - if ((clk_freq.cpsdvsr < CPSDVR_MIN) - || (clk_freq.cpsdvsr > CPSDVR_MAX)) { - dev_err(&spi->dev, - "cpsdvsr is configured incorrectly\n"); - goto err_config_params; - } - - - status = verify_controller_parameters(pl022, chip_info); - if (status) { - dev_err(&spi->dev, "controller data is incorrect"); - goto err_config_params; - } - - /* Now set controller state based on controller data */ - chip->xfer_type = chip_info->com_mode; - if (!chip_info->cs_control) { - chip->cs_control = null_cs_control; - dev_warn(&spi->dev, - "chip select function is NULL for this chip\n"); - } else - chip->cs_control = chip_info->cs_control; - - if (bits <= 3) { - /* PL022 doesn't support less than 4-bits */ - status = -ENOTSUPP; - goto err_config_params; - } else if (bits <= 8) { - dev_dbg(&spi->dev, "4 <= n <=8 bits per word\n"); - chip->n_bytes = 1; - chip->read = READING_U8; - chip->write = WRITING_U8; - } else if (bits <= 16) { - dev_dbg(&spi->dev, "9 <= n <= 16 bits per word\n"); - chip->n_bytes = 2; - chip->read = READING_U16; - chip->write = WRITING_U16; - } else { - if (pl022->vendor->max_bpw >= 32) { - dev_dbg(&spi->dev, "17 <= n <= 32 bits per word\n"); - chip->n_bytes = 4; - chip->read = READING_U32; - chip->write = WRITING_U32; - } else { - dev_err(&spi->dev, - "illegal data size for this controller!\n"); - dev_err(&spi->dev, - "a standard pl022 can only handle " - "1 <= n <= 16 bit words\n"); - status = -ENOTSUPP; - goto err_config_params; - } - } - - /* Now Initialize all register settings required for this chip */ - chip->cr0 = 0; - chip->cr1 = 0; - chip->dmacr = 0; - chip->cpsr = 0; - if ((chip_info->com_mode == DMA_TRANSFER) - && ((pl022->master_info)->enable_dma)) { - chip->enable_dma = true; - dev_dbg(&spi->dev, "DMA mode set in controller state\n"); - SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED, - SSP_DMACR_MASK_RXDMAE, 0); - SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED, - SSP_DMACR_MASK_TXDMAE, 1); - } else { - chip->enable_dma = false; - dev_dbg(&spi->dev, "DMA mode NOT set in controller state\n"); - 
SSP_WRITE_BITS(chip->dmacr, SSP_DMA_DISABLED, - SSP_DMACR_MASK_RXDMAE, 0); - SSP_WRITE_BITS(chip->dmacr, SSP_DMA_DISABLED, - SSP_DMACR_MASK_TXDMAE, 1); - } - - chip->cpsr = clk_freq.cpsdvsr; - - /* Special setup for the ST micro extended control registers */ - if (pl022->vendor->extended_cr) { - u32 etx; - - if (pl022->vendor->pl023) { - /* These bits are only in the PL023 */ - SSP_WRITE_BITS(chip->cr1, chip_info->clkdelay, - SSP_CR1_MASK_FBCLKDEL_ST, 13); - } else { - /* These bits are in the PL022 but not PL023 */ - SSP_WRITE_BITS(chip->cr0, chip_info->duplex, - SSP_CR0_MASK_HALFDUP_ST, 5); - SSP_WRITE_BITS(chip->cr0, chip_info->ctrl_len, - SSP_CR0_MASK_CSS_ST, 16); - SSP_WRITE_BITS(chip->cr0, chip_info->iface, - SSP_CR0_MASK_FRF_ST, 21); - SSP_WRITE_BITS(chip->cr1, chip_info->wait_state, - SSP_CR1_MASK_MWAIT_ST, 6); - } - SSP_WRITE_BITS(chip->cr0, bits - 1, - SSP_CR0_MASK_DSS_ST, 0); - - if (spi->mode & SPI_LSB_FIRST) { - tmp = SSP_RX_LSB; - etx = SSP_TX_LSB; - } else { - tmp = SSP_RX_MSB; - etx = SSP_TX_MSB; - } - SSP_WRITE_BITS(chip->cr1, tmp, SSP_CR1_MASK_RENDN_ST, 4); - SSP_WRITE_BITS(chip->cr1, etx, SSP_CR1_MASK_TENDN_ST, 5); - SSP_WRITE_BITS(chip->cr1, chip_info->rx_lev_trig, - SSP_CR1_MASK_RXIFLSEL_ST, 7); - SSP_WRITE_BITS(chip->cr1, chip_info->tx_lev_trig, - SSP_CR1_MASK_TXIFLSEL_ST, 10); - } else { - SSP_WRITE_BITS(chip->cr0, bits - 1, - SSP_CR0_MASK_DSS, 0); - SSP_WRITE_BITS(chip->cr0, chip_info->iface, - SSP_CR0_MASK_FRF, 4); - } - - /* Stuff that is common for all versions */ - if (spi->mode & SPI_CPOL) - tmp = SSP_CLK_POL_IDLE_HIGH; - else - tmp = SSP_CLK_POL_IDLE_LOW; - SSP_WRITE_BITS(chip->cr0, tmp, SSP_CR0_MASK_SPO, 6); - - if (spi->mode & SPI_CPHA) - tmp = SSP_CLK_SECOND_EDGE; - else - tmp = SSP_CLK_FIRST_EDGE; - SSP_WRITE_BITS(chip->cr0, tmp, SSP_CR0_MASK_SPH, 7); - - SSP_WRITE_BITS(chip->cr0, clk_freq.scr, SSP_CR0_MASK_SCR, 8); - /* Loopback is available on all versions except PL023 */ - if (pl022->vendor->loopback) { - if (spi->mode & SPI_LOOP) - tmp = LOOPBACK_ENABLED; - else - tmp = LOOPBACK_DISABLED; - SSP_WRITE_BITS(chip->cr1, tmp, SSP_CR1_MASK_LBM, 0); - } - SSP_WRITE_BITS(chip->cr1, SSP_DISABLED, SSP_CR1_MASK_SSE, 1); - SSP_WRITE_BITS(chip->cr1, chip_info->hierarchy, SSP_CR1_MASK_MS, 2); - SSP_WRITE_BITS(chip->cr1, chip_info->slave_tx_disable, SSP_CR1_MASK_SOD, 3); - - /* Save controller_state */ - spi_set_ctldata(spi, chip); - return status; - err_config_params: - spi_set_ctldata(spi, NULL); - kfree(chip); - return status; -} - -/** - * pl022_cleanup - cleanup function registered to SPI master framework - * @spi: spi device which is requesting cleanup - * - * This function is registered to the SPI framework for this SPI master - * controller. It will free the runtime state of chip. 
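The SSP_WRITE_BITS() calls above build CR0/CR1 by read-modify-write field updates. Below is a standalone illustration of that packing; write_field() is a hypothetical helper, not the driver's macro, the shift values are the non-ST CR0 positions used in the code above (DSS at bit 0, FRF at 4, SPO at 6, SPH at 7, SCR at 8), and the field widths follow the PL022 register layout. The example values fed in are arbitrary.

#include <stdint.h>
#include <stdio.h>

/* hypothetical helper: place "val" into the "width"-bit field at "shift" */
static uint32_t write_field(uint32_t reg, uint32_t val,
			    unsigned int shift, unsigned int width)
{
	uint32_t mask = ((1u << width) - 1) << shift;

	return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t cr0 = 0;

	cr0 = write_field(cr0, 8 - 1, 0, 4);	/* DSS: data size select, bits_per_word - 1 */
	cr0 = write_field(cr0, 0, 4, 2);	/* FRF: frame format (chip_info->iface)     */
	cr0 = write_field(cr0, 1, 6, 1);	/* SPO: clock polarity (from SPI_CPOL)      */
	cr0 = write_field(cr0, 0, 7, 1);	/* SPH: clock phase (from SPI_CPHA)         */
	cr0 = write_field(cr0, 23, 8, 8);	/* SCR: clock rate field from clk_freq.scr  */

	printf("CR0 = 0x%08x\n", cr0);
	return 0;
}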
- */ -static void pl022_cleanup(struct spi_device *spi) -{ - struct chip_data *chip = spi_get_ctldata(spi); - - spi_set_ctldata(spi, NULL); - kfree(chip); -} - - -static int __devinit -pl022_probe(struct amba_device *adev, const struct amba_id *id) -{ - struct device *dev = &adev->dev; - struct pl022_ssp_controller *platform_info = adev->dev.platform_data; - struct spi_master *master; - struct pl022 *pl022 = NULL; /*Data for this driver */ - int status = 0; - - dev_info(&adev->dev, - "ARM PL022 driver, device ID: 0x%08x\n", adev->periphid); - if (platform_info == NULL) { - dev_err(&adev->dev, "probe - no platform data supplied\n"); - status = -ENODEV; - goto err_no_pdata; - } - - /* Allocate master with space for data */ - master = spi_alloc_master(dev, sizeof(struct pl022)); - if (master == NULL) { - dev_err(&adev->dev, "probe - cannot alloc SPI master\n"); - status = -ENOMEM; - goto err_no_master; - } - - pl022 = spi_master_get_devdata(master); - pl022->master = master; - pl022->master_info = platform_info; - pl022->adev = adev; - pl022->vendor = id->data; - - /* - * Bus Number Which has been Assigned to this SSP controller - * on this board - */ - master->bus_num = platform_info->bus_id; - master->num_chipselect = platform_info->num_chipselect; - master->cleanup = pl022_cleanup; - master->setup = pl022_setup; - master->transfer = pl022_transfer; - - /* - * Supports mode 0-3, loopback, and active low CS. Transfers are - * always MS bit first on the original pl022. - */ - master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP; - if (pl022->vendor->extended_cr) - master->mode_bits |= SPI_LSB_FIRST; - - dev_dbg(&adev->dev, "BUSNO: %d\n", master->bus_num); - - status = amba_request_regions(adev, NULL); - if (status) - goto err_no_ioregion; - - pl022->phybase = adev->res.start; - pl022->virtbase = ioremap(adev->res.start, resource_size(&adev->res)); - if (pl022->virtbase == NULL) { - status = -ENOMEM; - goto err_no_ioremap; - } - printk(KERN_INFO "pl022: mapped registers from 0x%08x to %p\n", - adev->res.start, pl022->virtbase); - - pl022->clk = clk_get(&adev->dev, NULL); - if (IS_ERR(pl022->clk)) { - status = PTR_ERR(pl022->clk); - dev_err(&adev->dev, "could not retrieve SSP/SPI bus clock\n"); - goto err_no_clk; - } - - /* Disable SSP */ - writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)), - SSP_CR1(pl022->virtbase)); - load_ssp_default_config(pl022); - - status = request_irq(adev->irq[0], pl022_interrupt_handler, 0, "pl022", - pl022); - if (status < 0) { - dev_err(&adev->dev, "probe - cannot get IRQ (%d)\n", status); - goto err_no_irq; - } - - /* Get DMA channels */ - if (platform_info->enable_dma) { - status = pl022_dma_probe(pl022); - if (status != 0) - platform_info->enable_dma = 0; - } - - /* Initialize and start queue */ - status = init_queue(pl022); - if (status != 0) { - dev_err(&adev->dev, "probe - problem initializing queue\n"); - goto err_init_queue; - } - status = start_queue(pl022); - if (status != 0) { - dev_err(&adev->dev, "probe - problem starting queue\n"); - goto err_start_queue; - } - /* Register with the SPI framework */ - amba_set_drvdata(adev, pl022); - status = spi_register_master(master); - if (status != 0) { - dev_err(&adev->dev, - "probe - problem registering spi master\n"); - goto err_spi_register; - } - dev_dbg(dev, "probe succeeded\n"); - /* - * Disable the silicon block pclk and any voltage domain and just - * power it up and clock it when it's needed - */ - amba_pclk_disable(adev); - amba_vcore_disable(adev); - return 0; - - 
err_spi_register: - err_start_queue: - err_init_queue: - destroy_queue(pl022); - pl022_dma_remove(pl022); - free_irq(adev->irq[0], pl022); - err_no_irq: - clk_put(pl022->clk); - err_no_clk: - iounmap(pl022->virtbase); - err_no_ioremap: - amba_release_regions(adev); - err_no_ioregion: - spi_master_put(master); - err_no_master: - err_no_pdata: - return status; -} - -static int __devexit -pl022_remove(struct amba_device *adev) -{ - struct pl022 *pl022 = amba_get_drvdata(adev); - int status = 0; - if (!pl022) - return 0; - - /* Remove the queue */ - status = destroy_queue(pl022); - if (status != 0) { - dev_err(&adev->dev, - "queue remove failed (%d)\n", status); - return status; - } - load_ssp_default_config(pl022); - pl022_dma_remove(pl022); - free_irq(adev->irq[0], pl022); - clk_disable(pl022->clk); - clk_put(pl022->clk); - iounmap(pl022->virtbase); - amba_release_regions(adev); - tasklet_disable(&pl022->pump_transfers); - spi_unregister_master(pl022->master); - spi_master_put(pl022->master); - amba_set_drvdata(adev, NULL); - dev_dbg(&adev->dev, "remove succeeded\n"); - return 0; -} - -#ifdef CONFIG_PM -static int pl022_suspend(struct amba_device *adev, pm_message_t state) -{ - struct pl022 *pl022 = amba_get_drvdata(adev); - int status = 0; - - status = stop_queue(pl022); - if (status) { - dev_warn(&adev->dev, "suspend cannot stop queue\n"); - return status; - } - - amba_vcore_enable(adev); - amba_pclk_enable(adev); - load_ssp_default_config(pl022); - amba_pclk_disable(adev); - amba_vcore_disable(adev); - dev_dbg(&adev->dev, "suspended\n"); - return 0; -} - -static int pl022_resume(struct amba_device *adev) -{ - struct pl022 *pl022 = amba_get_drvdata(adev); - int status = 0; - - /* Start the queue running */ - status = start_queue(pl022); - if (status) - dev_err(&adev->dev, "problem starting queue (%d)\n", status); - else - dev_dbg(&adev->dev, "resumed\n"); - - return status; -} -#else -#define pl022_suspend NULL -#define pl022_resume NULL -#endif /* CONFIG_PM */ - -static struct vendor_data vendor_arm = { - .fifodepth = 8, - .max_bpw = 16, - .unidir = false, - .extended_cr = false, - .pl023 = false, - .loopback = true, -}; - - -static struct vendor_data vendor_st = { - .fifodepth = 32, - .max_bpw = 32, - .unidir = false, - .extended_cr = true, - .pl023 = false, - .loopback = true, -}; - -static struct vendor_data vendor_st_pl023 = { - .fifodepth = 32, - .max_bpw = 32, - .unidir = false, - .extended_cr = true, - .pl023 = true, - .loopback = false, -}; - -static struct vendor_data vendor_db5500_pl023 = { - .fifodepth = 32, - .max_bpw = 32, - .unidir = false, - .extended_cr = true, - .pl023 = true, - .loopback = true, -}; - -static struct amba_id pl022_ids[] = { - { - /* - * ARM PL022 variant, this has a 16bit wide - * and 8 locations deep TX/RX FIFO - */ - .id = 0x00041022, - .mask = 0x000fffff, - .data = &vendor_arm, - }, - { - /* - * ST Micro derivative, this has 32bit wide - * and 32 locations deep TX/RX FIFO - */ - .id = 0x01080022, - .mask = 0xffffffff, - .data = &vendor_st, - }, - { - /* - * ST-Ericsson derivative "PL023" (this is not - * an official ARM number), this is a PL022 SSP block - * stripped to SPI mode only, it has 32bit wide - * and 32 locations deep TX/RX FIFO but no extended - * CR0/CR1 register - */ - .id = 0x00080023, - .mask = 0xffffffff, - .data = &vendor_st_pl023, - }, - { - .id = 0x10080023, - .mask = 0xffffffff, - .data = &vendor_db5500_pl023, - }, - { 0, 0 }, -}; - -static struct amba_driver pl022_driver = { - .drv = { - .name = "ssp-pl022", - }, - .id_table = 
pl022_ids, - .probe = pl022_probe, - .remove = __devexit_p(pl022_remove), - .suspend = pl022_suspend, - .resume = pl022_resume, -}; - - -static int __init pl022_init(void) -{ - return amba_driver_register(&pl022_driver); -} - -subsys_initcall(pl022_init); - -static void __exit pl022_exit(void) -{ - amba_driver_unregister(&pl022_driver); -} - -module_exit(pl022_exit); - -MODULE_AUTHOR("Linus Walleij "); -MODULE_DESCRIPTION("PL022 SSP Controller Driver"); -MODULE_LICENSE("GPL"); diff --git a/drivers/spi/ath79_spi.c b/drivers/spi/ath79_spi.c deleted file mode 100644 index fcff810..0000000 --- a/drivers/spi/ath79_spi.c +++ /dev/null @@ -1,292 +0,0 @@ -/* - * SPI controller driver for the Atheros AR71XX/AR724X/AR913X SoCs - * - * Copyright (C) 2009-2011 Gabor Juhos - * - * This driver has been based on the spi-gpio.c: - * Copyright (C) 2006,2008 David Brownell - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#define DRV_NAME "ath79-spi" - -struct ath79_spi { - struct spi_bitbang bitbang; - u32 ioc_base; - u32 reg_ctrl; - void __iomem *base; -}; - -static inline u32 ath79_spi_rr(struct ath79_spi *sp, unsigned reg) -{ - return ioread32(sp->base + reg); -} - -static inline void ath79_spi_wr(struct ath79_spi *sp, unsigned reg, u32 val) -{ - iowrite32(val, sp->base + reg); -} - -static inline struct ath79_spi *ath79_spidev_to_sp(struct spi_device *spi) -{ - return spi_master_get_devdata(spi->master); -} - -static void ath79_spi_chipselect(struct spi_device *spi, int is_active) -{ - struct ath79_spi *sp = ath79_spidev_to_sp(spi); - int cs_high = (spi->mode & SPI_CS_HIGH) ? is_active : !is_active; - - if (is_active) { - /* set initial clock polarity */ - if (spi->mode & SPI_CPOL) - sp->ioc_base |= AR71XX_SPI_IOC_CLK; - else - sp->ioc_base &= ~AR71XX_SPI_IOC_CLK; - - ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, sp->ioc_base); - } - - if (spi->chip_select) { - struct ath79_spi_controller_data *cdata = spi->controller_data; - - /* SPI is normally active-low */ - gpio_set_value(cdata->gpio, cs_high); - } else { - if (cs_high) - sp->ioc_base |= AR71XX_SPI_IOC_CS0; - else - sp->ioc_base &= ~AR71XX_SPI_IOC_CS0; - - ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, sp->ioc_base); - } - -} - -static int ath79_spi_setup_cs(struct spi_device *spi) -{ - struct ath79_spi *sp = ath79_spidev_to_sp(spi); - struct ath79_spi_controller_data *cdata; - - cdata = spi->controller_data; - if (spi->chip_select && !cdata) - return -EINVAL; - - /* enable GPIO mode */ - ath79_spi_wr(sp, AR71XX_SPI_REG_FS, AR71XX_SPI_FS_GPIO); - - /* save CTRL register */ - sp->reg_ctrl = ath79_spi_rr(sp, AR71XX_SPI_REG_CTRL); - sp->ioc_base = ath79_spi_rr(sp, AR71XX_SPI_REG_IOC); - - /* TODO: setup speed? 
*/ - ath79_spi_wr(sp, AR71XX_SPI_REG_CTRL, 0x43); - - if (spi->chip_select) { - int status = 0; - - status = gpio_request(cdata->gpio, dev_name(&spi->dev)); - if (status) - return status; - - status = gpio_direction_output(cdata->gpio, - spi->mode & SPI_CS_HIGH); - if (status) { - gpio_free(cdata->gpio); - return status; - } - } else { - if (spi->mode & SPI_CS_HIGH) - sp->ioc_base |= AR71XX_SPI_IOC_CS0; - else - sp->ioc_base &= ~AR71XX_SPI_IOC_CS0; - ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, sp->ioc_base); - } - - return 0; -} - -static void ath79_spi_cleanup_cs(struct spi_device *spi) -{ - struct ath79_spi *sp = ath79_spidev_to_sp(spi); - - if (spi->chip_select) { - struct ath79_spi_controller_data *cdata = spi->controller_data; - gpio_free(cdata->gpio); - } - - /* restore CTRL register */ - ath79_spi_wr(sp, AR71XX_SPI_REG_CTRL, sp->reg_ctrl); - /* disable GPIO mode */ - ath79_spi_wr(sp, AR71XX_SPI_REG_FS, 0); -} - -static int ath79_spi_setup(struct spi_device *spi) -{ - int status = 0; - - if (spi->bits_per_word > 32) - return -EINVAL; - - if (!spi->controller_state) { - status = ath79_spi_setup_cs(spi); - if (status) - return status; - } - - status = spi_bitbang_setup(spi); - if (status && !spi->controller_state) - ath79_spi_cleanup_cs(spi); - - return status; -} - -static void ath79_spi_cleanup(struct spi_device *spi) -{ - ath79_spi_cleanup_cs(spi); - spi_bitbang_cleanup(spi); -} - -static u32 ath79_spi_txrx_mode0(struct spi_device *spi, unsigned nsecs, - u32 word, u8 bits) -{ - struct ath79_spi *sp = ath79_spidev_to_sp(spi); - u32 ioc = sp->ioc_base; - - /* clock starts at inactive polarity */ - for (word <<= (32 - bits); likely(bits); bits--) { - u32 out; - - if (word & (1 << 31)) - out = ioc | AR71XX_SPI_IOC_DO; - else - out = ioc & ~AR71XX_SPI_IOC_DO; - - /* setup MSB (to slave) on trailing edge */ - ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, out); - ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, out | AR71XX_SPI_IOC_CLK); - - word <<= 1; - } - - return ath79_spi_rr(sp, AR71XX_SPI_REG_RDS); -} - -static __devinit int ath79_spi_probe(struct platform_device *pdev) -{ - struct spi_master *master; - struct ath79_spi *sp; - struct ath79_spi_platform_data *pdata; - struct resource *r; - int ret; - - master = spi_alloc_master(&pdev->dev, sizeof(*sp)); - if (master == NULL) { - dev_err(&pdev->dev, "failed to allocate spi master\n"); - return -ENOMEM; - } - - sp = spi_master_get_devdata(master); - platform_set_drvdata(pdev, sp); - - pdata = pdev->dev.platform_data; - - master->setup = ath79_spi_setup; - master->cleanup = ath79_spi_cleanup; - if (pdata) { - master->bus_num = pdata->bus_num; - master->num_chipselect = pdata->num_chipselect; - } else { - master->bus_num = -1; - master->num_chipselect = 1; - } - - sp->bitbang.master = spi_master_get(master); - sp->bitbang.chipselect = ath79_spi_chipselect; - sp->bitbang.txrx_word[SPI_MODE_0] = ath79_spi_txrx_mode0; - sp->bitbang.setup_transfer = spi_bitbang_setup_transfer; - sp->bitbang.flags = SPI_CS_HIGH; - - r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (r == NULL) { - ret = -ENOENT; - goto err_put_master; - } - - sp->base = ioremap(r->start, r->end - r->start + 1); - if (!sp->base) { - ret = -ENXIO; - goto err_put_master; - } - - ret = spi_bitbang_start(&sp->bitbang); - if (ret) - goto err_unmap; - - return 0; - -err_unmap: - iounmap(sp->base); -err_put_master: - platform_set_drvdata(pdev, NULL); - spi_master_put(sp->bitbang.master); - - return ret; -} - -static __devexit int ath79_spi_remove(struct platform_device *pdev) -{ - struct ath79_spi *sp = 
platform_get_drvdata(pdev); - - spi_bitbang_stop(&sp->bitbang); - iounmap(sp->base); - platform_set_drvdata(pdev, NULL); - spi_master_put(sp->bitbang.master); - - return 0; -} - -static struct platform_driver ath79_spi_driver = { - .probe = ath79_spi_probe, - .remove = __devexit_p(ath79_spi_remove), - .driver = { - .name = DRV_NAME, - .owner = THIS_MODULE, - }, -}; - -static __init int ath79_spi_init(void) -{ - return platform_driver_register(&ath79_spi_driver); -} -module_init(ath79_spi_init); - -static __exit void ath79_spi_exit(void) -{ - platform_driver_unregister(&ath79_spi_driver); -} -module_exit(ath79_spi_exit); - -MODULE_DESCRIPTION("SPI controller driver for Atheros AR71XX/AR724X/AR913X"); -MODULE_AUTHOR("Gabor Juhos "); -MODULE_LICENSE("GPL v2"); -MODULE_ALIAS("platform:" DRV_NAME); diff --git a/drivers/spi/atmel_spi.c b/drivers/spi/atmel_spi.c deleted file mode 100644 index 08711e9..0000000 --- a/drivers/spi/atmel_spi.c +++ /dev/null @@ -1,940 +0,0 @@ -/* - * Driver for Atmel AT32 and AT91 SPI Controllers - * - * Copyright (C) 2006 Atmel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#include "atmel_spi.h" - -/* - * The core SPI transfer engine just talks to a register bank to set up - * DMA transfers; transfer queue progress is driven by IRQs. The clock - * framework provides the base clock, subdivided for each spi_device. - */ -struct atmel_spi { - spinlock_t lock; - - void __iomem *regs; - int irq; - struct clk *clk; - struct platform_device *pdev; - struct spi_device *stay; - - u8 stopping; - struct list_head queue; - struct spi_transfer *current_transfer; - unsigned long current_remaining_bytes; - struct spi_transfer *next_transfer; - unsigned long next_remaining_bytes; - - void *buffer; - dma_addr_t buffer_dma; -}; - -/* Controller-specific per-slave state */ -struct atmel_spi_device { - unsigned int npcs_pin; - u32 csr; -}; - -#define BUFFER_SIZE PAGE_SIZE -#define INVALID_DMA_ADDRESS 0xffffffff - -/* - * Version 2 of the SPI controller has - * - CR.LASTXFER - * - SPI_MR.DIV32 may become FDIV or must-be-zero (here: always zero) - * - SPI_SR.TXEMPTY, SPI_SR.NSSR (and corresponding irqs) - * - SPI_CSRx.CSAAT - * - SPI_CSRx.SBCR allows faster clocking - * - * We can determine the controller version by reading the VERSION - * register, but I haven't checked that it exists on all chips, and - * this is cheaper anyway. - */ -static bool atmel_spi_is_v2(void) -{ - return !cpu_is_at91rm9200(); -} - -/* - * Earlier SPI controllers (e.g. on at91rm9200) have a design bug whereby - * they assume that spi slave device state will not change on deselect, so - * that automagic deselection is OK. ("NPCSx rises if no data is to be - * transmitted") Not so! Workaround uses nCSx pins as GPIOs; or newer - * controllers have CSAAT and friends. - * - * Since the CSAAT functionality is a bit weird on newer controllers as - * well, we use GPIO to control nCSx pins on all controllers, updating - * MR.PCS to avoid confusing the controller. Using GPIOs also lets us - * support active-high chipselects despite the controller's belief that - * only active-low devices/systems exists. 
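As sketched below, the PCS value that cs_activate()/cs_deactivate() further down program into MR is "one cold": all four bits set except the selected chip's, with 0xf meaning nothing selected (hence the 0x0e written for CS0 on v2 parts). The helpers are illustrative only; the real chip-select toggling happens through GPIOs as described above, and the field position comes from the register header removed later in this patch.

#include <stdint.h>
#include <stdio.h>

#define PCS_OFFSET	16			/* MR.PCS, 4 bits at bit 16 */
#define PCS_MASK	(0xfu << PCS_OFFSET)

static uint32_t mr_select_cs(uint32_t mr, unsigned int chip_select)
{
	uint32_t pcs = ~(1u << chip_select) & 0xfu;	/* one-cold encoding */

	return (mr & ~PCS_MASK) | (pcs << PCS_OFFSET);
}

static uint32_t mr_deselect_all(uint32_t mr)
{
	return (mr & ~PCS_MASK) | (0xfu << PCS_OFFSET);
}

int main(void)
{
	uint32_t mr = 0;
	unsigned int cs;

	for (cs = 0; cs < 4; cs++)
		printf("cs%u  -> PCS=0x%x\n", cs,
		       (mr_select_cs(mr, cs) & PCS_MASK) >> PCS_OFFSET);
	printf("idle -> PCS=0x%x\n",
	       (mr_deselect_all(mr) & PCS_MASK) >> PCS_OFFSET);
	return 0;
}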
- * - * However, at91rm9200 has a second erratum whereby nCS0 doesn't work - * right when driven with GPIO. ("Mode Fault does not allow more than one - * Master on Chip Select 0.") No workaround exists for that ... so for - * nCS0 on that chip, we (a) don't use the GPIO, (b) can't support CS_HIGH, - * and (c) will trigger that first erratum in some cases. - * - * TODO: Test if the atmel_spi_is_v2() branch below works on - * AT91RM9200 if we use some other register than CSR0. However, don't - * do this unconditionally since AP7000 has an errata where the BITS - * field in CSR0 overrides all other CSRs. - */ - -static void cs_activate(struct atmel_spi *as, struct spi_device *spi) -{ - struct atmel_spi_device *asd = spi->controller_state; - unsigned active = spi->mode & SPI_CS_HIGH; - u32 mr; - - if (atmel_spi_is_v2()) { - /* - * Always use CSR0. This ensures that the clock - * switches to the correct idle polarity before we - * toggle the CS. - */ - spi_writel(as, CSR0, asd->csr); - spi_writel(as, MR, SPI_BF(PCS, 0x0e) | SPI_BIT(MODFDIS) - | SPI_BIT(MSTR)); - mr = spi_readl(as, MR); - gpio_set_value(asd->npcs_pin, active); - } else { - u32 cpol = (spi->mode & SPI_CPOL) ? SPI_BIT(CPOL) : 0; - int i; - u32 csr; - - /* Make sure clock polarity is correct */ - for (i = 0; i < spi->master->num_chipselect; i++) { - csr = spi_readl(as, CSR0 + 4 * i); - if ((csr ^ cpol) & SPI_BIT(CPOL)) - spi_writel(as, CSR0 + 4 * i, - csr ^ SPI_BIT(CPOL)); - } - - mr = spi_readl(as, MR); - mr = SPI_BFINS(PCS, ~(1 << spi->chip_select), mr); - if (spi->chip_select != 0) - gpio_set_value(asd->npcs_pin, active); - spi_writel(as, MR, mr); - } - - dev_dbg(&spi->dev, "activate %u%s, mr %08x\n", - asd->npcs_pin, active ? " (high)" : "", - mr); -} - -static void cs_deactivate(struct atmel_spi *as, struct spi_device *spi) -{ - struct atmel_spi_device *asd = spi->controller_state; - unsigned active = spi->mode & SPI_CS_HIGH; - u32 mr; - - /* only deactivate *this* device; sometimes transfers to - * another device may be active when this routine is called. - */ - mr = spi_readl(as, MR); - if (~SPI_BFEXT(PCS, mr) & (1 << spi->chip_select)) { - mr = SPI_BFINS(PCS, 0xf, mr); - spi_writel(as, MR, mr); - } - - dev_dbg(&spi->dev, "DEactivate %u%s, mr %08x\n", - asd->npcs_pin, active ? " (low)" : "", - mr); - - if (atmel_spi_is_v2() || spi->chip_select != 0) - gpio_set_value(asd->npcs_pin, !active); -} - -static inline int atmel_spi_xfer_is_last(struct spi_message *msg, - struct spi_transfer *xfer) -{ - return msg->transfers.prev == &xfer->transfer_list; -} - -static inline int atmel_spi_xfer_can_be_chained(struct spi_transfer *xfer) -{ - return xfer->delay_usecs == 0 && !xfer->cs_change; -} - -static void atmel_spi_next_xfer_data(struct spi_master *master, - struct spi_transfer *xfer, - dma_addr_t *tx_dma, - dma_addr_t *rx_dma, - u32 *plen) -{ - struct atmel_spi *as = spi_master_get_devdata(master); - u32 len = *plen; - - /* use scratch buffer only when rx or tx data is unspecified */ - if (xfer->rx_buf) - *rx_dma = xfer->rx_dma + xfer->len - *plen; - else { - *rx_dma = as->buffer_dma; - if (len > BUFFER_SIZE) - len = BUFFER_SIZE; - } - if (xfer->tx_buf) - *tx_dma = xfer->tx_dma + xfer->len - *plen; - else { - *tx_dma = as->buffer_dma; - if (len > BUFFER_SIZE) - len = BUFFER_SIZE; - memset(as->buffer, 0, len); - dma_sync_single_for_device(&as->pdev->dev, - as->buffer_dma, len, DMA_TO_DEVICE); - } - - *plen = len; -} - -/* - * Submit next transfer for DMA. 
- * lock is held, spi irq is blocked - */ -static void atmel_spi_next_xfer(struct spi_master *master, - struct spi_message *msg) -{ - struct atmel_spi *as = spi_master_get_devdata(master); - struct spi_transfer *xfer; - u32 len, remaining; - u32 ieval; - dma_addr_t tx_dma, rx_dma; - - if (!as->current_transfer) - xfer = list_entry(msg->transfers.next, - struct spi_transfer, transfer_list); - else if (!as->next_transfer) - xfer = list_entry(as->current_transfer->transfer_list.next, - struct spi_transfer, transfer_list); - else - xfer = NULL; - - if (xfer) { - spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS)); - - len = xfer->len; - atmel_spi_next_xfer_data(master, xfer, &tx_dma, &rx_dma, &len); - remaining = xfer->len - len; - - spi_writel(as, RPR, rx_dma); - spi_writel(as, TPR, tx_dma); - - if (msg->spi->bits_per_word > 8) - len >>= 1; - spi_writel(as, RCR, len); - spi_writel(as, TCR, len); - - dev_dbg(&msg->spi->dev, - " start xfer %p: len %u tx %p/%08x rx %p/%08x\n", - xfer, xfer->len, xfer->tx_buf, xfer->tx_dma, - xfer->rx_buf, xfer->rx_dma); - } else { - xfer = as->next_transfer; - remaining = as->next_remaining_bytes; - } - - as->current_transfer = xfer; - as->current_remaining_bytes = remaining; - - if (remaining > 0) - len = remaining; - else if (!atmel_spi_xfer_is_last(msg, xfer) - && atmel_spi_xfer_can_be_chained(xfer)) { - xfer = list_entry(xfer->transfer_list.next, - struct spi_transfer, transfer_list); - len = xfer->len; - } else - xfer = NULL; - - as->next_transfer = xfer; - - if (xfer) { - u32 total; - - total = len; - atmel_spi_next_xfer_data(master, xfer, &tx_dma, &rx_dma, &len); - as->next_remaining_bytes = total - len; - - spi_writel(as, RNPR, rx_dma); - spi_writel(as, TNPR, tx_dma); - - if (msg->spi->bits_per_word > 8) - len >>= 1; - spi_writel(as, RNCR, len); - spi_writel(as, TNCR, len); - - dev_dbg(&msg->spi->dev, - " next xfer %p: len %u tx %p/%08x rx %p/%08x\n", - xfer, xfer->len, xfer->tx_buf, xfer->tx_dma, - xfer->rx_buf, xfer->rx_dma); - ieval = SPI_BIT(ENDRX) | SPI_BIT(OVRES); - } else { - spi_writel(as, RNCR, 0); - spi_writel(as, TNCR, 0); - ieval = SPI_BIT(RXBUFF) | SPI_BIT(ENDRX) | SPI_BIT(OVRES); - } - - /* REVISIT: We're waiting for ENDRX before we start the next - * transfer because we need to handle some difficult timing - * issues otherwise. If we wait for ENDTX in one transfer and - * then starts waiting for ENDRX in the next, it's difficult - * to tell the difference between the ENDRX interrupt we're - * actually waiting for and the ENDRX interrupt of the - * previous transfer. - * - * It should be doable, though. Just not now... 
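When a transfer supplies only one of rx_buf/tx_buf, atmel_spi_next_xfer_data() above substitutes the driver's single scratch page for the missing side and limits each programmed chunk to that page's size, zero-filling it when it stands in for tx. A userspace sketch of that chunking policy follows; SCRATCH_SIZE and next_chunk_len() are illustrative names under those assumptions, not driver API.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SCRATCH_SIZE	4096u	/* stand-in for the PAGE_SIZE bounce buffer */

/*
 * How many bytes may be queued at once: real rx and tx buffers can take
 * the whole remaining length, while the shared scratch buffer caps the
 * chunk at its own size (and is zeroed when it stands in for tx data).
 */
static unsigned int next_chunk_len(const void *tx_buf, void *rx_buf,
				   unsigned int remaining, uint8_t *scratch)
{
	unsigned int len = remaining;

	if (!rx_buf || !tx_buf) {
		if (len > SCRATCH_SIZE)
			len = SCRATCH_SIZE;
		if (!tx_buf)
			memset(scratch, 0, len);	/* send zeroes for rx-only */
	}
	return len;
}

int main(void)
{
	static uint8_t scratch[SCRATCH_SIZE];
	uint8_t rx[10000];
	unsigned int remaining = sizeof(rx);

	/* an rx-only transfer of 10000 bytes is cut into 4096 + 4096 + 1808 */
	while (remaining) {
		unsigned int len = next_chunk_len(NULL, rx, remaining, scratch);

		printf("chunk of %u bytes\n", len);
		remaining -= len;
	}
	return 0;
}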
- */ - spi_writel(as, IER, ieval); - spi_writel(as, PTCR, SPI_BIT(TXTEN) | SPI_BIT(RXTEN)); -} - -static void atmel_spi_next_message(struct spi_master *master) -{ - struct atmel_spi *as = spi_master_get_devdata(master); - struct spi_message *msg; - struct spi_device *spi; - - BUG_ON(as->current_transfer); - - msg = list_entry(as->queue.next, struct spi_message, queue); - spi = msg->spi; - - dev_dbg(master->dev.parent, "start message %p for %s\n", - msg, dev_name(&spi->dev)); - - /* select chip if it's not still active */ - if (as->stay) { - if (as->stay != spi) { - cs_deactivate(as, as->stay); - cs_activate(as, spi); - } - as->stay = NULL; - } else - cs_activate(as, spi); - - atmel_spi_next_xfer(master, msg); -} - -/* - * For DMA, tx_buf/tx_dma have the same relationship as rx_buf/rx_dma: - * - The buffer is either valid for CPU access, else NULL - * - If the buffer is valid, so is its DMA address - * - * This driver manages the dma address unless message->is_dma_mapped. - */ -static int -atmel_spi_dma_map_xfer(struct atmel_spi *as, struct spi_transfer *xfer) -{ - struct device *dev = &as->pdev->dev; - - xfer->tx_dma = xfer->rx_dma = INVALID_DMA_ADDRESS; - if (xfer->tx_buf) { - /* tx_buf is a const void* where we need a void * for the dma - * mapping */ - void *nonconst_tx = (void *)xfer->tx_buf; - - xfer->tx_dma = dma_map_single(dev, - nonconst_tx, xfer->len, - DMA_TO_DEVICE); - if (dma_mapping_error(dev, xfer->tx_dma)) - return -ENOMEM; - } - if (xfer->rx_buf) { - xfer->rx_dma = dma_map_single(dev, - xfer->rx_buf, xfer->len, - DMA_FROM_DEVICE); - if (dma_mapping_error(dev, xfer->rx_dma)) { - if (xfer->tx_buf) - dma_unmap_single(dev, - xfer->tx_dma, xfer->len, - DMA_TO_DEVICE); - return -ENOMEM; - } - } - return 0; -} - -static void atmel_spi_dma_unmap_xfer(struct spi_master *master, - struct spi_transfer *xfer) -{ - if (xfer->tx_dma != INVALID_DMA_ADDRESS) - dma_unmap_single(master->dev.parent, xfer->tx_dma, - xfer->len, DMA_TO_DEVICE); - if (xfer->rx_dma != INVALID_DMA_ADDRESS) - dma_unmap_single(master->dev.parent, xfer->rx_dma, - xfer->len, DMA_FROM_DEVICE); -} - -static void -atmel_spi_msg_done(struct spi_master *master, struct atmel_spi *as, - struct spi_message *msg, int status, int stay) -{ - if (!stay || status < 0) - cs_deactivate(as, msg->spi); - else - as->stay = msg->spi; - - list_del(&msg->queue); - msg->status = status; - - dev_dbg(master->dev.parent, - "xfer complete: %u bytes transferred\n", - msg->actual_length); - - spin_unlock(&as->lock); - msg->complete(msg->context); - spin_lock(&as->lock); - - as->current_transfer = NULL; - as->next_transfer = NULL; - - /* continue if needed */ - if (list_empty(&as->queue) || as->stopping) - spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS)); - else - atmel_spi_next_message(master); -} - -static irqreturn_t -atmel_spi_interrupt(int irq, void *dev_id) -{ - struct spi_master *master = dev_id; - struct atmel_spi *as = spi_master_get_devdata(master); - struct spi_message *msg; - struct spi_transfer *xfer; - u32 status, pending, imr; - int ret = IRQ_NONE; - - spin_lock(&as->lock); - - xfer = as->current_transfer; - msg = list_entry(as->queue.next, struct spi_message, queue); - - imr = spi_readl(as, IMR); - status = spi_readl(as, SR); - pending = status & imr; - - if (pending & SPI_BIT(OVRES)) { - int timeout; - - ret = IRQ_HANDLED; - - spi_writel(as, IDR, (SPI_BIT(RXBUFF) | SPI_BIT(ENDRX) - | SPI_BIT(OVRES))); - - /* - * When we get an overrun, we disregard the current - * transfer. 
Data will not be copied back from any - * bounce buffer and msg->actual_len will not be - * updated with the last xfer. - * - * We will also not process any remaning transfers in - * the message. - * - * First, stop the transfer and unmap the DMA buffers. - */ - spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS)); - if (!msg->is_dma_mapped) - atmel_spi_dma_unmap_xfer(master, xfer); - - /* REVISIT: udelay in irq is unfriendly */ - if (xfer->delay_usecs) - udelay(xfer->delay_usecs); - - dev_warn(master->dev.parent, "overrun (%u/%u remaining)\n", - spi_readl(as, TCR), spi_readl(as, RCR)); - - /* - * Clean up DMA registers and make sure the data - * registers are empty. - */ - spi_writel(as, RNCR, 0); - spi_writel(as, TNCR, 0); - spi_writel(as, RCR, 0); - spi_writel(as, TCR, 0); - for (timeout = 1000; timeout; timeout--) - if (spi_readl(as, SR) & SPI_BIT(TXEMPTY)) - break; - if (!timeout) - dev_warn(master->dev.parent, - "timeout waiting for TXEMPTY"); - while (spi_readl(as, SR) & SPI_BIT(RDRF)) - spi_readl(as, RDR); - - /* Clear any overrun happening while cleaning up */ - spi_readl(as, SR); - - atmel_spi_msg_done(master, as, msg, -EIO, 0); - } else if (pending & (SPI_BIT(RXBUFF) | SPI_BIT(ENDRX))) { - ret = IRQ_HANDLED; - - spi_writel(as, IDR, pending); - - if (as->current_remaining_bytes == 0) { - msg->actual_length += xfer->len; - - if (!msg->is_dma_mapped) - atmel_spi_dma_unmap_xfer(master, xfer); - - /* REVISIT: udelay in irq is unfriendly */ - if (xfer->delay_usecs) - udelay(xfer->delay_usecs); - - if (atmel_spi_xfer_is_last(msg, xfer)) { - /* report completed message */ - atmel_spi_msg_done(master, as, msg, 0, - xfer->cs_change); - } else { - if (xfer->cs_change) { - cs_deactivate(as, msg->spi); - udelay(1); - cs_activate(as, msg->spi); - } - - /* - * Not done yet. Submit the next transfer. - * - * FIXME handle protocol options for xfer - */ - atmel_spi_next_xfer(master, msg); - } - } else { - /* - * Keep going, we still have data to send in - * the current transfer. - */ - atmel_spi_next_xfer(master, msg); - } - } - - spin_unlock(&as->lock); - - return ret; -} - -static int atmel_spi_setup(struct spi_device *spi) -{ - struct atmel_spi *as; - struct atmel_spi_device *asd; - u32 scbr, csr; - unsigned int bits = spi->bits_per_word; - unsigned long bus_hz; - unsigned int npcs_pin; - int ret; - - as = spi_master_get_devdata(spi->master); - - if (as->stopping) - return -ESHUTDOWN; - - if (spi->chip_select > spi->master->num_chipselect) { - dev_dbg(&spi->dev, - "setup: invalid chipselect %u (%u defined)\n", - spi->chip_select, spi->master->num_chipselect); - return -EINVAL; - } - - if (bits < 8 || bits > 16) { - dev_dbg(&spi->dev, - "setup: invalid bits_per_word %u (8 to 16)\n", - bits); - return -EINVAL; - } - - /* see notes above re chipselect */ - if (!atmel_spi_is_v2() - && spi->chip_select == 0 - && (spi->mode & SPI_CS_HIGH)) { - dev_dbg(&spi->dev, "setup: can't be active-high\n"); - return -EINVAL; - } - - /* v1 chips start out at half the peripheral bus speed. */ - bus_hz = clk_get_rate(as->clk); - if (!atmel_spi_is_v2()) - bus_hz /= 2; - - if (spi->max_speed_hz) { - /* - * Calculate the lowest divider that satisfies the - * constraint, assuming div32/fdiv/mbz == 0. - */ - scbr = DIV_ROUND_UP(bus_hz, spi->max_speed_hz); - - /* - * If the resulting divider doesn't fit into the - * register bitfield, we can't satisfy the constraint. 
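To make the divider constraint above concrete: SCBR is the 8-bit baud-rate divider, the achieved clock is bus_hz / SCBR, and rounding the division up keeps the result at or below the requested ceiling. A minimal standalone sketch, with illustrative numbers and names:

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define SCBR_BITS		8	/* SCBR field width, see SPI_SCBR_SIZE */

/*
 * Smallest divider that does not overshoot the requested ceiling, i.e.
 * the fastest legal clock.  Returns 0 if even the maximum SCBR value
 * cannot reach a low enough rate.
 */
static unsigned int pick_scbr(uint32_t bus_hz, uint32_t max_speed_hz)
{
	unsigned int scbr = DIV_ROUND_UP(bus_hz, max_speed_hz);

	if (scbr >= (1u << SCBR_BITS))
		return 0;
	return scbr;
}

int main(void)
{
	uint32_t bus_hz = 66000000 / 2;		/* v1 parts run at half bus speed */
	unsigned int scbr = pick_scbr(bus_hz, 5000000);

	if (scbr)
		printf("scbr=%u -> %u Hz (asked for <= 5000000)\n",
		       scbr, bus_hz / scbr);
	return 0;
}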
- */ - if (scbr >= (1 << SPI_SCBR_SIZE)) { - dev_dbg(&spi->dev, - "setup: %d Hz too slow, scbr %u; min %ld Hz\n", - spi->max_speed_hz, scbr, bus_hz/255); - return -EINVAL; - } - } else - /* speed zero means "as slow as possible" */ - scbr = 0xff; - - csr = SPI_BF(SCBR, scbr) | SPI_BF(BITS, bits - 8); - if (spi->mode & SPI_CPOL) - csr |= SPI_BIT(CPOL); - if (!(spi->mode & SPI_CPHA)) - csr |= SPI_BIT(NCPHA); - - /* DLYBS is mostly irrelevant since we manage chipselect using GPIOs. - * - * DLYBCT would add delays between words, slowing down transfers. - * It could potentially be useful to cope with DMA bottlenecks, but - * in those cases it's probably best to just use a lower bitrate. - */ - csr |= SPI_BF(DLYBS, 0); - csr |= SPI_BF(DLYBCT, 0); - - /* chipselect must have been muxed as GPIO (e.g. in board setup) */ - npcs_pin = (unsigned int)spi->controller_data; - asd = spi->controller_state; - if (!asd) { - asd = kzalloc(sizeof(struct atmel_spi_device), GFP_KERNEL); - if (!asd) - return -ENOMEM; - - ret = gpio_request(npcs_pin, dev_name(&spi->dev)); - if (ret) { - kfree(asd); - return ret; - } - - asd->npcs_pin = npcs_pin; - spi->controller_state = asd; - gpio_direction_output(npcs_pin, !(spi->mode & SPI_CS_HIGH)); - } else { - unsigned long flags; - - spin_lock_irqsave(&as->lock, flags); - if (as->stay == spi) - as->stay = NULL; - cs_deactivate(as, spi); - spin_unlock_irqrestore(&as->lock, flags); - } - - asd->csr = csr; - - dev_dbg(&spi->dev, - "setup: %lu Hz bpw %u mode 0x%x -> csr%d %08x\n", - bus_hz / scbr, bits, spi->mode, spi->chip_select, csr); - - if (!atmel_spi_is_v2()) - spi_writel(as, CSR0 + 4 * spi->chip_select, csr); - - return 0; -} - -static int atmel_spi_transfer(struct spi_device *spi, struct spi_message *msg) -{ - struct atmel_spi *as; - struct spi_transfer *xfer; - unsigned long flags; - struct device *controller = spi->master->dev.parent; - u8 bits; - struct atmel_spi_device *asd; - - as = spi_master_get_devdata(spi->master); - - dev_dbg(controller, "new message %p submitted for %s\n", - msg, dev_name(&spi->dev)); - - if (unlikely(list_empty(&msg->transfers))) - return -EINVAL; - - if (as->stopping) - return -ESHUTDOWN; - - list_for_each_entry(xfer, &msg->transfers, transfer_list) { - if (!(xfer->tx_buf || xfer->rx_buf) && xfer->len) { - dev_dbg(&spi->dev, "missing rx or tx buf\n"); - return -EINVAL; - } - - if (xfer->bits_per_word) { - asd = spi->controller_state; - bits = (asd->csr >> 4) & 0xf; - if (bits != xfer->bits_per_word - 8) { - dev_dbg(&spi->dev, "you can't yet change " - "bits_per_word in transfers\n"); - return -ENOPROTOOPT; - } - } - - /* FIXME implement these protocol options!! */ - if (xfer->speed_hz) { - dev_dbg(&spi->dev, "no protocol options yet\n"); - return -ENOPROTOOPT; - } - - /* - * DMA map early, for performance (empties dcache ASAP) and - * better fault reporting. This is a DMA-only driver. - * - * NOTE that if dma_unmap_single() ever starts to do work on - * platforms supported by this driver, we would need to clean - * up mappings for previously-mapped transfers. 
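The per-transfer bits_per_word check above recovers the word size from the cached CSR value: CSR.BITS is a 4-bit field at bit 4 holding bits_per_word - 8 (see the register header removed later in this patch). A tiny standalone sketch of that encoding:

#include <stdint.h>
#include <stdio.h>

#define BITS_OFFSET	4	/* CSR.BITS, see SPI_BITS_OFFSET/SIZE */
#define BITS_WIDTH	4

static uint32_t csr_encode_bpw(unsigned int bpw)
{
	return (uint32_t)(bpw - 8) << BITS_OFFSET;	/* 8..16 bpw map to 0..8 */
}

static unsigned int csr_decode_bpw(uint32_t csr)
{
	return ((csr >> BITS_OFFSET) & ((1u << BITS_WIDTH) - 1)) + 8;
}

int main(void)
{
	unsigned int bpw;

	for (bpw = 8; bpw <= 16; bpw++) {
		uint32_t csr = csr_encode_bpw(bpw);

		printf("bpw=%2u -> BITS=%u -> decoded %u\n",
		       bpw, (csr >> BITS_OFFSET) & 0xf, csr_decode_bpw(csr));
	}
	return 0;
}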
- */ - if (!msg->is_dma_mapped) { - if (atmel_spi_dma_map_xfer(as, xfer) < 0) - return -ENOMEM; - } - } - -#ifdef VERBOSE - list_for_each_entry(xfer, &msg->transfers, transfer_list) { - dev_dbg(controller, - " xfer %p: len %u tx %p/%08x rx %p/%08x\n", - xfer, xfer->len, - xfer->tx_buf, xfer->tx_dma, - xfer->rx_buf, xfer->rx_dma); - } -#endif - - msg->status = -EINPROGRESS; - msg->actual_length = 0; - - spin_lock_irqsave(&as->lock, flags); - list_add_tail(&msg->queue, &as->queue); - if (!as->current_transfer) - atmel_spi_next_message(spi->master); - spin_unlock_irqrestore(&as->lock, flags); - - return 0; -} - -static void atmel_spi_cleanup(struct spi_device *spi) -{ - struct atmel_spi *as = spi_master_get_devdata(spi->master); - struct atmel_spi_device *asd = spi->controller_state; - unsigned gpio = (unsigned) spi->controller_data; - unsigned long flags; - - if (!asd) - return; - - spin_lock_irqsave(&as->lock, flags); - if (as->stay == spi) { - as->stay = NULL; - cs_deactivate(as, spi); - } - spin_unlock_irqrestore(&as->lock, flags); - - spi->controller_state = NULL; - gpio_free(gpio); - kfree(asd); -} - -/*-------------------------------------------------------------------------*/ - -static int __init atmel_spi_probe(struct platform_device *pdev) -{ - struct resource *regs; - int irq; - struct clk *clk; - int ret; - struct spi_master *master; - struct atmel_spi *as; - - regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!regs) - return -ENXIO; - - irq = platform_get_irq(pdev, 0); - if (irq < 0) - return irq; - - clk = clk_get(&pdev->dev, "spi_clk"); - if (IS_ERR(clk)) - return PTR_ERR(clk); - - /* setup spi core then atmel-specific driver state */ - ret = -ENOMEM; - master = spi_alloc_master(&pdev->dev, sizeof *as); - if (!master) - goto out_free; - - /* the spi->mode bits understood by this driver: */ - master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; - - master->bus_num = pdev->id; - master->num_chipselect = 4; - master->setup = atmel_spi_setup; - master->transfer = atmel_spi_transfer; - master->cleanup = atmel_spi_cleanup; - platform_set_drvdata(pdev, master); - - as = spi_master_get_devdata(master); - - /* - * Scratch buffer is used for throwaway rx and tx data. - * It's coherent to minimize dcache pollution. - */ - as->buffer = dma_alloc_coherent(&pdev->dev, BUFFER_SIZE, - &as->buffer_dma, GFP_KERNEL); - if (!as->buffer) - goto out_free; - - spin_lock_init(&as->lock); - INIT_LIST_HEAD(&as->queue); - as->pdev = pdev; - as->regs = ioremap(regs->start, resource_size(regs)); - if (!as->regs) - goto out_free_buffer; - as->irq = irq; - as->clk = clk; - - ret = request_irq(irq, atmel_spi_interrupt, 0, - dev_name(&pdev->dev), master); - if (ret) - goto out_unmap_regs; - - /* Initialize the hardware */ - clk_enable(clk); - spi_writel(as, CR, SPI_BIT(SWRST)); - spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */ - spi_writel(as, MR, SPI_BIT(MSTR) | SPI_BIT(MODFDIS)); - spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS)); - spi_writel(as, CR, SPI_BIT(SPIEN)); - - /* go! 
*/ - dev_info(&pdev->dev, "Atmel SPI Controller at 0x%08lx (irq %d)\n", - (unsigned long)regs->start, irq); - - ret = spi_register_master(master); - if (ret) - goto out_reset_hw; - - return 0; - -out_reset_hw: - spi_writel(as, CR, SPI_BIT(SWRST)); - spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */ - clk_disable(clk); - free_irq(irq, master); -out_unmap_regs: - iounmap(as->regs); -out_free_buffer: - dma_free_coherent(&pdev->dev, BUFFER_SIZE, as->buffer, - as->buffer_dma); -out_free: - clk_put(clk); - spi_master_put(master); - return ret; -} - -static int __exit atmel_spi_remove(struct platform_device *pdev) -{ - struct spi_master *master = platform_get_drvdata(pdev); - struct atmel_spi *as = spi_master_get_devdata(master); - struct spi_message *msg; - - /* reset the hardware and block queue progress */ - spin_lock_irq(&as->lock); - as->stopping = 1; - spi_writel(as, CR, SPI_BIT(SWRST)); - spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */ - spi_readl(as, SR); - spin_unlock_irq(&as->lock); - - /* Terminate remaining queued transfers */ - list_for_each_entry(msg, &as->queue, queue) { - /* REVISIT unmapping the dma is a NOP on ARM and AVR32 - * but we shouldn't depend on that... - */ - msg->status = -ESHUTDOWN; - msg->complete(msg->context); - } - - dma_free_coherent(&pdev->dev, BUFFER_SIZE, as->buffer, - as->buffer_dma); - - clk_disable(as->clk); - clk_put(as->clk); - free_irq(as->irq, master); - iounmap(as->regs); - - spi_unregister_master(master); - - return 0; -} - -#ifdef CONFIG_PM - -static int atmel_spi_suspend(struct platform_device *pdev, pm_message_t mesg) -{ - struct spi_master *master = platform_get_drvdata(pdev); - struct atmel_spi *as = spi_master_get_devdata(master); - - clk_disable(as->clk); - return 0; -} - -static int atmel_spi_resume(struct platform_device *pdev) -{ - struct spi_master *master = platform_get_drvdata(pdev); - struct atmel_spi *as = spi_master_get_devdata(master); - - clk_enable(as->clk); - return 0; -} - -#else -#define atmel_spi_suspend NULL -#define atmel_spi_resume NULL -#endif - - -static struct platform_driver atmel_spi_driver = { - .driver = { - .name = "atmel_spi", - .owner = THIS_MODULE, - }, - .suspend = atmel_spi_suspend, - .resume = atmel_spi_resume, - .remove = __exit_p(atmel_spi_remove), -}; - -static int __init atmel_spi_init(void) -{ - return platform_driver_probe(&atmel_spi_driver, atmel_spi_probe); -} -module_init(atmel_spi_init); - -static void __exit atmel_spi_exit(void) -{ - platform_driver_unregister(&atmel_spi_driver); -} -module_exit(atmel_spi_exit); - -MODULE_DESCRIPTION("Atmel AT32/AT91 SPI Controller driver"); -MODULE_AUTHOR("Haavard Skinnemoen (Atmel)"); -MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:atmel_spi"); diff --git a/drivers/spi/atmel_spi.h b/drivers/spi/atmel_spi.h deleted file mode 100644 index 6e06b6a..0000000 --- a/drivers/spi/atmel_spi.h +++ /dev/null @@ -1,167 +0,0 @@ -/* - * Register definitions for Atmel Serial Peripheral Interface (SPI) - * - * Copyright (C) 2006 Atmel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ -#ifndef __ATMEL_SPI_H__ -#define __ATMEL_SPI_H__ - -/* SPI register offsets */ -#define SPI_CR 0x0000 -#define SPI_MR 0x0004 -#define SPI_RDR 0x0008 -#define SPI_TDR 0x000c -#define SPI_SR 0x0010 -#define SPI_IER 0x0014 -#define SPI_IDR 0x0018 -#define SPI_IMR 0x001c -#define SPI_CSR0 0x0030 -#define SPI_CSR1 0x0034 -#define SPI_CSR2 0x0038 -#define SPI_CSR3 0x003c -#define SPI_RPR 0x0100 -#define SPI_RCR 0x0104 -#define SPI_TPR 0x0108 -#define SPI_TCR 0x010c -#define SPI_RNPR 0x0110 -#define SPI_RNCR 0x0114 -#define SPI_TNPR 0x0118 -#define SPI_TNCR 0x011c -#define SPI_PTCR 0x0120 -#define SPI_PTSR 0x0124 - -/* Bitfields in CR */ -#define SPI_SPIEN_OFFSET 0 -#define SPI_SPIEN_SIZE 1 -#define SPI_SPIDIS_OFFSET 1 -#define SPI_SPIDIS_SIZE 1 -#define SPI_SWRST_OFFSET 7 -#define SPI_SWRST_SIZE 1 -#define SPI_LASTXFER_OFFSET 24 -#define SPI_LASTXFER_SIZE 1 - -/* Bitfields in MR */ -#define SPI_MSTR_OFFSET 0 -#define SPI_MSTR_SIZE 1 -#define SPI_PS_OFFSET 1 -#define SPI_PS_SIZE 1 -#define SPI_PCSDEC_OFFSET 2 -#define SPI_PCSDEC_SIZE 1 -#define SPI_FDIV_OFFSET 3 -#define SPI_FDIV_SIZE 1 -#define SPI_MODFDIS_OFFSET 4 -#define SPI_MODFDIS_SIZE 1 -#define SPI_LLB_OFFSET 7 -#define SPI_LLB_SIZE 1 -#define SPI_PCS_OFFSET 16 -#define SPI_PCS_SIZE 4 -#define SPI_DLYBCS_OFFSET 24 -#define SPI_DLYBCS_SIZE 8 - -/* Bitfields in RDR */ -#define SPI_RD_OFFSET 0 -#define SPI_RD_SIZE 16 - -/* Bitfields in TDR */ -#define SPI_TD_OFFSET 0 -#define SPI_TD_SIZE 16 - -/* Bitfields in SR */ -#define SPI_RDRF_OFFSET 0 -#define SPI_RDRF_SIZE 1 -#define SPI_TDRE_OFFSET 1 -#define SPI_TDRE_SIZE 1 -#define SPI_MODF_OFFSET 2 -#define SPI_MODF_SIZE 1 -#define SPI_OVRES_OFFSET 3 -#define SPI_OVRES_SIZE 1 -#define SPI_ENDRX_OFFSET 4 -#define SPI_ENDRX_SIZE 1 -#define SPI_ENDTX_OFFSET 5 -#define SPI_ENDTX_SIZE 1 -#define SPI_RXBUFF_OFFSET 6 -#define SPI_RXBUFF_SIZE 1 -#define SPI_TXBUFE_OFFSET 7 -#define SPI_TXBUFE_SIZE 1 -#define SPI_NSSR_OFFSET 8 -#define SPI_NSSR_SIZE 1 -#define SPI_TXEMPTY_OFFSET 9 -#define SPI_TXEMPTY_SIZE 1 -#define SPI_SPIENS_OFFSET 16 -#define SPI_SPIENS_SIZE 1 - -/* Bitfields in CSR0 */ -#define SPI_CPOL_OFFSET 0 -#define SPI_CPOL_SIZE 1 -#define SPI_NCPHA_OFFSET 1 -#define SPI_NCPHA_SIZE 1 -#define SPI_CSAAT_OFFSET 3 -#define SPI_CSAAT_SIZE 1 -#define SPI_BITS_OFFSET 4 -#define SPI_BITS_SIZE 4 -#define SPI_SCBR_OFFSET 8 -#define SPI_SCBR_SIZE 8 -#define SPI_DLYBS_OFFSET 16 -#define SPI_DLYBS_SIZE 8 -#define SPI_DLYBCT_OFFSET 24 -#define SPI_DLYBCT_SIZE 8 - -/* Bitfields in RCR */ -#define SPI_RXCTR_OFFSET 0 -#define SPI_RXCTR_SIZE 16 - -/* Bitfields in TCR */ -#define SPI_TXCTR_OFFSET 0 -#define SPI_TXCTR_SIZE 16 - -/* Bitfields in RNCR */ -#define SPI_RXNCR_OFFSET 0 -#define SPI_RXNCR_SIZE 16 - -/* Bitfields in TNCR */ -#define SPI_TXNCR_OFFSET 0 -#define SPI_TXNCR_SIZE 16 - -/* Bitfields in PTCR */ -#define SPI_RXTEN_OFFSET 0 -#define SPI_RXTEN_SIZE 1 -#define SPI_RXTDIS_OFFSET 1 -#define SPI_RXTDIS_SIZE 1 -#define SPI_TXTEN_OFFSET 8 -#define SPI_TXTEN_SIZE 1 -#define SPI_TXTDIS_OFFSET 9 -#define SPI_TXTDIS_SIZE 1 - -/* Constants for BITS */ -#define SPI_BITS_8_BPT 0 -#define SPI_BITS_9_BPT 1 -#define SPI_BITS_10_BPT 2 -#define SPI_BITS_11_BPT 3 -#define SPI_BITS_12_BPT 4 -#define SPI_BITS_13_BPT 5 -#define SPI_BITS_14_BPT 6 -#define SPI_BITS_15_BPT 7 -#define SPI_BITS_16_BPT 8 - -/* Bit manipulation macros */ -#define SPI_BIT(name) \ - (1 << SPI_##name##_OFFSET) -#define SPI_BF(name,value) \ - (((value) & ((1 << SPI_##name##_SIZE) - 1)) << SPI_##name##_OFFSET) -#define SPI_BFEXT(name,value) 
\ - (((value) >> SPI_##name##_OFFSET) & ((1 << SPI_##name##_SIZE) - 1)) -#define SPI_BFINS(name,value,old) \ - ( ((old) & ~(((1 << SPI_##name##_SIZE) - 1) << SPI_##name##_OFFSET)) \ - | SPI_BF(name,value)) - -/* Register access macros */ -#define spi_readl(port,reg) \ - __raw_readl((port)->regs + SPI_##reg) -#define spi_writel(port,reg,value) \ - __raw_writel((value), (port)->regs + SPI_##reg) - -#endif /* __ATMEL_SPI_H__ */ diff --git a/drivers/spi/au1550_spi.c b/drivers/spi/au1550_spi.c deleted file mode 100644 index b50563d..0000000 --- a/drivers/spi/au1550_spi.c +++ /dev/null @@ -1,1032 +0,0 @@ -/* - * au1550_spi.c - au1550 psc spi controller driver - * may work also with au1200, au1210, au1250 - * will not work on au1000, au1100 and au1500 (no full spi controller there) - * - * Copyright (c) 2006 ATRON electronic GmbH - * Author: Jan Nikitenko - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -static unsigned usedma = 1; -module_param(usedma, uint, 0644); - -/* -#define AU1550_SPI_DEBUG_LOOPBACK -*/ - - -#define AU1550_SPI_DBDMA_DESCRIPTORS 1 -#define AU1550_SPI_DMA_RXTMP_MINSIZE 2048U - -struct au1550_spi { - struct spi_bitbang bitbang; - - volatile psc_spi_t __iomem *regs; - int irq; - unsigned freq_max; - unsigned freq_min; - - unsigned len; - unsigned tx_count; - unsigned rx_count; - const u8 *tx; - u8 *rx; - - void (*rx_word)(struct au1550_spi *hw); - void (*tx_word)(struct au1550_spi *hw); - int (*txrx_bufs)(struct spi_device *spi, struct spi_transfer *t); - irqreturn_t (*irq_callback)(struct au1550_spi *hw); - - struct completion master_done; - - unsigned usedma; - u32 dma_tx_id; - u32 dma_rx_id; - u32 dma_tx_ch; - u32 dma_rx_ch; - - u8 *dma_rx_tmpbuf; - unsigned dma_rx_tmpbuf_size; - u32 dma_rx_tmpbuf_addr; - - struct spi_master *master; - struct device *dev; - struct au1550_spi_info *pdata; - struct resource *ioarea; -}; - - -/* we use an 8-bit memory device for dma transfers to/from spi fifo */ -static dbdev_tab_t au1550_spi_mem_dbdev = -{ - .dev_id = DBDMA_MEM_CHAN, - .dev_flags = DEV_FLAGS_ANYUSE|DEV_FLAGS_SYNC, - .dev_tsize = 0, - .dev_devwidth = 8, - .dev_physaddr = 0x00000000, - .dev_intlevel = 0, - .dev_intpolarity = 0 -}; - -static int ddma_memid; /* id to above mem dma device */ - -static void au1550_spi_bits_handlers_set(struct au1550_spi *hw, int bpw); - - -/* - * compute BRG and DIV bits to setup spi clock based on main input clock rate - * that was specified in platform data structure - * according to au1550 datasheet: - * psc_tempclk = psc_mainclk / (2 << DIV) - * spiclk = psc_tempclk / (2 * (BRG + 1)) - * BRG valid range is 4..63 - * DIV valid range is 0..3 - */ -static u32 au1550_spi_baudcfg(struct au1550_spi *hw, unsigned speed_hz) -{ - 
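Restating the datasheet relation from the comment above: spiclk = mainclk / ((2 << DIV) * 2 * (BRG + 1)) = mainclk / ((4 << DIV) * (BRG + 1)), with BRG in 4..63 and DIV in 0..3. The sketch below mirrors the search in the function body that follows; pick_brg_div() and the clock numbers are illustrative, not driver API.

#include <stdint.h>
#include <stdio.h>

static void pick_brg_div(uint32_t mainclk_hz, uint32_t speed_hz,
			 unsigned int *brg_out, unsigned int *div_out)
{
	unsigned int div, brgp1 = 0;

	for (div = 0; div < 4; div++) {
		brgp1 = mainclk_hz / speed_hz / (4u << div);	/* this is BRG + 1 */
		if (brgp1 < 4 + 1) {
			brgp1 = 4 + 1;	/* requested rate too fast: use fastest divider */
			break;
		}
		if (brgp1 <= 63 + 1)
			break;		/* BRG fits its 4..63 range for this DIV */
	}
	if (div == 4) {			/* still too large: clamp to the slowest setting */
		div = 3;
		brgp1 = 63 + 1;
	}
	*brg_out = brgp1 - 1;
	*div_out = div;
}

int main(void)
{
	unsigned int brg, div;
	uint32_t mainclk = 48000000, target = 1000000;

	pick_brg_div(mainclk, target, &brg, &div);
	printf("BRG=%u DIV=%u -> %u Hz\n", brg, div,
	       mainclk / ((4u << div) * (brg + 1)));
	return 0;
}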
u32 mainclk_hz = hw->pdata->mainclk_hz; - u32 div, brg; - - for (div = 0; div < 4; div++) { - brg = mainclk_hz / speed_hz / (4 << div); - /* now we have BRG+1 in brg, so count with that */ - if (brg < (4 + 1)) { - brg = (4 + 1); /* speed_hz too big */ - break; /* set lowest brg (div is == 0) */ - } - if (brg <= (63 + 1)) - break; /* we have valid brg and div */ - } - if (div == 4) { - div = 3; /* speed_hz too small */ - brg = (63 + 1); /* set highest brg and div */ - } - brg--; - return PSC_SPICFG_SET_BAUD(brg) | PSC_SPICFG_SET_DIV(div); -} - -static inline void au1550_spi_mask_ack_all(struct au1550_spi *hw) -{ - hw->regs->psc_spimsk = - PSC_SPIMSK_MM | PSC_SPIMSK_RR | PSC_SPIMSK_RO - | PSC_SPIMSK_RU | PSC_SPIMSK_TR | PSC_SPIMSK_TO - | PSC_SPIMSK_TU | PSC_SPIMSK_SD | PSC_SPIMSK_MD; - au_sync(); - - hw->regs->psc_spievent = - PSC_SPIEVNT_MM | PSC_SPIEVNT_RR | PSC_SPIEVNT_RO - | PSC_SPIEVNT_RU | PSC_SPIEVNT_TR | PSC_SPIEVNT_TO - | PSC_SPIEVNT_TU | PSC_SPIEVNT_SD | PSC_SPIEVNT_MD; - au_sync(); -} - -static void au1550_spi_reset_fifos(struct au1550_spi *hw) -{ - u32 pcr; - - hw->regs->psc_spipcr = PSC_SPIPCR_RC | PSC_SPIPCR_TC; - au_sync(); - do { - pcr = hw->regs->psc_spipcr; - au_sync(); - } while (pcr != 0); -} - -/* - * dma transfers are used for the most common spi word size of 8-bits - * we cannot easily change already set up dma channels' width, so if we wanted - * dma support for more than 8-bit words (up to 24 bits), we would need to - * setup dma channels from scratch on each spi transfer, based on bits_per_word - * instead we have pre set up 8 bit dma channels supporting spi 4 to 8 bits - * transfers, and 9 to 24 bits spi transfers will be done in pio irq based mode - * callbacks to handle dma or pio are set up in au1550_spi_bits_handlers_set() - */ -static void au1550_spi_chipsel(struct spi_device *spi, int value) -{ - struct au1550_spi *hw = spi_master_get_devdata(spi->master); - unsigned cspol = spi->mode & SPI_CS_HIGH ? 
1 : 0; - u32 cfg, stat; - - switch (value) { - case BITBANG_CS_INACTIVE: - if (hw->pdata->deactivate_cs) - hw->pdata->deactivate_cs(hw->pdata, spi->chip_select, - cspol); - break; - - case BITBANG_CS_ACTIVE: - au1550_spi_bits_handlers_set(hw, spi->bits_per_word); - - cfg = hw->regs->psc_spicfg; - au_sync(); - hw->regs->psc_spicfg = cfg & ~PSC_SPICFG_DE_ENABLE; - au_sync(); - - if (spi->mode & SPI_CPOL) - cfg |= PSC_SPICFG_BI; - else - cfg &= ~PSC_SPICFG_BI; - if (spi->mode & SPI_CPHA) - cfg &= ~PSC_SPICFG_CDE; - else - cfg |= PSC_SPICFG_CDE; - - if (spi->mode & SPI_LSB_FIRST) - cfg |= PSC_SPICFG_MLF; - else - cfg &= ~PSC_SPICFG_MLF; - - if (hw->usedma && spi->bits_per_word <= 8) - cfg &= ~PSC_SPICFG_DD_DISABLE; - else - cfg |= PSC_SPICFG_DD_DISABLE; - cfg = PSC_SPICFG_CLR_LEN(cfg); - cfg |= PSC_SPICFG_SET_LEN(spi->bits_per_word); - - cfg = PSC_SPICFG_CLR_BAUD(cfg); - cfg &= ~PSC_SPICFG_SET_DIV(3); - cfg |= au1550_spi_baudcfg(hw, spi->max_speed_hz); - - hw->regs->psc_spicfg = cfg | PSC_SPICFG_DE_ENABLE; - au_sync(); - do { - stat = hw->regs->psc_spistat; - au_sync(); - } while ((stat & PSC_SPISTAT_DR) == 0); - - if (hw->pdata->activate_cs) - hw->pdata->activate_cs(hw->pdata, spi->chip_select, - cspol); - break; - } -} - -static int au1550_spi_setupxfer(struct spi_device *spi, struct spi_transfer *t) -{ - struct au1550_spi *hw = spi_master_get_devdata(spi->master); - unsigned bpw, hz; - u32 cfg, stat; - - bpw = spi->bits_per_word; - hz = spi->max_speed_hz; - if (t) { - if (t->bits_per_word) - bpw = t->bits_per_word; - if (t->speed_hz) - hz = t->speed_hz; - } - - if (bpw < 4 || bpw > 24) { - dev_err(&spi->dev, "setupxfer: invalid bits_per_word=%d\n", - bpw); - return -EINVAL; - } - if (hz > spi->max_speed_hz || hz > hw->freq_max || hz < hw->freq_min) { - dev_err(&spi->dev, "setupxfer: clock rate=%d out of range\n", - hz); - return -EINVAL; - } - - au1550_spi_bits_handlers_set(hw, spi->bits_per_word); - - cfg = hw->regs->psc_spicfg; - au_sync(); - hw->regs->psc_spicfg = cfg & ~PSC_SPICFG_DE_ENABLE; - au_sync(); - - if (hw->usedma && bpw <= 8) - cfg &= ~PSC_SPICFG_DD_DISABLE; - else - cfg |= PSC_SPICFG_DD_DISABLE; - cfg = PSC_SPICFG_CLR_LEN(cfg); - cfg |= PSC_SPICFG_SET_LEN(bpw); - - cfg = PSC_SPICFG_CLR_BAUD(cfg); - cfg &= ~PSC_SPICFG_SET_DIV(3); - cfg |= au1550_spi_baudcfg(hw, hz); - - hw->regs->psc_spicfg = cfg; - au_sync(); - - if (cfg & PSC_SPICFG_DE_ENABLE) { - do { - stat = hw->regs->psc_spistat; - au_sync(); - } while ((stat & PSC_SPISTAT_DR) == 0); - } - - au1550_spi_reset_fifos(hw); - au1550_spi_mask_ack_all(hw); - return 0; -} - -static int au1550_spi_setup(struct spi_device *spi) -{ - struct au1550_spi *hw = spi_master_get_devdata(spi->master); - - if (spi->bits_per_word < 4 || spi->bits_per_word > 24) { - dev_err(&spi->dev, "setup: invalid bits_per_word=%d\n", - spi->bits_per_word); - return -EINVAL; - } - - if (spi->max_speed_hz == 0) - spi->max_speed_hz = hw->freq_max; - if (spi->max_speed_hz > hw->freq_max - || spi->max_speed_hz < hw->freq_min) - return -EINVAL; - /* - * NOTE: cannot change speed and other hw settings immediately, - * otherwise sharing of spi bus is not possible, - * so do not call setupxfer(spi, NULL) here - */ - return 0; -} - -/* - * for dma spi transfers, we have to setup rx channel, otherwise there is - * no reliable way how to recognize that spi transfer is done - * dma complete callbacks are called before real spi transfer is finished - * and if only tx dma channel is set up (and rx fifo overflow event masked) - * spi master done event irq is not 
generated unless rx fifo is empty (emptied) - * so we need rx tmp buffer to use for rx dma if user does not provide one - */ -static int au1550_spi_dma_rxtmp_alloc(struct au1550_spi *hw, unsigned size) -{ - hw->dma_rx_tmpbuf = kmalloc(size, GFP_KERNEL); - if (!hw->dma_rx_tmpbuf) - return -ENOMEM; - hw->dma_rx_tmpbuf_size = size; - hw->dma_rx_tmpbuf_addr = dma_map_single(hw->dev, hw->dma_rx_tmpbuf, - size, DMA_FROM_DEVICE); - if (dma_mapping_error(hw->dev, hw->dma_rx_tmpbuf_addr)) { - kfree(hw->dma_rx_tmpbuf); - hw->dma_rx_tmpbuf = 0; - hw->dma_rx_tmpbuf_size = 0; - return -EFAULT; - } - return 0; -} - -static void au1550_spi_dma_rxtmp_free(struct au1550_spi *hw) -{ - dma_unmap_single(hw->dev, hw->dma_rx_tmpbuf_addr, - hw->dma_rx_tmpbuf_size, DMA_FROM_DEVICE); - kfree(hw->dma_rx_tmpbuf); - hw->dma_rx_tmpbuf = 0; - hw->dma_rx_tmpbuf_size = 0; -} - -static int au1550_spi_dma_txrxb(struct spi_device *spi, struct spi_transfer *t) -{ - struct au1550_spi *hw = spi_master_get_devdata(spi->master); - dma_addr_t dma_tx_addr; - dma_addr_t dma_rx_addr; - u32 res; - - hw->len = t->len; - hw->tx_count = 0; - hw->rx_count = 0; - - hw->tx = t->tx_buf; - hw->rx = t->rx_buf; - dma_tx_addr = t->tx_dma; - dma_rx_addr = t->rx_dma; - - /* - * check if buffers are already dma mapped, map them otherwise: - * - first map the TX buffer, so cache data gets written to memory - * - then map the RX buffer, so that cache entries (with - * soon-to-be-stale data) get removed - * use rx buffer in place of tx if tx buffer was not provided - * use temp rx buffer (preallocated or realloc to fit) for rx dma - */ - if (t->tx_buf) { - if (t->tx_dma == 0) { /* if DMA_ADDR_INVALID, map it */ - dma_tx_addr = dma_map_single(hw->dev, - (void *)t->tx_buf, - t->len, DMA_TO_DEVICE); - if (dma_mapping_error(hw->dev, dma_tx_addr)) - dev_err(hw->dev, "tx dma map error\n"); - } - } - - if (t->rx_buf) { - if (t->rx_dma == 0) { /* if DMA_ADDR_INVALID, map it */ - dma_rx_addr = dma_map_single(hw->dev, - (void *)t->rx_buf, - t->len, DMA_FROM_DEVICE); - if (dma_mapping_error(hw->dev, dma_rx_addr)) - dev_err(hw->dev, "rx dma map error\n"); - } - } else { - if (t->len > hw->dma_rx_tmpbuf_size) { - int ret; - - au1550_spi_dma_rxtmp_free(hw); - ret = au1550_spi_dma_rxtmp_alloc(hw, max(t->len, - AU1550_SPI_DMA_RXTMP_MINSIZE)); - if (ret < 0) - return ret; - } - hw->rx = hw->dma_rx_tmpbuf; - dma_rx_addr = hw->dma_rx_tmpbuf_addr; - dma_sync_single_for_device(hw->dev, dma_rx_addr, - t->len, DMA_FROM_DEVICE); - } - - if (!t->tx_buf) { - dma_sync_single_for_device(hw->dev, dma_rx_addr, - t->len, DMA_BIDIRECTIONAL); - hw->tx = hw->rx; - } - - /* put buffers on the ring */ - res = au1xxx_dbdma_put_dest(hw->dma_rx_ch, virt_to_phys(hw->rx), - t->len, DDMA_FLAGS_IE); - if (!res) - dev_err(hw->dev, "rx dma put dest error\n"); - - res = au1xxx_dbdma_put_source(hw->dma_tx_ch, virt_to_phys(hw->tx), - t->len, DDMA_FLAGS_IE); - if (!res) - dev_err(hw->dev, "tx dma put source error\n"); - - au1xxx_dbdma_start(hw->dma_rx_ch); - au1xxx_dbdma_start(hw->dma_tx_ch); - - /* by default enable nearly all events interrupt */ - hw->regs->psc_spimsk = PSC_SPIMSK_SD; - au_sync(); - - /* start the transfer */ - hw->regs->psc_spipcr = PSC_SPIPCR_MS; - au_sync(); - - wait_for_completion(&hw->master_done); - - au1xxx_dbdma_stop(hw->dma_tx_ch); - au1xxx_dbdma_stop(hw->dma_rx_ch); - - if (!t->rx_buf) { - /* using the temporal preallocated and premapped buffer */ - dma_sync_single_for_cpu(hw->dev, dma_rx_addr, t->len, - DMA_FROM_DEVICE); - } - /* unmap buffers if mapped above */ - if 
(t->rx_buf && t->rx_dma == 0 ) - dma_unmap_single(hw->dev, dma_rx_addr, t->len, - DMA_FROM_DEVICE); - if (t->tx_buf && t->tx_dma == 0 ) - dma_unmap_single(hw->dev, dma_tx_addr, t->len, - DMA_TO_DEVICE); - - return hw->rx_count < hw->tx_count ? hw->rx_count : hw->tx_count; -} - -static irqreturn_t au1550_spi_dma_irq_callback(struct au1550_spi *hw) -{ - u32 stat, evnt; - - stat = hw->regs->psc_spistat; - evnt = hw->regs->psc_spievent; - au_sync(); - if ((stat & PSC_SPISTAT_DI) == 0) { - dev_err(hw->dev, "Unexpected IRQ!\n"); - return IRQ_NONE; - } - - if ((evnt & (PSC_SPIEVNT_MM | PSC_SPIEVNT_RO - | PSC_SPIEVNT_RU | PSC_SPIEVNT_TO - | PSC_SPIEVNT_TU | PSC_SPIEVNT_SD)) - != 0) { - /* - * due to an spi error we consider transfer as done, - * so mask all events until before next transfer start - * and stop the possibly running dma immediatelly - */ - au1550_spi_mask_ack_all(hw); - au1xxx_dbdma_stop(hw->dma_rx_ch); - au1xxx_dbdma_stop(hw->dma_tx_ch); - - /* get number of transferred bytes */ - hw->rx_count = hw->len - au1xxx_get_dma_residue(hw->dma_rx_ch); - hw->tx_count = hw->len - au1xxx_get_dma_residue(hw->dma_tx_ch); - - au1xxx_dbdma_reset(hw->dma_rx_ch); - au1xxx_dbdma_reset(hw->dma_tx_ch); - au1550_spi_reset_fifos(hw); - - if (evnt == PSC_SPIEVNT_RO) - dev_err(hw->dev, - "dma transfer: receive FIFO overflow!\n"); - else - dev_err(hw->dev, - "dma transfer: unexpected SPI error " - "(event=0x%x stat=0x%x)!\n", evnt, stat); - - complete(&hw->master_done); - return IRQ_HANDLED; - } - - if ((evnt & PSC_SPIEVNT_MD) != 0) { - /* transfer completed successfully */ - au1550_spi_mask_ack_all(hw); - hw->rx_count = hw->len; - hw->tx_count = hw->len; - complete(&hw->master_done); - } - return IRQ_HANDLED; -} - - -/* routines to handle different word sizes in pio mode */ -#define AU1550_SPI_RX_WORD(size, mask) \ -static void au1550_spi_rx_word_##size(struct au1550_spi *hw) \ -{ \ - u32 fifoword = hw->regs->psc_spitxrx & (u32)(mask); \ - au_sync(); \ - if (hw->rx) { \ - *(u##size *)hw->rx = (u##size)fifoword; \ - hw->rx += (size) / 8; \ - } \ - hw->rx_count += (size) / 8; \ -} - -#define AU1550_SPI_TX_WORD(size, mask) \ -static void au1550_spi_tx_word_##size(struct au1550_spi *hw) \ -{ \ - u32 fifoword = 0; \ - if (hw->tx) { \ - fifoword = *(u##size *)hw->tx & (u32)(mask); \ - hw->tx += (size) / 8; \ - } \ - hw->tx_count += (size) / 8; \ - if (hw->tx_count >= hw->len) \ - fifoword |= PSC_SPITXRX_LC; \ - hw->regs->psc_spitxrx = fifoword; \ - au_sync(); \ -} - -AU1550_SPI_RX_WORD(8,0xff) -AU1550_SPI_RX_WORD(16,0xffff) -AU1550_SPI_RX_WORD(32,0xffffff) -AU1550_SPI_TX_WORD(8,0xff) -AU1550_SPI_TX_WORD(16,0xffff) -AU1550_SPI_TX_WORD(32,0xffffff) - -static int au1550_spi_pio_txrxb(struct spi_device *spi, struct spi_transfer *t) -{ - u32 stat, mask; - struct au1550_spi *hw = spi_master_get_devdata(spi->master); - - hw->tx = t->tx_buf; - hw->rx = t->rx_buf; - hw->len = t->len; - hw->tx_count = 0; - hw->rx_count = 0; - - /* by default enable nearly all events after filling tx fifo */ - mask = PSC_SPIMSK_SD; - - /* fill the transmit FIFO */ - while (hw->tx_count < hw->len) { - - hw->tx_word(hw); - - if (hw->tx_count >= hw->len) { - /* mask tx fifo request interrupt as we are done */ - mask |= PSC_SPIMSK_TR; - } - - stat = hw->regs->psc_spistat; - au_sync(); - if (stat & PSC_SPISTAT_TF) - break; - } - - /* enable event interrupts */ - hw->regs->psc_spimsk = mask; - au_sync(); - - /* start the transfer */ - hw->regs->psc_spipcr = PSC_SPIPCR_MS; - au_sync(); - - wait_for_completion(&hw->master_done); - - return 
hw->rx_count < hw->tx_count ? hw->rx_count : hw->tx_count; -} - -static irqreturn_t au1550_spi_pio_irq_callback(struct au1550_spi *hw) -{ - int busy; - u32 stat, evnt; - - stat = hw->regs->psc_spistat; - evnt = hw->regs->psc_spievent; - au_sync(); - if ((stat & PSC_SPISTAT_DI) == 0) { - dev_err(hw->dev, "Unexpected IRQ!\n"); - return IRQ_NONE; - } - - if ((evnt & (PSC_SPIEVNT_MM | PSC_SPIEVNT_RO - | PSC_SPIEVNT_RU | PSC_SPIEVNT_TO - | PSC_SPIEVNT_SD)) - != 0) { - /* - * due to an error we consider transfer as done, - * so mask all events until before next transfer start - */ - au1550_spi_mask_ack_all(hw); - au1550_spi_reset_fifos(hw); - dev_err(hw->dev, - "pio transfer: unexpected SPI error " - "(event=0x%x stat=0x%x)!\n", evnt, stat); - complete(&hw->master_done); - return IRQ_HANDLED; - } - - /* - * while there is something to read from rx fifo - * or there is a space to write to tx fifo: - */ - do { - busy = 0; - stat = hw->regs->psc_spistat; - au_sync(); - - /* - * Take care to not let the Rx FIFO overflow. - * - * We only write a byte if we have read one at least. Initially, - * the write fifo is full, so we should read from the read fifo - * first. - * In case we miss a word from the read fifo, we should get a - * RO event and should back out. - */ - if (!(stat & PSC_SPISTAT_RE) && hw->rx_count < hw->len) { - hw->rx_word(hw); - busy = 1; - - if (!(stat & PSC_SPISTAT_TF) && hw->tx_count < hw->len) - hw->tx_word(hw); - } - } while (busy); - - hw->regs->psc_spievent = PSC_SPIEVNT_RR | PSC_SPIEVNT_TR; - au_sync(); - - /* - * Restart the SPI transmission in case of a transmit underflow. - * This seems to work despite the notes in the Au1550 data book - * of Figure 8-4 with flowchart for SPI master operation: - * - * """Note 1: An XFR Error Interrupt occurs, unless masked, - * for any of the following events: Tx FIFO Underflow, - * Rx FIFO Overflow, or Multiple-master Error - * Note 2: In case of a Tx Underflow Error, all zeroes are - * transmitted.""" - * - * By simply restarting the spi transfer on Tx Underflow Error, - * we assume that spi transfer was paused instead of zeroes - * transmittion mentioned in the Note 2 of Au1550 data book. 
- */ - if (evnt & PSC_SPIEVNT_TU) { - hw->regs->psc_spievent = PSC_SPIEVNT_TU | PSC_SPIEVNT_MD; - au_sync(); - hw->regs->psc_spipcr = PSC_SPIPCR_MS; - au_sync(); - } - - if (hw->rx_count >= hw->len) { - /* transfer completed successfully */ - au1550_spi_mask_ack_all(hw); - complete(&hw->master_done); - } - return IRQ_HANDLED; -} - -static int au1550_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t) -{ - struct au1550_spi *hw = spi_master_get_devdata(spi->master); - return hw->txrx_bufs(spi, t); -} - -static irqreturn_t au1550_spi_irq(int irq, void *dev) -{ - struct au1550_spi *hw = dev; - return hw->irq_callback(hw); -} - -static void au1550_spi_bits_handlers_set(struct au1550_spi *hw, int bpw) -{ - if (bpw <= 8) { - if (hw->usedma) { - hw->txrx_bufs = &au1550_spi_dma_txrxb; - hw->irq_callback = &au1550_spi_dma_irq_callback; - } else { - hw->rx_word = &au1550_spi_rx_word_8; - hw->tx_word = &au1550_spi_tx_word_8; - hw->txrx_bufs = &au1550_spi_pio_txrxb; - hw->irq_callback = &au1550_spi_pio_irq_callback; - } - } else if (bpw <= 16) { - hw->rx_word = &au1550_spi_rx_word_16; - hw->tx_word = &au1550_spi_tx_word_16; - hw->txrx_bufs = &au1550_spi_pio_txrxb; - hw->irq_callback = &au1550_spi_pio_irq_callback; - } else { - hw->rx_word = &au1550_spi_rx_word_32; - hw->tx_word = &au1550_spi_tx_word_32; - hw->txrx_bufs = &au1550_spi_pio_txrxb; - hw->irq_callback = &au1550_spi_pio_irq_callback; - } -} - -static void __init au1550_spi_setup_psc_as_spi(struct au1550_spi *hw) -{ - u32 stat, cfg; - - /* set up the PSC for SPI mode */ - hw->regs->psc_ctrl = PSC_CTRL_DISABLE; - au_sync(); - hw->regs->psc_sel = PSC_SEL_PS_SPIMODE; - au_sync(); - - hw->regs->psc_spicfg = 0; - au_sync(); - - hw->regs->psc_ctrl = PSC_CTRL_ENABLE; - au_sync(); - - do { - stat = hw->regs->psc_spistat; - au_sync(); - } while ((stat & PSC_SPISTAT_SR) == 0); - - - cfg = hw->usedma ? 
0 : PSC_SPICFG_DD_DISABLE; - cfg |= PSC_SPICFG_SET_LEN(8); - cfg |= PSC_SPICFG_RT_FIFO8 | PSC_SPICFG_TT_FIFO8; - /* use minimal allowed brg and div values as initial setting: */ - cfg |= PSC_SPICFG_SET_BAUD(4) | PSC_SPICFG_SET_DIV(0); - -#ifdef AU1550_SPI_DEBUG_LOOPBACK - cfg |= PSC_SPICFG_LB; -#endif - - hw->regs->psc_spicfg = cfg; - au_sync(); - - au1550_spi_mask_ack_all(hw); - - hw->regs->psc_spicfg |= PSC_SPICFG_DE_ENABLE; - au_sync(); - - do { - stat = hw->regs->psc_spistat; - au_sync(); - } while ((stat & PSC_SPISTAT_DR) == 0); - - au1550_spi_reset_fifos(hw); -} - - -static int __init au1550_spi_probe(struct platform_device *pdev) -{ - struct au1550_spi *hw; - struct spi_master *master; - struct resource *r; - int err = 0; - - master = spi_alloc_master(&pdev->dev, sizeof(struct au1550_spi)); - if (master == NULL) { - dev_err(&pdev->dev, "No memory for spi_master\n"); - err = -ENOMEM; - goto err_nomem; - } - - /* the spi->mode bits understood by this driver: */ - master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST; - - hw = spi_master_get_devdata(master); - - hw->master = spi_master_get(master); - hw->pdata = pdev->dev.platform_data; - hw->dev = &pdev->dev; - - if (hw->pdata == NULL) { - dev_err(&pdev->dev, "No platform data supplied\n"); - err = -ENOENT; - goto err_no_pdata; - } - - r = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (!r) { - dev_err(&pdev->dev, "no IRQ\n"); - err = -ENODEV; - goto err_no_iores; - } - hw->irq = r->start; - - hw->usedma = 0; - r = platform_get_resource(pdev, IORESOURCE_DMA, 0); - if (r) { - hw->dma_tx_id = r->start; - r = platform_get_resource(pdev, IORESOURCE_DMA, 1); - if (r) { - hw->dma_rx_id = r->start; - if (usedma && ddma_memid) { - if (pdev->dev.dma_mask == NULL) - dev_warn(&pdev->dev, "no dma mask\n"); - else - hw->usedma = 1; - } - } - } - - r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!r) { - dev_err(&pdev->dev, "no mmio resource\n"); - err = -ENODEV; - goto err_no_iores; - } - - hw->ioarea = request_mem_region(r->start, sizeof(psc_spi_t), - pdev->name); - if (!hw->ioarea) { - dev_err(&pdev->dev, "Cannot reserve iomem region\n"); - err = -ENXIO; - goto err_no_iores; - } - - hw->regs = (psc_spi_t __iomem *)ioremap(r->start, sizeof(psc_spi_t)); - if (!hw->regs) { - dev_err(&pdev->dev, "cannot ioremap\n"); - err = -ENXIO; - goto err_ioremap; - } - - platform_set_drvdata(pdev, hw); - - init_completion(&hw->master_done); - - hw->bitbang.master = hw->master; - hw->bitbang.setup_transfer = au1550_spi_setupxfer; - hw->bitbang.chipselect = au1550_spi_chipsel; - hw->bitbang.master->setup = au1550_spi_setup; - hw->bitbang.txrx_bufs = au1550_spi_txrx_bufs; - - if (hw->usedma) { - hw->dma_tx_ch = au1xxx_dbdma_chan_alloc(ddma_memid, - hw->dma_tx_id, NULL, (void *)hw); - if (hw->dma_tx_ch == 0) { - dev_err(&pdev->dev, - "Cannot allocate tx dma channel\n"); - err = -ENXIO; - goto err_no_txdma; - } - au1xxx_dbdma_set_devwidth(hw->dma_tx_ch, 8); - if (au1xxx_dbdma_ring_alloc(hw->dma_tx_ch, - AU1550_SPI_DBDMA_DESCRIPTORS) == 0) { - dev_err(&pdev->dev, - "Cannot allocate tx dma descriptors\n"); - err = -ENXIO; - goto err_no_txdma_descr; - } - - - hw->dma_rx_ch = au1xxx_dbdma_chan_alloc(hw->dma_rx_id, - ddma_memid, NULL, (void *)hw); - if (hw->dma_rx_ch == 0) { - dev_err(&pdev->dev, - "Cannot allocate rx dma channel\n"); - err = -ENXIO; - goto err_no_rxdma; - } - au1xxx_dbdma_set_devwidth(hw->dma_rx_ch, 8); - if (au1xxx_dbdma_ring_alloc(hw->dma_rx_ch, - AU1550_SPI_DBDMA_DESCRIPTORS) == 0) { - dev_err(&pdev->dev, - "Cannot 
allocate rx dma descriptors\n"); - err = -ENXIO; - goto err_no_rxdma_descr; - } - - err = au1550_spi_dma_rxtmp_alloc(hw, - AU1550_SPI_DMA_RXTMP_MINSIZE); - if (err < 0) { - dev_err(&pdev->dev, - "Cannot allocate initial rx dma tmp buffer\n"); - goto err_dma_rxtmp_alloc; - } - } - - au1550_spi_bits_handlers_set(hw, 8); - - err = request_irq(hw->irq, au1550_spi_irq, 0, pdev->name, hw); - if (err) { - dev_err(&pdev->dev, "Cannot claim IRQ\n"); - goto err_no_irq; - } - - master->bus_num = pdev->id; - master->num_chipselect = hw->pdata->num_chipselect; - - /* - * precompute valid range for spi freq - from au1550 datasheet: - * psc_tempclk = psc_mainclk / (2 << DIV) - * spiclk = psc_tempclk / (2 * (BRG + 1)) - * BRG valid range is 4..63 - * DIV valid range is 0..3 - * round the min and max frequencies to values that would still - * produce valid brg and div - */ - { - int min_div = (2 << 0) * (2 * (4 + 1)); - int max_div = (2 << 3) * (2 * (63 + 1)); - hw->freq_max = hw->pdata->mainclk_hz / min_div; - hw->freq_min = hw->pdata->mainclk_hz / (max_div + 1) + 1; - } - - au1550_spi_setup_psc_as_spi(hw); - - err = spi_bitbang_start(&hw->bitbang); - if (err) { - dev_err(&pdev->dev, "Failed to register SPI master\n"); - goto err_register; - } - - dev_info(&pdev->dev, - "spi master registered: bus_num=%d num_chipselect=%d\n", - master->bus_num, master->num_chipselect); - - return 0; - -err_register: - free_irq(hw->irq, hw); - -err_no_irq: - au1550_spi_dma_rxtmp_free(hw); - -err_dma_rxtmp_alloc: -err_no_rxdma_descr: - if (hw->usedma) - au1xxx_dbdma_chan_free(hw->dma_rx_ch); - -err_no_rxdma: -err_no_txdma_descr: - if (hw->usedma) - au1xxx_dbdma_chan_free(hw->dma_tx_ch); - -err_no_txdma: - iounmap((void __iomem *)hw->regs); - -err_ioremap: - release_resource(hw->ioarea); - kfree(hw->ioarea); - -err_no_iores: -err_no_pdata: - spi_master_put(hw->master); - -err_nomem: - return err; -} - -static int __exit au1550_spi_remove(struct platform_device *pdev) -{ - struct au1550_spi *hw = platform_get_drvdata(pdev); - - dev_info(&pdev->dev, "spi master remove: bus_num=%d\n", - hw->master->bus_num); - - spi_bitbang_stop(&hw->bitbang); - free_irq(hw->irq, hw); - iounmap((void __iomem *)hw->regs); - release_resource(hw->ioarea); - kfree(hw->ioarea); - - if (hw->usedma) { - au1550_spi_dma_rxtmp_free(hw); - au1xxx_dbdma_chan_free(hw->dma_rx_ch); - au1xxx_dbdma_chan_free(hw->dma_tx_ch); - } - - platform_set_drvdata(pdev, NULL); - - spi_master_put(hw->master); - return 0; -} - -/* work with hotplug and coldplug */ -MODULE_ALIAS("platform:au1550-spi"); - -static struct platform_driver au1550_spi_drv = { - .remove = __exit_p(au1550_spi_remove), - .driver = { - .name = "au1550-spi", - .owner = THIS_MODULE, - }, -}; - -static int __init au1550_spi_init(void) -{ - /* - * create memory device with 8 bits dev_devwidth - * needed for proper byte ordering to spi fifo - */ - if (usedma) { - ddma_memid = au1xxx_ddma_add_device(&au1550_spi_mem_dbdev); - if (!ddma_memid) - printk(KERN_ERR "au1550-spi: cannot add memory" - "dbdma device\n"); - } - return platform_driver_probe(&au1550_spi_drv, au1550_spi_probe); -} -module_init(au1550_spi_init); - -static void __exit au1550_spi_exit(void) -{ - if (usedma && ddma_memid) - au1xxx_ddma_del_device(ddma_memid); - platform_driver_unregister(&au1550_spi_drv); -} -module_exit(au1550_spi_exit); - -MODULE_DESCRIPTION("Au1550 PSC SPI Driver"); -MODULE_AUTHOR("Jan Nikitenko "); -MODULE_LICENSE("GPL"); diff --git a/drivers/spi/coldfire_qspi.c b/drivers/spi/coldfire_qspi.c deleted file mode 100644 index 
ae2cd1c..0000000 --- a/drivers/spi/coldfire_qspi.c +++ /dev/null @@ -1,642 +0,0 @@ -/* - * Freescale/Motorola Coldfire Queued SPI driver - * - * Copyright 2010 Steven King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA - * -*/ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#define DRIVER_NAME "mcfqspi" - -#define MCFQSPI_BUSCLK (MCF_BUSCLK / 2) - -#define MCFQSPI_QMR 0x00 -#define MCFQSPI_QMR_MSTR 0x8000 -#define MCFQSPI_QMR_CPOL 0x0200 -#define MCFQSPI_QMR_CPHA 0x0100 -#define MCFQSPI_QDLYR 0x04 -#define MCFQSPI_QDLYR_SPE 0x8000 -#define MCFQSPI_QWR 0x08 -#define MCFQSPI_QWR_HALT 0x8000 -#define MCFQSPI_QWR_WREN 0x4000 -#define MCFQSPI_QWR_CSIV 0x1000 -#define MCFQSPI_QIR 0x0C -#define MCFQSPI_QIR_WCEFB 0x8000 -#define MCFQSPI_QIR_ABRTB 0x4000 -#define MCFQSPI_QIR_ABRTL 0x1000 -#define MCFQSPI_QIR_WCEFE 0x0800 -#define MCFQSPI_QIR_ABRTE 0x0400 -#define MCFQSPI_QIR_SPIFE 0x0100 -#define MCFQSPI_QIR_WCEF 0x0008 -#define MCFQSPI_QIR_ABRT 0x0004 -#define MCFQSPI_QIR_SPIF 0x0001 -#define MCFQSPI_QAR 0x010 -#define MCFQSPI_QAR_TXBUF 0x00 -#define MCFQSPI_QAR_RXBUF 0x10 -#define MCFQSPI_QAR_CMDBUF 0x20 -#define MCFQSPI_QDR 0x014 -#define MCFQSPI_QCR 0x014 -#define MCFQSPI_QCR_CONT 0x8000 -#define MCFQSPI_QCR_BITSE 0x4000 -#define MCFQSPI_QCR_DT 0x2000 - -struct mcfqspi { - void __iomem *iobase; - int irq; - struct clk *clk; - struct mcfqspi_cs_control *cs_control; - - wait_queue_head_t waitq; - - struct work_struct work; - struct workqueue_struct *workq; - spinlock_t lock; - struct list_head msgq; -}; - -static void mcfqspi_wr_qmr(struct mcfqspi *mcfqspi, u16 val) -{ - writew(val, mcfqspi->iobase + MCFQSPI_QMR); -} - -static void mcfqspi_wr_qdlyr(struct mcfqspi *mcfqspi, u16 val) -{ - writew(val, mcfqspi->iobase + MCFQSPI_QDLYR); -} - -static u16 mcfqspi_rd_qdlyr(struct mcfqspi *mcfqspi) -{ - return readw(mcfqspi->iobase + MCFQSPI_QDLYR); -} - -static void mcfqspi_wr_qwr(struct mcfqspi *mcfqspi, u16 val) -{ - writew(val, mcfqspi->iobase + MCFQSPI_QWR); -} - -static void mcfqspi_wr_qir(struct mcfqspi *mcfqspi, u16 val) -{ - writew(val, mcfqspi->iobase + MCFQSPI_QIR); -} - -static void mcfqspi_wr_qar(struct mcfqspi *mcfqspi, u16 val) -{ - writew(val, mcfqspi->iobase + MCFQSPI_QAR); -} - -static void mcfqspi_wr_qdr(struct mcfqspi *mcfqspi, u16 val) -{ - writew(val, mcfqspi->iobase + MCFQSPI_QDR); -} - -static u16 mcfqspi_rd_qdr(struct mcfqspi *mcfqspi) -{ - return readw(mcfqspi->iobase + MCFQSPI_QDR); -} - -static void mcfqspi_cs_select(struct mcfqspi *mcfqspi, u8 chip_select, - bool cs_high) -{ - mcfqspi->cs_control->select(mcfqspi->cs_control, chip_select, cs_high); -} - -static void mcfqspi_cs_deselect(struct mcfqspi *mcfqspi, u8 chip_select, - bool cs_high) -{ - mcfqspi->cs_control->deselect(mcfqspi->cs_control, 
chip_select, cs_high); -} - -static int mcfqspi_cs_setup(struct mcfqspi *mcfqspi) -{ - return (mcfqspi->cs_control && mcfqspi->cs_control->setup) ? - mcfqspi->cs_control->setup(mcfqspi->cs_control) : 0; -} - -static void mcfqspi_cs_teardown(struct mcfqspi *mcfqspi) -{ - if (mcfqspi->cs_control && mcfqspi->cs_control->teardown) - mcfqspi->cs_control->teardown(mcfqspi->cs_control); -} - -static u8 mcfqspi_qmr_baud(u32 speed_hz) -{ - return clamp((MCFQSPI_BUSCLK + speed_hz - 1) / speed_hz, 2u, 255u); -} - -static bool mcfqspi_qdlyr_spe(struct mcfqspi *mcfqspi) -{ - return mcfqspi_rd_qdlyr(mcfqspi) & MCFQSPI_QDLYR_SPE; -} - -static irqreturn_t mcfqspi_irq_handler(int this_irq, void *dev_id) -{ - struct mcfqspi *mcfqspi = dev_id; - - /* clear interrupt */ - mcfqspi_wr_qir(mcfqspi, MCFQSPI_QIR_SPIFE | MCFQSPI_QIR_SPIF); - wake_up(&mcfqspi->waitq); - - return IRQ_HANDLED; -} - -static void mcfqspi_transfer_msg8(struct mcfqspi *mcfqspi, unsigned count, - const u8 *txbuf, u8 *rxbuf) -{ - unsigned i, n, offset = 0; - - n = min(count, 16u); - - mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_CMDBUF); - for (i = 0; i < n; ++i) - mcfqspi_wr_qdr(mcfqspi, MCFQSPI_QCR_BITSE); - - mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_TXBUF); - if (txbuf) - for (i = 0; i < n; ++i) - mcfqspi_wr_qdr(mcfqspi, *txbuf++); - else - for (i = 0; i < count; ++i) - mcfqspi_wr_qdr(mcfqspi, 0); - - count -= n; - if (count) { - u16 qwr = 0xf08; - mcfqspi_wr_qwr(mcfqspi, 0x700); - mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE); - - do { - wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi)); - mcfqspi_wr_qwr(mcfqspi, qwr); - mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE); - if (rxbuf) { - mcfqspi_wr_qar(mcfqspi, - MCFQSPI_QAR_RXBUF + offset); - for (i = 0; i < 8; ++i) - *rxbuf++ = mcfqspi_rd_qdr(mcfqspi); - } - n = min(count, 8u); - if (txbuf) { - mcfqspi_wr_qar(mcfqspi, - MCFQSPI_QAR_TXBUF + offset); - for (i = 0; i < n; ++i) - mcfqspi_wr_qdr(mcfqspi, *txbuf++); - } - qwr = (offset ? 
0x808 : 0) + ((n - 1) << 8); - offset ^= 8; - count -= n; - } while (count); - wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi)); - mcfqspi_wr_qwr(mcfqspi, qwr); - mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE); - if (rxbuf) { - mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_RXBUF + offset); - for (i = 0; i < 8; ++i) - *rxbuf++ = mcfqspi_rd_qdr(mcfqspi); - offset ^= 8; - } - } else { - mcfqspi_wr_qwr(mcfqspi, (n - 1) << 8); - mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE); - } - wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi)); - if (rxbuf) { - mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_RXBUF + offset); - for (i = 0; i < n; ++i) - *rxbuf++ = mcfqspi_rd_qdr(mcfqspi); - } -} - -static void mcfqspi_transfer_msg16(struct mcfqspi *mcfqspi, unsigned count, - const u16 *txbuf, u16 *rxbuf) -{ - unsigned i, n, offset = 0; - - n = min(count, 16u); - - mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_CMDBUF); - for (i = 0; i < n; ++i) - mcfqspi_wr_qdr(mcfqspi, MCFQSPI_QCR_BITSE); - - mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_TXBUF); - if (txbuf) - for (i = 0; i < n; ++i) - mcfqspi_wr_qdr(mcfqspi, *txbuf++); - else - for (i = 0; i < count; ++i) - mcfqspi_wr_qdr(mcfqspi, 0); - - count -= n; - if (count) { - u16 qwr = 0xf08; - mcfqspi_wr_qwr(mcfqspi, 0x700); - mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE); - - do { - wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi)); - mcfqspi_wr_qwr(mcfqspi, qwr); - mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE); - if (rxbuf) { - mcfqspi_wr_qar(mcfqspi, - MCFQSPI_QAR_RXBUF + offset); - for (i = 0; i < 8; ++i) - *rxbuf++ = mcfqspi_rd_qdr(mcfqspi); - } - n = min(count, 8u); - if (txbuf) { - mcfqspi_wr_qar(mcfqspi, - MCFQSPI_QAR_TXBUF + offset); - for (i = 0; i < n; ++i) - mcfqspi_wr_qdr(mcfqspi, *txbuf++); - } - qwr = (offset ? 0x808 : 0x000) + ((n - 1) << 8); - offset ^= 8; - count -= n; - } while (count); - wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi)); - mcfqspi_wr_qwr(mcfqspi, qwr); - mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE); - if (rxbuf) { - mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_RXBUF + offset); - for (i = 0; i < 8; ++i) - *rxbuf++ = mcfqspi_rd_qdr(mcfqspi); - offset ^= 8; - } - } else { - mcfqspi_wr_qwr(mcfqspi, (n - 1) << 8); - mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE); - } - wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi)); - if (rxbuf) { - mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_RXBUF + offset); - for (i = 0; i < n; ++i) - *rxbuf++ = mcfqspi_rd_qdr(mcfqspi); - } -} - -static void mcfqspi_work(struct work_struct *work) -{ - struct mcfqspi *mcfqspi = container_of(work, struct mcfqspi, work); - unsigned long flags; - - spin_lock_irqsave(&mcfqspi->lock, flags); - while (!list_empty(&mcfqspi->msgq)) { - struct spi_message *msg; - struct spi_device *spi; - struct spi_transfer *xfer; - int status = 0; - - msg = container_of(mcfqspi->msgq.next, struct spi_message, - queue); - - list_del_init(&msg->queue); - spin_unlock_irqrestore(&mcfqspi->lock, flags); - - spi = msg->spi; - - list_for_each_entry(xfer, &msg->transfers, transfer_list) { - bool cs_high = spi->mode & SPI_CS_HIGH; - u16 qmr = MCFQSPI_QMR_MSTR; - - if (xfer->bits_per_word) - qmr |= xfer->bits_per_word << 10; - else - qmr |= spi->bits_per_word << 10; - if (spi->mode & SPI_CPHA) - qmr |= MCFQSPI_QMR_CPHA; - if (spi->mode & SPI_CPOL) - qmr |= MCFQSPI_QMR_CPOL; - if (xfer->speed_hz) - qmr |= mcfqspi_qmr_baud(xfer->speed_hz); - else - qmr |= mcfqspi_qmr_baud(spi->max_speed_hz); - mcfqspi_wr_qmr(mcfqspi, qmr); - - mcfqspi_cs_select(mcfqspi, spi->chip_select, cs_high); - - mcfqspi_wr_qir(mcfqspi, MCFQSPI_QIR_SPIFE); - if 
((xfer->bits_per_word ? xfer->bits_per_word : - spi->bits_per_word) == 8) - mcfqspi_transfer_msg8(mcfqspi, xfer->len, - xfer->tx_buf, - xfer->rx_buf); - else - mcfqspi_transfer_msg16(mcfqspi, xfer->len / 2, - xfer->tx_buf, - xfer->rx_buf); - mcfqspi_wr_qir(mcfqspi, 0); - - if (xfer->delay_usecs) - udelay(xfer->delay_usecs); - if (xfer->cs_change) { - if (!list_is_last(&xfer->transfer_list, - &msg->transfers)) - mcfqspi_cs_deselect(mcfqspi, - spi->chip_select, - cs_high); - } else { - if (list_is_last(&xfer->transfer_list, - &msg->transfers)) - mcfqspi_cs_deselect(mcfqspi, - spi->chip_select, - cs_high); - } - msg->actual_length += xfer->len; - } - msg->status = status; - msg->complete(msg->context); - - spin_lock_irqsave(&mcfqspi->lock, flags); - } - spin_unlock_irqrestore(&mcfqspi->lock, flags); -} - -static int mcfqspi_transfer(struct spi_device *spi, struct spi_message *msg) -{ - struct mcfqspi *mcfqspi; - struct spi_transfer *xfer; - unsigned long flags; - - mcfqspi = spi_master_get_devdata(spi->master); - - list_for_each_entry(xfer, &msg->transfers, transfer_list) { - if (xfer->bits_per_word && ((xfer->bits_per_word < 8) - || (xfer->bits_per_word > 16))) { - dev_dbg(&spi->dev, - "%d bits per word is not supported\n", - xfer->bits_per_word); - goto fail; - } - if (xfer->speed_hz) { - u32 real_speed = MCFQSPI_BUSCLK / - mcfqspi_qmr_baud(xfer->speed_hz); - if (real_speed != xfer->speed_hz) - dev_dbg(&spi->dev, - "using speed %d instead of %d\n", - real_speed, xfer->speed_hz); - } - } - msg->status = -EINPROGRESS; - msg->actual_length = 0; - - spin_lock_irqsave(&mcfqspi->lock, flags); - list_add_tail(&msg->queue, &mcfqspi->msgq); - queue_work(mcfqspi->workq, &mcfqspi->work); - spin_unlock_irqrestore(&mcfqspi->lock, flags); - - return 0; -fail: - msg->status = -EINVAL; - return -EINVAL; -} - -static int mcfqspi_setup(struct spi_device *spi) -{ - if ((spi->bits_per_word < 8) || (spi->bits_per_word > 16)) { - dev_dbg(&spi->dev, "%d bits per word is not supported\n", - spi->bits_per_word); - return -EINVAL; - } - if (spi->chip_select >= spi->master->num_chipselect) { - dev_dbg(&spi->dev, "%d chip select is out of range\n", - spi->chip_select); - return -EINVAL; - } - - mcfqspi_cs_deselect(spi_master_get_devdata(spi->master), - spi->chip_select, spi->mode & SPI_CS_HIGH); - - dev_dbg(&spi->dev, - "bits per word %d, chip select %d, speed %d KHz\n", - spi->bits_per_word, spi->chip_select, - (MCFQSPI_BUSCLK / mcfqspi_qmr_baud(spi->max_speed_hz)) - / 1000); - - return 0; -} - -static int __devinit mcfqspi_probe(struct platform_device *pdev) -{ - struct spi_master *master; - struct mcfqspi *mcfqspi; - struct resource *res; - struct mcfqspi_platform_data *pdata; - int status; - - master = spi_alloc_master(&pdev->dev, sizeof(*mcfqspi)); - if (master == NULL) { - dev_dbg(&pdev->dev, "spi_alloc_master failed\n"); - return -ENOMEM; - } - - mcfqspi = spi_master_get_devdata(master); - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_dbg(&pdev->dev, "platform_get_resource failed\n"); - status = -ENXIO; - goto fail0; - } - - if (!request_mem_region(res->start, resource_size(res), pdev->name)) { - dev_dbg(&pdev->dev, "request_mem_region failed\n"); - status = -EBUSY; - goto fail0; - } - - mcfqspi->iobase = ioremap(res->start, resource_size(res)); - if (!mcfqspi->iobase) { - dev_dbg(&pdev->dev, "ioremap failed\n"); - status = -ENOMEM; - goto fail1; - } - - mcfqspi->irq = platform_get_irq(pdev, 0); - if (mcfqspi->irq < 0) { - dev_dbg(&pdev->dev, "platform_get_irq failed\n"); - status 
= -ENXIO; - goto fail2; - } - - status = request_irq(mcfqspi->irq, mcfqspi_irq_handler, IRQF_DISABLED, - pdev->name, mcfqspi); - if (status) { - dev_dbg(&pdev->dev, "request_irq failed\n"); - goto fail2; - } - - mcfqspi->clk = clk_get(&pdev->dev, "qspi_clk"); - if (IS_ERR(mcfqspi->clk)) { - dev_dbg(&pdev->dev, "clk_get failed\n"); - status = PTR_ERR(mcfqspi->clk); - goto fail3; - } - clk_enable(mcfqspi->clk); - - mcfqspi->workq = create_singlethread_workqueue(dev_name(master->dev.parent)); - if (!mcfqspi->workq) { - dev_dbg(&pdev->dev, "create_workqueue failed\n"); - status = -ENOMEM; - goto fail4; - } - INIT_WORK(&mcfqspi->work, mcfqspi_work); - spin_lock_init(&mcfqspi->lock); - INIT_LIST_HEAD(&mcfqspi->msgq); - init_waitqueue_head(&mcfqspi->waitq); - - pdata = pdev->dev.platform_data; - if (!pdata) { - dev_dbg(&pdev->dev, "platform data is missing\n"); - goto fail5; - } - master->bus_num = pdata->bus_num; - master->num_chipselect = pdata->num_chipselect; - - mcfqspi->cs_control = pdata->cs_control; - status = mcfqspi_cs_setup(mcfqspi); - if (status) { - dev_dbg(&pdev->dev, "error initializing cs_control\n"); - goto fail5; - } - - master->mode_bits = SPI_CS_HIGH | SPI_CPOL | SPI_CPHA; - master->setup = mcfqspi_setup; - master->transfer = mcfqspi_transfer; - - platform_set_drvdata(pdev, master); - - status = spi_register_master(master); - if (status) { - dev_dbg(&pdev->dev, "spi_register_master failed\n"); - goto fail6; - } - dev_info(&pdev->dev, "Coldfire QSPI bus driver\n"); - - return 0; - -fail6: - mcfqspi_cs_teardown(mcfqspi); -fail5: - destroy_workqueue(mcfqspi->workq); -fail4: - clk_disable(mcfqspi->clk); - clk_put(mcfqspi->clk); -fail3: - free_irq(mcfqspi->irq, mcfqspi); -fail2: - iounmap(mcfqspi->iobase); -fail1: - release_mem_region(res->start, resource_size(res)); -fail0: - spi_master_put(master); - - dev_dbg(&pdev->dev, "Coldfire QSPI probe failed\n"); - - return status; -} - -static int __devexit mcfqspi_remove(struct platform_device *pdev) -{ - struct spi_master *master = platform_get_drvdata(pdev); - struct mcfqspi *mcfqspi = spi_master_get_devdata(master); - struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - - /* disable the hardware (set the baud rate to 0) */ - mcfqspi_wr_qmr(mcfqspi, MCFQSPI_QMR_MSTR); - - platform_set_drvdata(pdev, NULL); - mcfqspi_cs_teardown(mcfqspi); - destroy_workqueue(mcfqspi->workq); - clk_disable(mcfqspi->clk); - clk_put(mcfqspi->clk); - free_irq(mcfqspi->irq, mcfqspi); - iounmap(mcfqspi->iobase); - release_mem_region(res->start, resource_size(res)); - spi_unregister_master(master); - spi_master_put(master); - - return 0; -} - -#ifdef CONFIG_PM - -static int mcfqspi_suspend(struct device *dev) -{ - struct mcfqspi *mcfqspi = platform_get_drvdata(to_platform_device(dev)); - - clk_disable(mcfqspi->clk); - - return 0; -} - -static int mcfqspi_resume(struct device *dev) -{ - struct mcfqspi *mcfqspi = platform_get_drvdata(to_platform_device(dev)); - - clk_enable(mcfqspi->clk); - - return 0; -} - -static struct dev_pm_ops mcfqspi_dev_pm_ops = { - .suspend = mcfqspi_suspend, - .resume = mcfqspi_resume, -}; - -#define MCFQSPI_DEV_PM_OPS (&mcfqspi_dev_pm_ops) -#else -#define MCFQSPI_DEV_PM_OPS NULL -#endif - -static struct platform_driver mcfqspi_driver = { - .driver.name = DRIVER_NAME, - .driver.owner = THIS_MODULE, - .driver.pm = MCFQSPI_DEV_PM_OPS, - .remove = __devexit_p(mcfqspi_remove), -}; - -static int __init mcfqspi_init(void) -{ - return platform_driver_probe(&mcfqspi_driver, mcfqspi_probe); -} -module_init(mcfqspi_init); - 
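/*
 * For context, a minimal sketch (not part of this patch) of how a client
 * driver would exercise the queued transfer path above: mcfqspi_transfer()
 * only enqueues the message, and mcfqspi_work() later applies the
 * per-transfer bits_per_word/speed_hz overrides when programming QMR.
 * Assumes <linux/spi/spi.h>; the device pointer, command byte and buffers
 * are placeholders.
 */
static int example_qspi_xfer(struct spi_device *spi)
{
	struct spi_transfer xfers[2] = { };
	struct spi_message msg;
	u8 cmd[1] = { 0x9f };		/* placeholder command byte */
	u16 data[4];			/* placeholder 16-bit payload */

	spi_message_init(&msg);

	xfers[0].tx_buf = cmd;
	xfers[0].len = sizeof(cmd);	/* falls back to spi->bits_per_word */
	spi_message_add_tail(&xfers[0], &msg);

	xfers[1].rx_buf = data;
	xfers[1].len = sizeof(data);	/* 8 bytes -> 4 words at 16 bpw */
	xfers[1].bits_per_word = 16;	/* per-transfer override, 8..16 valid */
	xfers[1].speed_hz = 1000000;	/* per-transfer clock override */
	spi_message_add_tail(&xfers[1], &msg);

	return spi_sync(spi, &msg);	/* queued and run by mcfqspi_work() */
}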
-static void __exit mcfqspi_exit(void) -{ - platform_driver_unregister(&mcfqspi_driver); -} -module_exit(mcfqspi_exit); - -MODULE_AUTHOR("Steven King "); -MODULE_DESCRIPTION("Coldfire QSPI Controller Driver"); -MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:" DRIVER_NAME); diff --git a/drivers/spi/davinci_spi.c b/drivers/spi/davinci_spi.c deleted file mode 100644 index 1f0ed80..0000000 --- a/drivers/spi/davinci_spi.c +++ /dev/null @@ -1,1030 +0,0 @@ -/* - * Copyright (C) 2009 Texas Instruments. - * Copyright (C) 2010 EF Johnson Technologies - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#define SPI_NO_RESOURCE ((resource_size_t)-1) - -#define SPI_MAX_CHIPSELECT 2 - -#define CS_DEFAULT 0xFF - -#define SPIFMT_PHASE_MASK BIT(16) -#define SPIFMT_POLARITY_MASK BIT(17) -#define SPIFMT_DISTIMER_MASK BIT(18) -#define SPIFMT_SHIFTDIR_MASK BIT(20) -#define SPIFMT_WAITENA_MASK BIT(21) -#define SPIFMT_PARITYENA_MASK BIT(22) -#define SPIFMT_ODD_PARITY_MASK BIT(23) -#define SPIFMT_WDELAY_MASK 0x3f000000u -#define SPIFMT_WDELAY_SHIFT 24 -#define SPIFMT_PRESCALE_SHIFT 8 - -/* SPIPC0 */ -#define SPIPC0_DIFUN_MASK BIT(11) /* MISO */ -#define SPIPC0_DOFUN_MASK BIT(10) /* MOSI */ -#define SPIPC0_CLKFUN_MASK BIT(9) /* CLK */ -#define SPIPC0_SPIENA_MASK BIT(8) /* nREADY */ - -#define SPIINT_MASKALL 0x0101035F -#define SPIINT_MASKINT 0x0000015F -#define SPI_INTLVL_1 0x000001FF -#define SPI_INTLVL_0 0x00000000 - -/* SPIDAT1 (upper 16 bit defines) */ -#define SPIDAT1_CSHOLD_MASK BIT(12) - -/* SPIGCR1 */ -#define SPIGCR1_CLKMOD_MASK BIT(1) -#define SPIGCR1_MASTER_MASK BIT(0) -#define SPIGCR1_POWERDOWN_MASK BIT(8) -#define SPIGCR1_LOOPBACK_MASK BIT(16) -#define SPIGCR1_SPIENA_MASK BIT(24) - -/* SPIBUF */ -#define SPIBUF_TXFULL_MASK BIT(29) -#define SPIBUF_RXEMPTY_MASK BIT(31) - -/* SPIDELAY */ -#define SPIDELAY_C2TDELAY_SHIFT 24 -#define SPIDELAY_C2TDELAY_MASK (0xFF << SPIDELAY_C2TDELAY_SHIFT) -#define SPIDELAY_T2CDELAY_SHIFT 16 -#define SPIDELAY_T2CDELAY_MASK (0xFF << SPIDELAY_T2CDELAY_SHIFT) -#define SPIDELAY_T2EDELAY_SHIFT 8 -#define SPIDELAY_T2EDELAY_MASK (0xFF << SPIDELAY_T2EDELAY_SHIFT) -#define SPIDELAY_C2EDELAY_SHIFT 0 -#define SPIDELAY_C2EDELAY_MASK 0xFF - -/* Error Masks */ -#define SPIFLG_DLEN_ERR_MASK BIT(0) -#define SPIFLG_TIMEOUT_MASK BIT(1) -#define SPIFLG_PARERR_MASK BIT(2) -#define SPIFLG_DESYNC_MASK BIT(3) -#define SPIFLG_BITERR_MASK BIT(4) -#define SPIFLG_OVRRUN_MASK BIT(6) -#define SPIFLG_BUF_INIT_ACTIVE_MASK BIT(24) -#define SPIFLG_ERROR_MASK (SPIFLG_DLEN_ERR_MASK \ - | SPIFLG_TIMEOUT_MASK | SPIFLG_PARERR_MASK \ - | SPIFLG_DESYNC_MASK | SPIFLG_BITERR_MASK \ - | SPIFLG_OVRRUN_MASK) - -#define SPIINT_DMA_REQ_EN BIT(16) - -/* SPI Controller registers */ -#define SPIGCR0 0x00 
-#define SPIGCR1 0x04 -#define SPIINT 0x08 -#define SPILVL 0x0c -#define SPIFLG 0x10 -#define SPIPC0 0x14 -#define SPIDAT1 0x3c -#define SPIBUF 0x40 -#define SPIDELAY 0x48 -#define SPIDEF 0x4c -#define SPIFMT0 0x50 - -/* We have 2 DMA channels per CS, one for RX and one for TX */ -struct davinci_spi_dma { - int tx_channel; - int rx_channel; - int dummy_param_slot; - enum dma_event_q eventq; -}; - -/* SPI Controller driver's private data. */ -struct davinci_spi { - struct spi_bitbang bitbang; - struct clk *clk; - - u8 version; - resource_size_t pbase; - void __iomem *base; - u32 irq; - struct completion done; - - const void *tx; - void *rx; -#define SPI_TMP_BUFSZ (SMP_CACHE_BYTES + 1) - u8 rx_tmp_buf[SPI_TMP_BUFSZ]; - int rcount; - int wcount; - struct davinci_spi_dma dma; - struct davinci_spi_platform_data *pdata; - - void (*get_rx)(u32 rx_data, struct davinci_spi *); - u32 (*get_tx)(struct davinci_spi *); - - u8 bytes_per_word[SPI_MAX_CHIPSELECT]; -}; - -static struct davinci_spi_config davinci_spi_default_cfg; - -static void davinci_spi_rx_buf_u8(u32 data, struct davinci_spi *dspi) -{ - if (dspi->rx) { - u8 *rx = dspi->rx; - *rx++ = (u8)data; - dspi->rx = rx; - } -} - -static void davinci_spi_rx_buf_u16(u32 data, struct davinci_spi *dspi) -{ - if (dspi->rx) { - u16 *rx = dspi->rx; - *rx++ = (u16)data; - dspi->rx = rx; - } -} - -static u32 davinci_spi_tx_buf_u8(struct davinci_spi *dspi) -{ - u32 data = 0; - if (dspi->tx) { - const u8 *tx = dspi->tx; - data = *tx++; - dspi->tx = tx; - } - return data; -} - -static u32 davinci_spi_tx_buf_u16(struct davinci_spi *dspi) -{ - u32 data = 0; - if (dspi->tx) { - const u16 *tx = dspi->tx; - data = *tx++; - dspi->tx = tx; - } - return data; -} - -static inline void set_io_bits(void __iomem *addr, u32 bits) -{ - u32 v = ioread32(addr); - - v |= bits; - iowrite32(v, addr); -} - -static inline void clear_io_bits(void __iomem *addr, u32 bits) -{ - u32 v = ioread32(addr); - - v &= ~bits; - iowrite32(v, addr); -} - -/* - * Interface to control the chip select signal - */ -static void davinci_spi_chipselect(struct spi_device *spi, int value) -{ - struct davinci_spi *dspi; - struct davinci_spi_platform_data *pdata; - u8 chip_sel = spi->chip_select; - u16 spidat1 = CS_DEFAULT; - bool gpio_chipsel = false; - - dspi = spi_master_get_devdata(spi->master); - pdata = dspi->pdata; - - if (pdata->chip_sel && chip_sel < pdata->num_chipselect && - pdata->chip_sel[chip_sel] != SPI_INTERN_CS) - gpio_chipsel = true; - - /* - * Board specific chip select logic decides the polarity and cs - * line for the controller - */ - if (gpio_chipsel) { - if (value == BITBANG_CS_ACTIVE) - gpio_set_value(pdata->chip_sel[chip_sel], 0); - else - gpio_set_value(pdata->chip_sel[chip_sel], 1); - } else { - if (value == BITBANG_CS_ACTIVE) { - spidat1 |= SPIDAT1_CSHOLD_MASK; - spidat1 &= ~(0x1 << chip_sel); - } - - iowrite16(spidat1, dspi->base + SPIDAT1 + 2); - } -} - -/** - * davinci_spi_get_prescale - Calculates the correct prescale value - * @maxspeed_hz: the maximum rate the SPI clock can run at - * - * This function calculates the prescale value that generates a clock rate - * less than or equal to the specified maximum. - * - * Returns: calculated prescale - 1 for easy programming into SPI registers - * or negative error number if valid prescalar cannot be updated. 
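 * Illustrative numbers (assumed, not from the original source): with a
 * 150 MHz functional clock and max_speed_hz = 10 MHz,
 * DIV_ROUND_UP(150000000, 10000000) = 15, which lies inside the valid
 * 3..256 window, so the function returns 14; the register field holds the
 * divider minus one. A request that would need a divider below 3 or above
 * 256 is rejected with -EINVAL.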
- */ -static inline int davinci_spi_get_prescale(struct davinci_spi *dspi, - u32 max_speed_hz) -{ - int ret; - - ret = DIV_ROUND_UP(clk_get_rate(dspi->clk), max_speed_hz); - - if (ret < 3 || ret > 256) - return -EINVAL; - - return ret - 1; -} - -/** - * davinci_spi_setup_transfer - This functions will determine transfer method - * @spi: spi device on which data transfer to be done - * @t: spi transfer in which transfer info is filled - * - * This function determines data transfer method (8/16/32 bit transfer). - * It will also set the SPI Clock Control register according to - * SPI slave device freq. - */ -static int davinci_spi_setup_transfer(struct spi_device *spi, - struct spi_transfer *t) -{ - - struct davinci_spi *dspi; - struct davinci_spi_config *spicfg; - u8 bits_per_word = 0; - u32 hz = 0, spifmt = 0, prescale = 0; - - dspi = spi_master_get_devdata(spi->master); - spicfg = (struct davinci_spi_config *)spi->controller_data; - if (!spicfg) - spicfg = &davinci_spi_default_cfg; - - if (t) { - bits_per_word = t->bits_per_word; - hz = t->speed_hz; - } - - /* if bits_per_word is not set then set it default */ - if (!bits_per_word) - bits_per_word = spi->bits_per_word; - - /* - * Assign function pointer to appropriate transfer method - * 8bit, 16bit or 32bit transfer - */ - if (bits_per_word <= 8 && bits_per_word >= 2) { - dspi->get_rx = davinci_spi_rx_buf_u8; - dspi->get_tx = davinci_spi_tx_buf_u8; - dspi->bytes_per_word[spi->chip_select] = 1; - } else if (bits_per_word <= 16 && bits_per_word >= 2) { - dspi->get_rx = davinci_spi_rx_buf_u16; - dspi->get_tx = davinci_spi_tx_buf_u16; - dspi->bytes_per_word[spi->chip_select] = 2; - } else - return -EINVAL; - - if (!hz) - hz = spi->max_speed_hz; - - /* Set up SPIFMTn register, unique to this chipselect. 
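 * (For instance, with the register value 14 from the prescale example
 * above and 8-bit words, SPIFMT0 starts out as
 * (14 << SPIFMT_PRESCALE_SHIFT) | 8 = 0xe08 before the mode-dependent
 * bits below are OR-ed in; the numbers are illustrative only.)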
*/ - - prescale = davinci_spi_get_prescale(dspi, hz); - if (prescale < 0) - return prescale; - - spifmt = (prescale << SPIFMT_PRESCALE_SHIFT) | (bits_per_word & 0x1f); - - if (spi->mode & SPI_LSB_FIRST) - spifmt |= SPIFMT_SHIFTDIR_MASK; - - if (spi->mode & SPI_CPOL) - spifmt |= SPIFMT_POLARITY_MASK; - - if (!(spi->mode & SPI_CPHA)) - spifmt |= SPIFMT_PHASE_MASK; - - /* - * Version 1 hardware supports two basic SPI modes: - * - Standard SPI mode uses 4 pins, with chipselect - * - 3 pin SPI is a 4 pin variant without CS (SPI_NO_CS) - * (distinct from SPI_3WIRE, with just one data wire; - * or similar variants without MOSI or without MISO) - * - * Version 2 hardware supports an optional handshaking signal, - * so it can support two more modes: - * - 5 pin SPI variant is standard SPI plus SPI_READY - * - 4 pin with enable is (SPI_READY | SPI_NO_CS) - */ - - if (dspi->version == SPI_VERSION_2) { - - u32 delay = 0; - - spifmt |= ((spicfg->wdelay << SPIFMT_WDELAY_SHIFT) - & SPIFMT_WDELAY_MASK); - - if (spicfg->odd_parity) - spifmt |= SPIFMT_ODD_PARITY_MASK; - - if (spicfg->parity_enable) - spifmt |= SPIFMT_PARITYENA_MASK; - - if (spicfg->timer_disable) { - spifmt |= SPIFMT_DISTIMER_MASK; - } else { - delay |= (spicfg->c2tdelay << SPIDELAY_C2TDELAY_SHIFT) - & SPIDELAY_C2TDELAY_MASK; - delay |= (spicfg->t2cdelay << SPIDELAY_T2CDELAY_SHIFT) - & SPIDELAY_T2CDELAY_MASK; - } - - if (spi->mode & SPI_READY) { - spifmt |= SPIFMT_WAITENA_MASK; - delay |= (spicfg->t2edelay << SPIDELAY_T2EDELAY_SHIFT) - & SPIDELAY_T2EDELAY_MASK; - delay |= (spicfg->c2edelay << SPIDELAY_C2EDELAY_SHIFT) - & SPIDELAY_C2EDELAY_MASK; - } - - iowrite32(delay, dspi->base + SPIDELAY); - } - - iowrite32(spifmt, dspi->base + SPIFMT0); - - return 0; -} - -/** - * davinci_spi_setup - This functions will set default transfer method - * @spi: spi device on which data transfer to be done - * - * This functions sets the default transfer method. 
- */ -static int davinci_spi_setup(struct spi_device *spi) -{ - int retval = 0; - struct davinci_spi *dspi; - struct davinci_spi_platform_data *pdata; - - dspi = spi_master_get_devdata(spi->master); - pdata = dspi->pdata; - - /* if bits per word length is zero then set it default 8 */ - if (!spi->bits_per_word) - spi->bits_per_word = 8; - - if (!(spi->mode & SPI_NO_CS)) { - if ((pdata->chip_sel == NULL) || - (pdata->chip_sel[spi->chip_select] == SPI_INTERN_CS)) - set_io_bits(dspi->base + SPIPC0, 1 << spi->chip_select); - - } - - if (spi->mode & SPI_READY) - set_io_bits(dspi->base + SPIPC0, SPIPC0_SPIENA_MASK); - - if (spi->mode & SPI_LOOP) - set_io_bits(dspi->base + SPIGCR1, SPIGCR1_LOOPBACK_MASK); - else - clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_LOOPBACK_MASK); - - return retval; -} - -static int davinci_spi_check_error(struct davinci_spi *dspi, int int_status) -{ - struct device *sdev = dspi->bitbang.master->dev.parent; - - if (int_status & SPIFLG_TIMEOUT_MASK) { - dev_dbg(sdev, "SPI Time-out Error\n"); - return -ETIMEDOUT; - } - if (int_status & SPIFLG_DESYNC_MASK) { - dev_dbg(sdev, "SPI Desynchronization Error\n"); - return -EIO; - } - if (int_status & SPIFLG_BITERR_MASK) { - dev_dbg(sdev, "SPI Bit error\n"); - return -EIO; - } - - if (dspi->version == SPI_VERSION_2) { - if (int_status & SPIFLG_DLEN_ERR_MASK) { - dev_dbg(sdev, "SPI Data Length Error\n"); - return -EIO; - } - if (int_status & SPIFLG_PARERR_MASK) { - dev_dbg(sdev, "SPI Parity Error\n"); - return -EIO; - } - if (int_status & SPIFLG_OVRRUN_MASK) { - dev_dbg(sdev, "SPI Data Overrun error\n"); - return -EIO; - } - if (int_status & SPIFLG_BUF_INIT_ACTIVE_MASK) { - dev_dbg(sdev, "SPI Buffer Init Active\n"); - return -EBUSY; - } - } - - return 0; -} - -/** - * davinci_spi_process_events - check for and handle any SPI controller events - * @dspi: the controller data - * - * This function will check the SPIFLG register and handle any events that are - * detected there - */ -static int davinci_spi_process_events(struct davinci_spi *dspi) -{ - u32 buf, status, errors = 0, spidat1; - - buf = ioread32(dspi->base + SPIBUF); - - if (dspi->rcount > 0 && !(buf & SPIBUF_RXEMPTY_MASK)) { - dspi->get_rx(buf & 0xFFFF, dspi); - dspi->rcount--; - } - - status = ioread32(dspi->base + SPIFLG); - - if (unlikely(status & SPIFLG_ERROR_MASK)) { - errors = status & SPIFLG_ERROR_MASK; - goto out; - } - - if (dspi->wcount > 0 && !(buf & SPIBUF_TXFULL_MASK)) { - spidat1 = ioread32(dspi->base + SPIDAT1); - dspi->wcount--; - spidat1 &= ~0xFFFF; - spidat1 |= 0xFFFF & dspi->get_tx(dspi); - iowrite32(spidat1, dspi->base + SPIDAT1); - } - -out: - return errors; -} - -static void davinci_spi_dma_callback(unsigned lch, u16 status, void *data) -{ - struct davinci_spi *dspi = data; - struct davinci_spi_dma *dma = &dspi->dma; - - edma_stop(lch); - - if (status == DMA_COMPLETE) { - if (lch == dma->rx_channel) - dspi->rcount = 0; - if (lch == dma->tx_channel) - dspi->wcount = 0; - } - - if ((!dspi->wcount && !dspi->rcount) || (status != DMA_COMPLETE)) - complete(&dspi->done); -} - -/** - * davinci_spi_bufs - functions which will handle transfer data - * @spi: spi device on which data transfer to be done - * @t: spi transfer in which transfer info is filled - * - * This function will put data to be transferred into data register - * of SPI controller and then wait until the completion will be marked - * by the IRQ Handler. 
- */ -static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t) -{ - struct davinci_spi *dspi; - int data_type, ret; - u32 tx_data, spidat1; - u32 errors = 0; - struct davinci_spi_config *spicfg; - struct davinci_spi_platform_data *pdata; - unsigned uninitialized_var(rx_buf_count); - struct device *sdev; - - dspi = spi_master_get_devdata(spi->master); - pdata = dspi->pdata; - spicfg = (struct davinci_spi_config *)spi->controller_data; - if (!spicfg) - spicfg = &davinci_spi_default_cfg; - sdev = dspi->bitbang.master->dev.parent; - - /* convert len to words based on bits_per_word */ - data_type = dspi->bytes_per_word[spi->chip_select]; - - dspi->tx = t->tx_buf; - dspi->rx = t->rx_buf; - dspi->wcount = t->len / data_type; - dspi->rcount = dspi->wcount; - - spidat1 = ioread32(dspi->base + SPIDAT1); - - clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK); - set_io_bits(dspi->base + SPIGCR1, SPIGCR1_SPIENA_MASK); - - INIT_COMPLETION(dspi->done); - - if (spicfg->io_type == SPI_IO_TYPE_INTR) - set_io_bits(dspi->base + SPIINT, SPIINT_MASKINT); - - if (spicfg->io_type != SPI_IO_TYPE_DMA) { - /* start the transfer */ - dspi->wcount--; - tx_data = dspi->get_tx(dspi); - spidat1 &= 0xFFFF0000; - spidat1 |= tx_data & 0xFFFF; - iowrite32(spidat1, dspi->base + SPIDAT1); - } else { - struct davinci_spi_dma *dma; - unsigned long tx_reg, rx_reg; - struct edmacc_param param; - void *rx_buf; - int b, c; - - dma = &dspi->dma; - - tx_reg = (unsigned long)dspi->pbase + SPIDAT1; - rx_reg = (unsigned long)dspi->pbase + SPIBUF; - - /* - * Transmit DMA setup - * - * If there is transmit data, map the transmit buffer, set it - * as the source of data and set the source B index to data - * size. If there is no transmit data, set the transmit register - * as the source of data, and set the source B index to zero. - * - * The destination is always the transmit register itself. And - * the destination never increments. - */ - - if (t->tx_buf) { - t->tx_dma = dma_map_single(&spi->dev, (void *)t->tx_buf, - t->len, DMA_TO_DEVICE); - if (dma_mapping_error(&spi->dev, t->tx_dma)) { - dev_dbg(sdev, "Unable to DMA map %d bytes" - "TX buffer\n", t->len); - return -ENOMEM; - } - } - - /* - * If number of words is greater than 65535, then we need - * to configure a 3 dimension transfer. Use the BCNTRLD - * feature to allow for transfers that aren't even multiples - * of 65535 (or any other possible b size) by first transferring - * the remainder amount then grabbing the next N blocks of - * 65535 words. - */ - - c = dspi->wcount / (SZ_64K - 1); /* N 65535 Blocks */ - b = dspi->wcount - c * (SZ_64K - 1); /* Remainder */ - if (b) - c++; - else - b = SZ_64K - 1; - - param.opt = TCINTEN | EDMA_TCC(dma->tx_channel); - param.src = t->tx_buf ? t->tx_dma : tx_reg; - param.a_b_cnt = b << 16 | data_type; - param.dst = tx_reg; - param.src_dst_bidx = t->tx_buf ? data_type : 0; - param.link_bcntrld = 0xffffffff; - param.src_dst_cidx = t->tx_buf ? data_type : 0; - param.ccnt = c; - edma_write_slot(dma->tx_channel, ¶m); - edma_link(dma->tx_channel, dma->dummy_param_slot); - - /* - * Receive DMA setup - * - * If there is receive buffer, use it to receive data. If there - * is none provided, use a temporary receive buffer. Set the - * destination B index to 0 so effectively only one byte is used - * in the temporary buffer (address does not increment). - * - * The source of receive data is the receive data register. The - * source address never increments. 
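 * As a worked illustration of the BCNTRLD chunking above (numbers are
 * hypothetical): for wcount = 100000 words, c = 100000 / 65535 = 1 and
 * b = 100000 - 65535 = 34465; since b is non-zero, c becomes 2, so the
 * first array moves the 34465-word remainder and the reload value then
 * supplies one full 65535-word block, 100000 words in total.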
- */ - - if (t->rx_buf) { - rx_buf = t->rx_buf; - rx_buf_count = t->len; - } else { - rx_buf = dspi->rx_tmp_buf; - rx_buf_count = sizeof(dspi->rx_tmp_buf); - } - - t->rx_dma = dma_map_single(&spi->dev, rx_buf, rx_buf_count, - DMA_FROM_DEVICE); - if (dma_mapping_error(&spi->dev, t->rx_dma)) { - dev_dbg(sdev, "Couldn't DMA map a %d bytes RX buffer\n", - rx_buf_count); - if (t->tx_buf) - dma_unmap_single(NULL, t->tx_dma, t->len, - DMA_TO_DEVICE); - return -ENOMEM; - } - - param.opt = TCINTEN | EDMA_TCC(dma->rx_channel); - param.src = rx_reg; - param.a_b_cnt = b << 16 | data_type; - param.dst = t->rx_dma; - param.src_dst_bidx = (t->rx_buf ? data_type : 0) << 16; - param.link_bcntrld = 0xffffffff; - param.src_dst_cidx = (t->rx_buf ? data_type : 0) << 16; - param.ccnt = c; - edma_write_slot(dma->rx_channel, ¶m); - - if (pdata->cshold_bug) - iowrite16(spidat1 >> 16, dspi->base + SPIDAT1 + 2); - - edma_start(dma->rx_channel); - edma_start(dma->tx_channel); - set_io_bits(dspi->base + SPIINT, SPIINT_DMA_REQ_EN); - } - - /* Wait for the transfer to complete */ - if (spicfg->io_type != SPI_IO_TYPE_POLL) { - wait_for_completion_interruptible(&(dspi->done)); - } else { - while (dspi->rcount > 0 || dspi->wcount > 0) { - errors = davinci_spi_process_events(dspi); - if (errors) - break; - cpu_relax(); - } - } - - clear_io_bits(dspi->base + SPIINT, SPIINT_MASKALL); - if (spicfg->io_type == SPI_IO_TYPE_DMA) { - - if (t->tx_buf) - dma_unmap_single(NULL, t->tx_dma, t->len, - DMA_TO_DEVICE); - - dma_unmap_single(NULL, t->rx_dma, rx_buf_count, - DMA_FROM_DEVICE); - - clear_io_bits(dspi->base + SPIINT, SPIINT_DMA_REQ_EN); - } - - clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_SPIENA_MASK); - set_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK); - - /* - * Check for bit error, desync error,parity error,timeout error and - * receive overflow errors - */ - if (errors) { - ret = davinci_spi_check_error(dspi, errors); - WARN(!ret, "%s: error reported but no error found!\n", - dev_name(&spi->dev)); - return ret; - } - - if (dspi->rcount != 0 || dspi->wcount != 0) { - dev_err(sdev, "SPI data transfer error\n"); - return -EIO; - } - - return t->len; -} - -/** - * davinci_spi_irq - Interrupt handler for SPI Master Controller - * @irq: IRQ number for this SPI Master - * @context_data: structure for SPI Master controller davinci_spi - * - * ISR will determine that interrupt arrives either for READ or WRITE command. - * According to command it will do the appropriate action. It will check - * transfer length and if it is not zero then dispatch transfer command again. - * If transfer length is zero then it will indicate the COMPLETION so that - * davinci_spi_bufs function can go ahead. 
- */ -static irqreturn_t davinci_spi_irq(s32 irq, void *data) -{ - struct davinci_spi *dspi = data; - int status; - - status = davinci_spi_process_events(dspi); - if (unlikely(status != 0)) - clear_io_bits(dspi->base + SPIINT, SPIINT_MASKINT); - - if ((!dspi->rcount && !dspi->wcount) || status) - complete(&dspi->done); - - return IRQ_HANDLED; -} - -static int davinci_spi_request_dma(struct davinci_spi *dspi) -{ - int r; - struct davinci_spi_dma *dma = &dspi->dma; - - r = edma_alloc_channel(dma->rx_channel, davinci_spi_dma_callback, dspi, - dma->eventq); - if (r < 0) { - pr_err("Unable to request DMA channel for SPI RX\n"); - r = -EAGAIN; - goto rx_dma_failed; - } - - r = edma_alloc_channel(dma->tx_channel, davinci_spi_dma_callback, dspi, - dma->eventq); - if (r < 0) { - pr_err("Unable to request DMA channel for SPI TX\n"); - r = -EAGAIN; - goto tx_dma_failed; - } - - r = edma_alloc_slot(EDMA_CTLR(dma->tx_channel), EDMA_SLOT_ANY); - if (r < 0) { - pr_err("Unable to request SPI TX DMA param slot\n"); - r = -EAGAIN; - goto param_failed; - } - dma->dummy_param_slot = r; - edma_link(dma->dummy_param_slot, dma->dummy_param_slot); - - return 0; -param_failed: - edma_free_channel(dma->tx_channel); -tx_dma_failed: - edma_free_channel(dma->rx_channel); -rx_dma_failed: - return r; -} - -/** - * davinci_spi_probe - probe function for SPI Master Controller - * @pdev: platform_device structure which contains plateform specific data - * - * According to Linux Device Model this function will be invoked by Linux - * with platform_device struct which contains the device specific info. - * This function will map the SPI controller's memory, register IRQ, - * Reset SPI controller and setting its registers to default value. - * It will invoke spi_bitbang_start to create work queue so that client driver - * can register transfer method to work queue. 
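The probe routine below expects its configuration as platform resources plus a struct davinci_spi_platform_data. A hypothetical board-file fragment showing the shape of that data: the field names and the "spi_davinci" device name come from this driver, while the register address, IRQ/DMA numbers and the SPI_VERSION_1/EVENTQ_0 constants are placeholders taken from the platform headers and purely illustrative:

	static struct davinci_spi_platform_data board_spi0_pdata = {
		.version	= SPI_VERSION_1,	/* SPI_VERSION_2 on newer IP */
		.num_chipselect	= 2,
		.dma_event_q	= EVENTQ_0,		/* EDMA event queue */
		.intr_line	= 0,
		.cshold_bug	= false,
	};

	static struct resource board_spi0_resources[] = {
		{ .start = 0x01c66000, .end = 0x01c66fff, .flags = IORESOURCE_MEM },
		{ .start = 20, .flags = IORESOURCE_IRQ },
		{ .start = 15, .flags = IORESOURCE_DMA },	/* RX channel */
		{ .start = 14, .flags = IORESOURCE_DMA },	/* TX channel */
	};

	static struct platform_device board_spi0_device = {
		.name		= "spi_davinci",
		.id		= 0,
		.resource	= board_spi0_resources,
		.num_resources	= ARRAY_SIZE(board_spi0_resources),
		.dev		= { .platform_data = &board_spi0_pdata },
	};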
- */ -static int davinci_spi_probe(struct platform_device *pdev) -{ - struct spi_master *master; - struct davinci_spi *dspi; - struct davinci_spi_platform_data *pdata; - struct resource *r, *mem; - resource_size_t dma_rx_chan = SPI_NO_RESOURCE; - resource_size_t dma_tx_chan = SPI_NO_RESOURCE; - int i = 0, ret = 0; - u32 spipc0; - - pdata = pdev->dev.platform_data; - if (pdata == NULL) { - ret = -ENODEV; - goto err; - } - - master = spi_alloc_master(&pdev->dev, sizeof(struct davinci_spi)); - if (master == NULL) { - ret = -ENOMEM; - goto err; - } - - dev_set_drvdata(&pdev->dev, master); - - dspi = spi_master_get_devdata(master); - if (dspi == NULL) { - ret = -ENOENT; - goto free_master; - } - - r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (r == NULL) { - ret = -ENOENT; - goto free_master; - } - - dspi->pbase = r->start; - dspi->pdata = pdata; - - mem = request_mem_region(r->start, resource_size(r), pdev->name); - if (mem == NULL) { - ret = -EBUSY; - goto free_master; - } - - dspi->base = ioremap(r->start, resource_size(r)); - if (dspi->base == NULL) { - ret = -ENOMEM; - goto release_region; - } - - dspi->irq = platform_get_irq(pdev, 0); - if (dspi->irq <= 0) { - ret = -EINVAL; - goto unmap_io; - } - - ret = request_irq(dspi->irq, davinci_spi_irq, 0, dev_name(&pdev->dev), - dspi); - if (ret) - goto unmap_io; - - dspi->bitbang.master = spi_master_get(master); - if (dspi->bitbang.master == NULL) { - ret = -ENODEV; - goto irq_free; - } - - dspi->clk = clk_get(&pdev->dev, NULL); - if (IS_ERR(dspi->clk)) { - ret = -ENODEV; - goto put_master; - } - clk_enable(dspi->clk); - - master->bus_num = pdev->id; - master->num_chipselect = pdata->num_chipselect; - master->setup = davinci_spi_setup; - - dspi->bitbang.chipselect = davinci_spi_chipselect; - dspi->bitbang.setup_transfer = davinci_spi_setup_transfer; - - dspi->version = pdata->version; - - dspi->bitbang.flags = SPI_NO_CS | SPI_LSB_FIRST | SPI_LOOP; - if (dspi->version == SPI_VERSION_2) - dspi->bitbang.flags |= SPI_READY; - - r = platform_get_resource(pdev, IORESOURCE_DMA, 0); - if (r) - dma_rx_chan = r->start; - r = platform_get_resource(pdev, IORESOURCE_DMA, 1); - if (r) - dma_tx_chan = r->start; - - dspi->bitbang.txrx_bufs = davinci_spi_bufs; - if (dma_rx_chan != SPI_NO_RESOURCE && - dma_tx_chan != SPI_NO_RESOURCE) { - dspi->dma.rx_channel = dma_rx_chan; - dspi->dma.tx_channel = dma_tx_chan; - dspi->dma.eventq = pdata->dma_event_q; - - ret = davinci_spi_request_dma(dspi); - if (ret) - goto free_clk; - - dev_info(&pdev->dev, "DMA: supported\n"); - dev_info(&pdev->dev, "DMA: RX channel: %d, TX channel: %d, " - "event queue: %d\n", dma_rx_chan, dma_tx_chan, - pdata->dma_event_q); - } - - dspi->get_rx = davinci_spi_rx_buf_u8; - dspi->get_tx = davinci_spi_tx_buf_u8; - - init_completion(&dspi->done); - - /* Reset In/OUT SPI module */ - iowrite32(0, dspi->base + SPIGCR0); - udelay(100); - iowrite32(1, dspi->base + SPIGCR0); - - /* Set up SPIPC0. 
CS and ENA init is done in davinci_spi_setup */ - spipc0 = SPIPC0_DIFUN_MASK | SPIPC0_DOFUN_MASK | SPIPC0_CLKFUN_MASK; - iowrite32(spipc0, dspi->base + SPIPC0); - - /* initialize chip selects */ - if (pdata->chip_sel) { - for (i = 0; i < pdata->num_chipselect; i++) { - if (pdata->chip_sel[i] != SPI_INTERN_CS) - gpio_direction_output(pdata->chip_sel[i], 1); - } - } - - if (pdata->intr_line) - iowrite32(SPI_INTLVL_1, dspi->base + SPILVL); - else - iowrite32(SPI_INTLVL_0, dspi->base + SPILVL); - - iowrite32(CS_DEFAULT, dspi->base + SPIDEF); - - /* master mode default */ - set_io_bits(dspi->base + SPIGCR1, SPIGCR1_CLKMOD_MASK); - set_io_bits(dspi->base + SPIGCR1, SPIGCR1_MASTER_MASK); - set_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK); - - ret = spi_bitbang_start(&dspi->bitbang); - if (ret) - goto free_dma; - - dev_info(&pdev->dev, "Controller at 0x%p\n", dspi->base); - - return ret; - -free_dma: - edma_free_channel(dspi->dma.tx_channel); - edma_free_channel(dspi->dma.rx_channel); - edma_free_slot(dspi->dma.dummy_param_slot); -free_clk: - clk_disable(dspi->clk); - clk_put(dspi->clk); -put_master: - spi_master_put(master); -irq_free: - free_irq(dspi->irq, dspi); -unmap_io: - iounmap(dspi->base); -release_region: - release_mem_region(dspi->pbase, resource_size(r)); -free_master: - kfree(master); -err: - return ret; -} - -/** - * davinci_spi_remove - remove function for SPI Master Controller - * @pdev: platform_device structure which contains plateform specific data - * - * This function will do the reverse action of davinci_spi_probe function - * It will free the IRQ and SPI controller's memory region. - * It will also call spi_bitbang_stop to destroy the work queue which was - * created by spi_bitbang_start. - */ -static int __exit davinci_spi_remove(struct platform_device *pdev) -{ - struct davinci_spi *dspi; - struct spi_master *master; - struct resource *r; - - master = dev_get_drvdata(&pdev->dev); - dspi = spi_master_get_devdata(master); - - spi_bitbang_stop(&dspi->bitbang); - - clk_disable(dspi->clk); - clk_put(dspi->clk); - spi_master_put(master); - free_irq(dspi->irq, dspi); - iounmap(dspi->base); - r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - release_mem_region(dspi->pbase, resource_size(r)); - - return 0; -} - -static struct platform_driver davinci_spi_driver = { - .driver = { - .name = "spi_davinci", - .owner = THIS_MODULE, - }, - .remove = __exit_p(davinci_spi_remove), -}; - -static int __init davinci_spi_init(void) -{ - return platform_driver_probe(&davinci_spi_driver, davinci_spi_probe); -} -module_init(davinci_spi_init); - -static void __exit davinci_spi_exit(void) -{ - platform_driver_unregister(&davinci_spi_driver); -} -module_exit(davinci_spi_exit); - -MODULE_DESCRIPTION("TI DaVinci SPI Master Controller Driver"); -MODULE_LICENSE("GPL"); diff --git a/drivers/spi/dw_spi.c b/drivers/spi/dw_spi.c deleted file mode 100644 index 919fa9d..0000000 --- a/drivers/spi/dw_spi.c +++ /dev/null @@ -1,936 +0,0 @@ -/* - * dw_spi.c - Designware SPI core controller driver (refer pxa2xx_spi.c) - * - * Copyright (c) 2009, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - */ - -#include -#include -#include -#include -#include -#include - -#include "dw_spi.h" - -#ifdef CONFIG_DEBUG_FS -#include -#endif - -#define START_STATE ((void *)0) -#define RUNNING_STATE ((void *)1) -#define DONE_STATE ((void *)2) -#define ERROR_STATE ((void *)-1) - -#define QUEUE_RUNNING 0 -#define QUEUE_STOPPED 1 - -#define MRST_SPI_DEASSERT 0 -#define MRST_SPI_ASSERT 1 - -/* Slave spi_dev related */ -struct chip_data { - u16 cr0; - u8 cs; /* chip select pin */ - u8 n_bytes; /* current is a 1/2/4 byte op */ - u8 tmode; /* TR/TO/RO/EEPROM */ - u8 type; /* SPI/SSP/MicroWire */ - - u8 poll_mode; /* 1 means use poll mode */ - - u32 dma_width; - u32 rx_threshold; - u32 tx_threshold; - u8 enable_dma; - u8 bits_per_word; - u16 clk_div; /* baud rate divider */ - u32 speed_hz; /* baud rate */ - void (*cs_control)(u32 command); -}; - -#ifdef CONFIG_DEBUG_FS -static int spi_show_regs_open(struct inode *inode, struct file *file) -{ - file->private_data = inode->i_private; - return 0; -} - -#define SPI_REGS_BUFSIZE 1024 -static ssize_t spi_show_regs(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct dw_spi *dws; - char *buf; - u32 len = 0; - ssize_t ret; - - dws = file->private_data; - - buf = kzalloc(SPI_REGS_BUFSIZE, GFP_KERNEL); - if (!buf) - return 0; - - len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, - "MRST SPI0 registers:\n"); - len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, - "=================================\n"); - len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, - "CTRL0: \t\t0x%08x\n", dw_readl(dws, ctrl0)); - len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, - "CTRL1: \t\t0x%08x\n", dw_readl(dws, ctrl1)); - len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, - "SSIENR: \t0x%08x\n", dw_readl(dws, ssienr)); - len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, - "SER: \t\t0x%08x\n", dw_readl(dws, ser)); - len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, - "BAUDR: \t\t0x%08x\n", dw_readl(dws, baudr)); - len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, - "TXFTLR: \t0x%08x\n", dw_readl(dws, txfltr)); - len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, - "RXFTLR: \t0x%08x\n", dw_readl(dws, rxfltr)); - len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, - "TXFLR: \t\t0x%08x\n", dw_readl(dws, txflr)); - len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, - "RXFLR: \t\t0x%08x\n", dw_readl(dws, rxflr)); - len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, - "SR: \t\t0x%08x\n", dw_readl(dws, sr)); - len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, - "IMR: \t\t0x%08x\n", dw_readl(dws, imr)); - len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, - "ISR: \t\t0x%08x\n", dw_readl(dws, isr)); - len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, - "DMACR: \t\t0x%08x\n", dw_readl(dws, dmacr)); - len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, - "DMATDLR: \t0x%08x\n", dw_readl(dws, dmatdlr)); - len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, - "DMARDLR: \t0x%08x\n", dw_readl(dws, dmardlr)); - len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, - "=================================\n"); - - ret = simple_read_from_buffer(user_buf, count, ppos, buf, len); - kfree(buf); - return ret; -} - -static const struct file_operations mrst_spi_regs_ops = { - .owner = THIS_MODULE, - .open = 
spi_show_regs_open, - .read = spi_show_regs, - .llseek = default_llseek, -}; - -static int mrst_spi_debugfs_init(struct dw_spi *dws) -{ - dws->debugfs = debugfs_create_dir("mrst_spi", NULL); - if (!dws->debugfs) - return -ENOMEM; - - debugfs_create_file("registers", S_IFREG | S_IRUGO, - dws->debugfs, (void *)dws, &mrst_spi_regs_ops); - return 0; -} - -static void mrst_spi_debugfs_remove(struct dw_spi *dws) -{ - if (dws->debugfs) - debugfs_remove_recursive(dws->debugfs); -} - -#else -static inline int mrst_spi_debugfs_init(struct dw_spi *dws) -{ - return 0; -} - -static inline void mrst_spi_debugfs_remove(struct dw_spi *dws) -{ -} -#endif /* CONFIG_DEBUG_FS */ - -/* Return the max entries we can fill into tx fifo */ -static inline u32 tx_max(struct dw_spi *dws) -{ - u32 tx_left, tx_room, rxtx_gap; - - tx_left = (dws->tx_end - dws->tx) / dws->n_bytes; - tx_room = dws->fifo_len - dw_readw(dws, txflr); - - /* - * Another concern is about the tx/rx mismatch, we - * though to use (dws->fifo_len - rxflr - txflr) as - * one maximum value for tx, but it doesn't cover the - * data which is out of tx/rx fifo and inside the - * shift registers. So a control from sw point of - * view is taken. - */ - rxtx_gap = ((dws->rx_end - dws->rx) - (dws->tx_end - dws->tx)) - / dws->n_bytes; - - return min3(tx_left, tx_room, (u32) (dws->fifo_len - rxtx_gap)); -} - -/* Return the max entries we should read out of rx fifo */ -static inline u32 rx_max(struct dw_spi *dws) -{ - u32 rx_left = (dws->rx_end - dws->rx) / dws->n_bytes; - - return min(rx_left, (u32)dw_readw(dws, rxflr)); -} - -static void dw_writer(struct dw_spi *dws) -{ - u32 max = tx_max(dws); - u16 txw = 0; - - while (max--) { - /* Set the tx word if the transfer's original "tx" is not null */ - if (dws->tx_end - dws->len) { - if (dws->n_bytes == 1) - txw = *(u8 *)(dws->tx); - else - txw = *(u16 *)(dws->tx); - } - dw_writew(dws, dr, txw); - dws->tx += dws->n_bytes; - } -} - -static void dw_reader(struct dw_spi *dws) -{ - u32 max = rx_max(dws); - u16 rxw; - - while (max--) { - rxw = dw_readw(dws, dr); - /* Care rx only if the transfer's original "rx" is not null */ - if (dws->rx_end - dws->len) { - if (dws->n_bytes == 1) - *(u8 *)(dws->rx) = rxw; - else - *(u16 *)(dws->rx) = rxw; - } - dws->rx += dws->n_bytes; - } -} - -static void *next_transfer(struct dw_spi *dws) -{ - struct spi_message *msg = dws->cur_msg; - struct spi_transfer *trans = dws->cur_transfer; - - /* Move to next transfer */ - if (trans->transfer_list.next != &msg->transfers) { - dws->cur_transfer = - list_entry(trans->transfer_list.next, - struct spi_transfer, - transfer_list); - return RUNNING_STATE; - } else - return DONE_STATE; -} - -/* - * Note: first step is the protocol driver prepares - * a dma-capable memory, and this func just need translate - * the virt addr to physical - */ -static int map_dma_buffers(struct dw_spi *dws) -{ - if (!dws->cur_msg->is_dma_mapped - || !dws->dma_inited - || !dws->cur_chip->enable_dma - || !dws->dma_ops) - return 0; - - if (dws->cur_transfer->tx_dma) - dws->tx_dma = dws->cur_transfer->tx_dma; - - if (dws->cur_transfer->rx_dma) - dws->rx_dma = dws->cur_transfer->rx_dma; - - return 1; -} - -/* Caller already set message->status; dma and pio irqs are blocked */ -static void giveback(struct dw_spi *dws) -{ - struct spi_transfer *last_transfer; - unsigned long flags; - struct spi_message *msg; - - spin_lock_irqsave(&dws->lock, flags); - msg = dws->cur_msg; - dws->cur_msg = NULL; - dws->cur_transfer = NULL; - dws->prev_chip = dws->cur_chip; - dws->cur_chip = 
NULL; - dws->dma_mapped = 0; - queue_work(dws->workqueue, &dws->pump_messages); - spin_unlock_irqrestore(&dws->lock, flags); - - last_transfer = list_entry(msg->transfers.prev, - struct spi_transfer, - transfer_list); - - if (!last_transfer->cs_change && dws->cs_control) - dws->cs_control(MRST_SPI_DEASSERT); - - msg->state = NULL; - if (msg->complete) - msg->complete(msg->context); -} - -static void int_error_stop(struct dw_spi *dws, const char *msg) -{ - /* Stop the hw */ - spi_enable_chip(dws, 0); - - dev_err(&dws->master->dev, "%s\n", msg); - dws->cur_msg->state = ERROR_STATE; - tasklet_schedule(&dws->pump_transfers); -} - -void dw_spi_xfer_done(struct dw_spi *dws) -{ - /* Update total byte transferred return count actual bytes read */ - dws->cur_msg->actual_length += dws->len; - - /* Move to next transfer */ - dws->cur_msg->state = next_transfer(dws); - - /* Handle end of message */ - if (dws->cur_msg->state == DONE_STATE) { - dws->cur_msg->status = 0; - giveback(dws); - } else - tasklet_schedule(&dws->pump_transfers); -} -EXPORT_SYMBOL_GPL(dw_spi_xfer_done); - -static irqreturn_t interrupt_transfer(struct dw_spi *dws) -{ - u16 irq_status = dw_readw(dws, isr); - - /* Error handling */ - if (irq_status & (SPI_INT_TXOI | SPI_INT_RXOI | SPI_INT_RXUI)) { - dw_readw(dws, txoicr); - dw_readw(dws, rxoicr); - dw_readw(dws, rxuicr); - int_error_stop(dws, "interrupt_transfer: fifo overrun/underrun"); - return IRQ_HANDLED; - } - - dw_reader(dws); - if (dws->rx_end == dws->rx) { - spi_mask_intr(dws, SPI_INT_TXEI); - dw_spi_xfer_done(dws); - return IRQ_HANDLED; - } - if (irq_status & SPI_INT_TXEI) { - spi_mask_intr(dws, SPI_INT_TXEI); - dw_writer(dws); - /* Enable TX irq always, it will be disabled when RX finished */ - spi_umask_intr(dws, SPI_INT_TXEI); - } - - return IRQ_HANDLED; -} - -static irqreturn_t dw_spi_irq(int irq, void *dev_id) -{ - struct dw_spi *dws = dev_id; - u16 irq_status = dw_readw(dws, isr) & 0x3f; - - if (!irq_status) - return IRQ_NONE; - - if (!dws->cur_msg) { - spi_mask_intr(dws, SPI_INT_TXEI); - return IRQ_HANDLED; - } - - return dws->transfer_handler(dws); -} - -/* Must be called inside pump_transfers() */ -static void poll_transfer(struct dw_spi *dws) -{ - do { - dw_writer(dws); - dw_reader(dws); - cpu_relax(); - } while (dws->rx_end > dws->rx); - - dw_spi_xfer_done(dws); -} - -static void pump_transfers(unsigned long data) -{ - struct dw_spi *dws = (struct dw_spi *)data; - struct spi_message *message = NULL; - struct spi_transfer *transfer = NULL; - struct spi_transfer *previous = NULL; - struct spi_device *spi = NULL; - struct chip_data *chip = NULL; - u8 bits = 0; - u8 imask = 0; - u8 cs_change = 0; - u16 txint_level = 0; - u16 clk_div = 0; - u32 speed = 0; - u32 cr0 = 0; - - /* Get current state information */ - message = dws->cur_msg; - transfer = dws->cur_transfer; - chip = dws->cur_chip; - spi = message->spi; - - if (unlikely(!chip->clk_div)) - chip->clk_div = dws->max_freq / chip->speed_hz; - - if (message->state == ERROR_STATE) { - message->status = -EIO; - goto early_exit; - } - - /* Handle end of message */ - if (message->state == DONE_STATE) { - message->status = 0; - goto early_exit; - } - - /* Delay if requested at end of transfer*/ - if (message->state == RUNNING_STATE) { - previous = list_entry(transfer->transfer_list.prev, - struct spi_transfer, - transfer_list); - if (previous->delay_usecs) - udelay(previous->delay_usecs); - } - - dws->n_bytes = chip->n_bytes; - dws->dma_width = chip->dma_width; - dws->cs_control = chip->cs_control; - - dws->rx_dma = 
transfer->rx_dma; - dws->tx_dma = transfer->tx_dma; - dws->tx = (void *)transfer->tx_buf; - dws->tx_end = dws->tx + transfer->len; - dws->rx = transfer->rx_buf; - dws->rx_end = dws->rx + transfer->len; - dws->cs_change = transfer->cs_change; - dws->len = dws->cur_transfer->len; - if (chip != dws->prev_chip) - cs_change = 1; - - cr0 = chip->cr0; - - /* Handle per transfer options for bpw and speed */ - if (transfer->speed_hz) { - speed = chip->speed_hz; - - if (transfer->speed_hz != speed) { - speed = transfer->speed_hz; - if (speed > dws->max_freq) { - printk(KERN_ERR "MRST SPI0: unsupported" - "freq: %dHz\n", speed); - message->status = -EIO; - goto early_exit; - } - - /* clk_div doesn't support odd number */ - clk_div = dws->max_freq / speed; - clk_div = (clk_div + 1) & 0xfffe; - - chip->speed_hz = speed; - chip->clk_div = clk_div; - } - } - if (transfer->bits_per_word) { - bits = transfer->bits_per_word; - - switch (bits) { - case 8: - case 16: - dws->n_bytes = dws->dma_width = bits >> 3; - break; - default: - printk(KERN_ERR "MRST SPI0: unsupported bits:" - "%db\n", bits); - message->status = -EIO; - goto early_exit; - } - - cr0 = (bits - 1) - | (chip->type << SPI_FRF_OFFSET) - | (spi->mode << SPI_MODE_OFFSET) - | (chip->tmode << SPI_TMOD_OFFSET); - } - message->state = RUNNING_STATE; - - /* - * Adjust transfer mode if necessary. Requires platform dependent - * chipselect mechanism. - */ - if (dws->cs_control) { - if (dws->rx && dws->tx) - chip->tmode = SPI_TMOD_TR; - else if (dws->rx) - chip->tmode = SPI_TMOD_RO; - else - chip->tmode = SPI_TMOD_TO; - - cr0 &= ~SPI_TMOD_MASK; - cr0 |= (chip->tmode << SPI_TMOD_OFFSET); - } - - /* Check if current transfer is a DMA transaction */ - dws->dma_mapped = map_dma_buffers(dws); - - /* - * Interrupt mode - * we only need set the TXEI IRQ, as TX/RX always happen syncronizely - */ - if (!dws->dma_mapped && !chip->poll_mode) { - int templen = dws->len / dws->n_bytes; - txint_level = dws->fifo_len / 2; - txint_level = (templen > txint_level) ? txint_level : templen; - - imask |= SPI_INT_TXEI | SPI_INT_TXOI | SPI_INT_RXUI | SPI_INT_RXOI; - dws->transfer_handler = interrupt_transfer; - } - - /* - * Reprogram registers only if - * 1. chip select changes - * 2. clk_div is changed - * 3. control value changes - */ - if (dw_readw(dws, ctrl0) != cr0 || cs_change || clk_div || imask) { - spi_enable_chip(dws, 0); - - if (dw_readw(dws, ctrl0) != cr0) - dw_writew(dws, ctrl0, cr0); - - spi_set_clk(dws, clk_div ? 
clk_div : chip->clk_div); - spi_chip_sel(dws, spi->chip_select); - - /* Set the interrupt mask, for poll mode just disable all int */ - spi_mask_intr(dws, 0xff); - if (imask) - spi_umask_intr(dws, imask); - if (txint_level) - dw_writew(dws, txfltr, txint_level); - - spi_enable_chip(dws, 1); - if (cs_change) - dws->prev_chip = chip; - } - - if (dws->dma_mapped) - dws->dma_ops->dma_transfer(dws, cs_change); - - if (chip->poll_mode) - poll_transfer(dws); - - return; - -early_exit: - giveback(dws); - return; -} - -static void pump_messages(struct work_struct *work) -{ - struct dw_spi *dws = - container_of(work, struct dw_spi, pump_messages); - unsigned long flags; - - /* Lock queue and check for queue work */ - spin_lock_irqsave(&dws->lock, flags); - if (list_empty(&dws->queue) || dws->run == QUEUE_STOPPED) { - dws->busy = 0; - spin_unlock_irqrestore(&dws->lock, flags); - return; - } - - /* Make sure we are not already running a message */ - if (dws->cur_msg) { - spin_unlock_irqrestore(&dws->lock, flags); - return; - } - - /* Extract head of queue */ - dws->cur_msg = list_entry(dws->queue.next, struct spi_message, queue); - list_del_init(&dws->cur_msg->queue); - - /* Initial message state*/ - dws->cur_msg->state = START_STATE; - dws->cur_transfer = list_entry(dws->cur_msg->transfers.next, - struct spi_transfer, - transfer_list); - dws->cur_chip = spi_get_ctldata(dws->cur_msg->spi); - - /* Mark as busy and launch transfers */ - tasklet_schedule(&dws->pump_transfers); - - dws->busy = 1; - spin_unlock_irqrestore(&dws->lock, flags); -} - -/* spi_device use this to queue in their spi_msg */ -static int dw_spi_transfer(struct spi_device *spi, struct spi_message *msg) -{ - struct dw_spi *dws = spi_master_get_devdata(spi->master); - unsigned long flags; - - spin_lock_irqsave(&dws->lock, flags); - - if (dws->run == QUEUE_STOPPED) { - spin_unlock_irqrestore(&dws->lock, flags); - return -ESHUTDOWN; - } - - msg->actual_length = 0; - msg->status = -EINPROGRESS; - msg->state = START_STATE; - - list_add_tail(&msg->queue, &dws->queue); - - if (dws->run == QUEUE_RUNNING && !dws->busy) { - - if (dws->cur_transfer || dws->cur_msg) - queue_work(dws->workqueue, - &dws->pump_messages); - else { - /* If no other data transaction in air, just go */ - spin_unlock_irqrestore(&dws->lock, flags); - pump_messages(&dws->pump_messages); - return 0; - } - } - - spin_unlock_irqrestore(&dws->lock, flags); - return 0; -} - -/* This may be called twice for each spi dev */ -static int dw_spi_setup(struct spi_device *spi) -{ - struct dw_spi_chip *chip_info = NULL; - struct chip_data *chip; - - if (spi->bits_per_word != 8 && spi->bits_per_word != 16) - return -EINVAL; - - /* Only alloc on first setup */ - chip = spi_get_ctldata(spi); - if (!chip) { - chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL); - if (!chip) - return -ENOMEM; - } - - /* - * Protocol drivers may change the chip settings, so... 
- * if chip_info exists, use it - */ - chip_info = spi->controller_data; - - /* chip_info doesn't always exist */ - if (chip_info) { - if (chip_info->cs_control) - chip->cs_control = chip_info->cs_control; - - chip->poll_mode = chip_info->poll_mode; - chip->type = chip_info->type; - - chip->rx_threshold = 0; - chip->tx_threshold = 0; - - chip->enable_dma = chip_info->enable_dma; - } - - if (spi->bits_per_word <= 8) { - chip->n_bytes = 1; - chip->dma_width = 1; - } else if (spi->bits_per_word <= 16) { - chip->n_bytes = 2; - chip->dma_width = 2; - } else { - /* Never take >16b case for MRST SPIC */ - dev_err(&spi->dev, "invalid wordsize\n"); - return -EINVAL; - } - chip->bits_per_word = spi->bits_per_word; - - if (!spi->max_speed_hz) { - dev_err(&spi->dev, "No max speed HZ parameter\n"); - return -EINVAL; - } - chip->speed_hz = spi->max_speed_hz; - - chip->tmode = 0; /* Tx & Rx */ - /* Default SPI mode is SCPOL = 0, SCPH = 0 */ - chip->cr0 = (chip->bits_per_word - 1) - | (chip->type << SPI_FRF_OFFSET) - | (spi->mode << SPI_MODE_OFFSET) - | (chip->tmode << SPI_TMOD_OFFSET); - - spi_set_ctldata(spi, chip); - return 0; -} - -static void dw_spi_cleanup(struct spi_device *spi) -{ - struct chip_data *chip = spi_get_ctldata(spi); - kfree(chip); -} - -static int __devinit init_queue(struct dw_spi *dws) -{ - INIT_LIST_HEAD(&dws->queue); - spin_lock_init(&dws->lock); - - dws->run = QUEUE_STOPPED; - dws->busy = 0; - - tasklet_init(&dws->pump_transfers, - pump_transfers, (unsigned long)dws); - - INIT_WORK(&dws->pump_messages, pump_messages); - dws->workqueue = create_singlethread_workqueue( - dev_name(dws->master->dev.parent)); - if (dws->workqueue == NULL) - return -EBUSY; - - return 0; -} - -static int start_queue(struct dw_spi *dws) -{ - unsigned long flags; - - spin_lock_irqsave(&dws->lock, flags); - - if (dws->run == QUEUE_RUNNING || dws->busy) { - spin_unlock_irqrestore(&dws->lock, flags); - return -EBUSY; - } - - dws->run = QUEUE_RUNNING; - dws->cur_msg = NULL; - dws->cur_transfer = NULL; - dws->cur_chip = NULL; - dws->prev_chip = NULL; - spin_unlock_irqrestore(&dws->lock, flags); - - queue_work(dws->workqueue, &dws->pump_messages); - - return 0; -} - -static int stop_queue(struct dw_spi *dws) -{ - unsigned long flags; - unsigned limit = 50; - int status = 0; - - spin_lock_irqsave(&dws->lock, flags); - dws->run = QUEUE_STOPPED; - while ((!list_empty(&dws->queue) || dws->busy) && limit--) { - spin_unlock_irqrestore(&dws->lock, flags); - msleep(10); - spin_lock_irqsave(&dws->lock, flags); - } - - if (!list_empty(&dws->queue) || dws->busy) - status = -EBUSY; - spin_unlock_irqrestore(&dws->lock, flags); - - return status; -} - -static int destroy_queue(struct dw_spi *dws) -{ - int status; - - status = stop_queue(dws); - if (status != 0) - return status; - destroy_workqueue(dws->workqueue); - return 0; -} - -/* Restart the controller, disable all interrupts, clean rx fifo */ -static void spi_hw_init(struct dw_spi *dws) -{ - spi_enable_chip(dws, 0); - spi_mask_intr(dws, 0xff); - spi_enable_chip(dws, 1); - - /* - * Try to detect the FIFO depth if not set by interface driver, - * the depth could be from 2 to 256 from HW spec - */ - if (!dws->fifo_len) { - u32 fifo; - for (fifo = 2; fifo <= 257; fifo++) { - dw_writew(dws, txfltr, fifo); - if (fifo != dw_readw(dws, txfltr)) - break; - } - - dws->fifo_len = (fifo == 257) ? 
0 : fifo; - dw_writew(dws, txfltr, 0); - } -} - -int __devinit dw_spi_add_host(struct dw_spi *dws) -{ - struct spi_master *master; - int ret; - - BUG_ON(dws == NULL); - - master = spi_alloc_master(dws->parent_dev, 0); - if (!master) { - ret = -ENOMEM; - goto exit; - } - - dws->master = master; - dws->type = SSI_MOTO_SPI; - dws->prev_chip = NULL; - dws->dma_inited = 0; - dws->dma_addr = (dma_addr_t)(dws->paddr + 0x60); - - ret = request_irq(dws->irq, dw_spi_irq, IRQF_SHARED, - "dw_spi", dws); - if (ret < 0) { - dev_err(&master->dev, "can not get IRQ\n"); - goto err_free_master; - } - - master->mode_bits = SPI_CPOL | SPI_CPHA; - master->bus_num = dws->bus_num; - master->num_chipselect = dws->num_cs; - master->cleanup = dw_spi_cleanup; - master->setup = dw_spi_setup; - master->transfer = dw_spi_transfer; - - /* Basic HW init */ - spi_hw_init(dws); - - if (dws->dma_ops && dws->dma_ops->dma_init) { - ret = dws->dma_ops->dma_init(dws); - if (ret) { - dev_warn(&master->dev, "DMA init failed\n"); - dws->dma_inited = 0; - } - } - - /* Initial and start queue */ - ret = init_queue(dws); - if (ret) { - dev_err(&master->dev, "problem initializing queue\n"); - goto err_diable_hw; - } - ret = start_queue(dws); - if (ret) { - dev_err(&master->dev, "problem starting queue\n"); - goto err_diable_hw; - } - - spi_master_set_devdata(master, dws); - ret = spi_register_master(master); - if (ret) { - dev_err(&master->dev, "problem registering spi master\n"); - goto err_queue_alloc; - } - - mrst_spi_debugfs_init(dws); - return 0; - -err_queue_alloc: - destroy_queue(dws); - if (dws->dma_ops && dws->dma_ops->dma_exit) - dws->dma_ops->dma_exit(dws); -err_diable_hw: - spi_enable_chip(dws, 0); - free_irq(dws->irq, dws); -err_free_master: - spi_master_put(master); -exit: - return ret; -} -EXPORT_SYMBOL_GPL(dw_spi_add_host); - -void __devexit dw_spi_remove_host(struct dw_spi *dws) -{ - int status = 0; - - if (!dws) - return; - mrst_spi_debugfs_remove(dws); - - /* Remove the queue */ - status = destroy_queue(dws); - if (status != 0) - dev_err(&dws->master->dev, "dw_spi_remove: workqueue will not " - "complete, message memory not freed\n"); - - if (dws->dma_ops && dws->dma_ops->dma_exit) - dws->dma_ops->dma_exit(dws); - spi_enable_chip(dws, 0); - /* Disable clk */ - spi_set_clk(dws, 0); - free_irq(dws->irq, dws); - - /* Disconnect from the SPI framework */ - spi_unregister_master(dws->master); -} -EXPORT_SYMBOL_GPL(dw_spi_remove_host); - -int dw_spi_suspend_host(struct dw_spi *dws) -{ - int ret = 0; - - ret = stop_queue(dws); - if (ret) - return ret; - spi_enable_chip(dws, 0); - spi_set_clk(dws, 0); - return ret; -} -EXPORT_SYMBOL_GPL(dw_spi_suspend_host); - -int dw_spi_resume_host(struct dw_spi *dws) -{ - int ret; - - spi_hw_init(dws); - ret = start_queue(dws); - if (ret) - dev_err(&dws->master->dev, "fail to start queue (%d)\n", ret); - return ret; -} -EXPORT_SYMBOL_GPL(dw_spi_resume_host); - -MODULE_AUTHOR("Feng Tang "); -MODULE_DESCRIPTION("Driver for DesignWare SPI controller core"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/spi/dw_spi.h b/drivers/spi/dw_spi.h deleted file mode 100644 index 7a5e78d..0000000 --- a/drivers/spi/dw_spi.h +++ /dev/null @@ -1,232 +0,0 @@ -#ifndef DW_SPI_HEADER_H -#define DW_SPI_HEADER_H - -#include -#include - -/* Bit fields in CTRLR0 */ -#define SPI_DFS_OFFSET 0 - -#define SPI_FRF_OFFSET 4 -#define SPI_FRF_SPI 0x0 -#define SPI_FRF_SSP 0x1 -#define SPI_FRF_MICROWIRE 0x2 -#define SPI_FRF_RESV 0x3 - -#define SPI_MODE_OFFSET 6 -#define SPI_SCPH_OFFSET 6 -#define SPI_SCOL_OFFSET 7 - 
-#define SPI_TMOD_OFFSET 8 -#define SPI_TMOD_MASK (0x3 << SPI_TMOD_OFFSET) -#define SPI_TMOD_TR 0x0 /* xmit & recv */ -#define SPI_TMOD_TO 0x1 /* xmit only */ -#define SPI_TMOD_RO 0x2 /* recv only */ -#define SPI_TMOD_EPROMREAD 0x3 /* eeprom read mode */ - -#define SPI_SLVOE_OFFSET 10 -#define SPI_SRL_OFFSET 11 -#define SPI_CFS_OFFSET 12 - -/* Bit fields in SR, 7 bits */ -#define SR_MASK 0x7f /* cover 7 bits */ -#define SR_BUSY (1 << 0) -#define SR_TF_NOT_FULL (1 << 1) -#define SR_TF_EMPT (1 << 2) -#define SR_RF_NOT_EMPT (1 << 3) -#define SR_RF_FULL (1 << 4) -#define SR_TX_ERR (1 << 5) -#define SR_DCOL (1 << 6) - -/* Bit fields in ISR, IMR, RISR, 7 bits */ -#define SPI_INT_TXEI (1 << 0) -#define SPI_INT_TXOI (1 << 1) -#define SPI_INT_RXUI (1 << 2) -#define SPI_INT_RXOI (1 << 3) -#define SPI_INT_RXFI (1 << 4) -#define SPI_INT_MSTI (1 << 5) - -/* TX RX interrupt level threshold, max can be 256 */ -#define SPI_INT_THRESHOLD 32 - -enum dw_ssi_type { - SSI_MOTO_SPI = 0, - SSI_TI_SSP, - SSI_NS_MICROWIRE, -}; - -struct dw_spi_reg { - u32 ctrl0; - u32 ctrl1; - u32 ssienr; - u32 mwcr; - u32 ser; - u32 baudr; - u32 txfltr; - u32 rxfltr; - u32 txflr; - u32 rxflr; - u32 sr; - u32 imr; - u32 isr; - u32 risr; - u32 txoicr; - u32 rxoicr; - u32 rxuicr; - u32 msticr; - u32 icr; - u32 dmacr; - u32 dmatdlr; - u32 dmardlr; - u32 idr; - u32 version; - u32 dr; /* Currently oper as 32 bits, - though only low 16 bits matters */ -} __packed; - -struct dw_spi; -struct dw_spi_dma_ops { - int (*dma_init)(struct dw_spi *dws); - void (*dma_exit)(struct dw_spi *dws); - int (*dma_transfer)(struct dw_spi *dws, int cs_change); -}; - -struct dw_spi { - struct spi_master *master; - struct spi_device *cur_dev; - struct device *parent_dev; - enum dw_ssi_type type; - - void __iomem *regs; - unsigned long paddr; - u32 iolen; - int irq; - u32 fifo_len; /* depth of the FIFO buffer */ - u32 max_freq; /* max bus freq supported */ - - u16 bus_num; - u16 num_cs; /* supported slave numbers */ - - /* Driver message queue */ - struct workqueue_struct *workqueue; - struct work_struct pump_messages; - spinlock_t lock; - struct list_head queue; - int busy; - int run; - - /* Message Transfer pump */ - struct tasklet_struct pump_transfers; - - /* Current message transfer state info */ - struct spi_message *cur_msg; - struct spi_transfer *cur_transfer; - struct chip_data *cur_chip; - struct chip_data *prev_chip; - size_t len; - void *tx; - void *tx_end; - void *rx; - void *rx_end; - int dma_mapped; - dma_addr_t rx_dma; - dma_addr_t tx_dma; - size_t rx_map_len; - size_t tx_map_len; - u8 n_bytes; /* current is a 1/2 bytes op */ - u8 max_bits_per_word; /* maxim is 16b */ - u32 dma_width; - int cs_change; - irqreturn_t (*transfer_handler)(struct dw_spi *dws); - void (*cs_control)(u32 command); - - /* Dma info */ - int dma_inited; - struct dma_chan *txchan; - struct scatterlist tx_sgl; - struct dma_chan *rxchan; - struct scatterlist rx_sgl; - int dma_chan_done; - struct device *dma_dev; - dma_addr_t dma_addr; /* phy address of the Data register */ - struct dw_spi_dma_ops *dma_ops; - void *dma_priv; /* platform relate info */ - struct pci_dev *dmac; - - /* Bus interface info */ - void *priv; -#ifdef CONFIG_DEBUG_FS - struct dentry *debugfs; -#endif -}; - -#define dw_readl(dw, name) \ - __raw_readl(&(((struct dw_spi_reg *)dw->regs)->name)) -#define dw_writel(dw, name, val) \ - __raw_writel((val), &(((struct dw_spi_reg *)dw->regs)->name)) -#define dw_readw(dw, name) \ - __raw_readw(&(((struct dw_spi_reg *)dw->regs)->name)) -#define dw_writew(dw, name, 
val) \ - __raw_writew((val), &(((struct dw_spi_reg *)dw->regs)->name)) - -static inline void spi_enable_chip(struct dw_spi *dws, int enable) -{ - dw_writel(dws, ssienr, (enable ? 1 : 0)); -} - -static inline void spi_set_clk(struct dw_spi *dws, u16 div) -{ - dw_writel(dws, baudr, div); -} - -static inline void spi_chip_sel(struct dw_spi *dws, u16 cs) -{ - if (cs > dws->num_cs) - return; - - if (dws->cs_control) - dws->cs_control(1); - - dw_writel(dws, ser, 1 << cs); -} - -/* Disable IRQ bits */ -static inline void spi_mask_intr(struct dw_spi *dws, u32 mask) -{ - u32 new_mask; - - new_mask = dw_readl(dws, imr) & ~mask; - dw_writel(dws, imr, new_mask); -} - -/* Enable IRQ bits */ -static inline void spi_umask_intr(struct dw_spi *dws, u32 mask) -{ - u32 new_mask; - - new_mask = dw_readl(dws, imr) | mask; - dw_writel(dws, imr, new_mask); -} - -/* - * Each SPI slave device to work with dw_api controller should - * has such a structure claiming its working mode (PIO/DMA etc), - * which can be save in the "controller_data" member of the - * struct spi_device - */ -struct dw_spi_chip { - u8 poll_mode; /* 0 for contoller polling mode */ - u8 type; /* SPI/SSP/Micrwire */ - u8 enable_dma; - void (*cs_control)(u32 command); -}; - -extern int dw_spi_add_host(struct dw_spi *dws); -extern void dw_spi_remove_host(struct dw_spi *dws); -extern int dw_spi_suspend_host(struct dw_spi *dws); -extern int dw_spi_resume_host(struct dw_spi *dws); -extern void dw_spi_xfer_done(struct dw_spi *dws); - -/* platform related setup */ -extern int dw_spi_mid_init(struct dw_spi *dws); /* Intel MID platforms */ -#endif /* DW_SPI_HEADER_H */ diff --git a/drivers/spi/dw_spi_mid.c b/drivers/spi/dw_spi_mid.c deleted file mode 100644 index 4891782..0000000 --- a/drivers/spi/dw_spi_mid.c +++ /dev/null @@ -1,224 +0,0 @@ -/* - * dw_spi_mid.c - special handling for DW core on Intel MID platform - * - * Copyright (c) 2009, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - */ - -#include -#include -#include -#include -#include - -#include "dw_spi.h" - -#ifdef CONFIG_SPI_DW_MID_DMA -#include -#include - -struct mid_dma { - struct intel_mid_dma_slave dmas_tx; - struct intel_mid_dma_slave dmas_rx; -}; - -static bool mid_spi_dma_chan_filter(struct dma_chan *chan, void *param) -{ - struct dw_spi *dws = param; - - return dws->dmac && (&dws->dmac->dev == chan->device->dev); -} - -static int mid_spi_dma_init(struct dw_spi *dws) -{ - struct mid_dma *dw_dma = dws->dma_priv; - struct intel_mid_dma_slave *rxs, *txs; - dma_cap_mask_t mask; - - /* - * Get pci device for DMA controller, currently it could only - * be the DMA controller of either Moorestown or Medfield - */ - dws->dmac = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0813, NULL); - if (!dws->dmac) - dws->dmac = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0827, NULL); - - dma_cap_zero(mask); - dma_cap_set(DMA_SLAVE, mask); - - /* 1. 
Init rx channel */ - dws->rxchan = dma_request_channel(mask, mid_spi_dma_chan_filter, dws); - if (!dws->rxchan) - goto err_exit; - rxs = &dw_dma->dmas_rx; - rxs->hs_mode = LNW_DMA_HW_HS; - rxs->cfg_mode = LNW_DMA_PER_TO_MEM; - dws->rxchan->private = rxs; - - /* 2. Init tx channel */ - dws->txchan = dma_request_channel(mask, mid_spi_dma_chan_filter, dws); - if (!dws->txchan) - goto free_rxchan; - txs = &dw_dma->dmas_tx; - txs->hs_mode = LNW_DMA_HW_HS; - txs->cfg_mode = LNW_DMA_MEM_TO_PER; - dws->txchan->private = txs; - - dws->dma_inited = 1; - return 0; - -free_rxchan: - dma_release_channel(dws->rxchan); -err_exit: - return -1; - -} - -static void mid_spi_dma_exit(struct dw_spi *dws) -{ - dma_release_channel(dws->txchan); - dma_release_channel(dws->rxchan); -} - -/* - * dws->dma_chan_done is cleared before the dma transfer starts, - * callback for rx/tx channel will each increment it by 1. - * Reaching 2 means the whole spi transaction is done. - */ -static void dw_spi_dma_done(void *arg) -{ - struct dw_spi *dws = arg; - - if (++dws->dma_chan_done != 2) - return; - dw_spi_xfer_done(dws); -} - -static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change) -{ - struct dma_async_tx_descriptor *txdesc = NULL, *rxdesc = NULL; - struct dma_chan *txchan, *rxchan; - struct dma_slave_config txconf, rxconf; - u16 dma_ctrl = 0; - - /* 1. setup DMA related registers */ - if (cs_change) { - spi_enable_chip(dws, 0); - dw_writew(dws, dmardlr, 0xf); - dw_writew(dws, dmatdlr, 0x10); - if (dws->tx_dma) - dma_ctrl |= 0x2; - if (dws->rx_dma) - dma_ctrl |= 0x1; - dw_writew(dws, dmacr, dma_ctrl); - spi_enable_chip(dws, 1); - } - - dws->dma_chan_done = 0; - txchan = dws->txchan; - rxchan = dws->rxchan; - - /* 2. Prepare the TX dma transfer */ - txconf.direction = DMA_TO_DEVICE; - txconf.dst_addr = dws->dma_addr; - txconf.dst_maxburst = LNW_DMA_MSIZE_16; - txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; - txconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; - - txchan->device->device_control(txchan, DMA_SLAVE_CONFIG, - (unsigned long) &txconf); - - memset(&dws->tx_sgl, 0, sizeof(dws->tx_sgl)); - dws->tx_sgl.dma_address = dws->tx_dma; - dws->tx_sgl.length = dws->len; - - txdesc = txchan->device->device_prep_slave_sg(txchan, - &dws->tx_sgl, - 1, - DMA_TO_DEVICE, - DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP); - txdesc->callback = dw_spi_dma_done; - txdesc->callback_param = dws; - - /* 3. 
Prepare the RX dma transfer */ - rxconf.direction = DMA_FROM_DEVICE; - rxconf.src_addr = dws->dma_addr; - rxconf.src_maxburst = LNW_DMA_MSIZE_16; - rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; - rxconf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; - - rxchan->device->device_control(rxchan, DMA_SLAVE_CONFIG, - (unsigned long) &rxconf); - - memset(&dws->rx_sgl, 0, sizeof(dws->rx_sgl)); - dws->rx_sgl.dma_address = dws->rx_dma; - dws->rx_sgl.length = dws->len; - - rxdesc = rxchan->device->device_prep_slave_sg(rxchan, - &dws->rx_sgl, - 1, - DMA_FROM_DEVICE, - DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP); - rxdesc->callback = dw_spi_dma_done; - rxdesc->callback_param = dws; - - /* rx must be started before tx due to spi instinct */ - rxdesc->tx_submit(rxdesc); - txdesc->tx_submit(txdesc); - return 0; -} - -static struct dw_spi_dma_ops mid_dma_ops = { - .dma_init = mid_spi_dma_init, - .dma_exit = mid_spi_dma_exit, - .dma_transfer = mid_spi_dma_transfer, -}; -#endif - -/* Some specific info for SPI0 controller on Moorestown */ - -/* HW info for MRST CLk Control Unit, one 32b reg */ -#define MRST_SPI_CLK_BASE 100000000 /* 100m */ -#define MRST_CLK_SPI0_REG 0xff11d86c -#define CLK_SPI_BDIV_OFFSET 0 -#define CLK_SPI_BDIV_MASK 0x00000007 -#define CLK_SPI_CDIV_OFFSET 9 -#define CLK_SPI_CDIV_MASK 0x00000e00 -#define CLK_SPI_DISABLE_OFFSET 8 - -int dw_spi_mid_init(struct dw_spi *dws) -{ - u32 *clk_reg, clk_cdiv; - - clk_reg = ioremap_nocache(MRST_CLK_SPI0_REG, 16); - if (!clk_reg) - return -ENOMEM; - - /* get SPI controller operating freq info */ - clk_cdiv = (readl(clk_reg) & CLK_SPI_CDIV_MASK) >> CLK_SPI_CDIV_OFFSET; - dws->max_freq = MRST_SPI_CLK_BASE / (clk_cdiv + 1); - iounmap(clk_reg); - - dws->num_cs = 16; - dws->fifo_len = 40; /* FIFO has 40 words buffer */ - -#ifdef CONFIG_SPI_DW_MID_DMA - dws->dma_priv = kzalloc(sizeof(struct mid_dma), GFP_KERNEL); - if (!dws->dma_priv) - return -ENOMEM; - dws->dma_ops = &mid_dma_ops; -#endif - return 0; -} diff --git a/drivers/spi/dw_spi_mmio.c b/drivers/spi/dw_spi_mmio.c deleted file mode 100644 index e0e813d..0000000 --- a/drivers/spi/dw_spi_mmio.c +++ /dev/null @@ -1,151 +0,0 @@ -/* - * dw_spi_mmio.c - Memory-mapped interface driver for DW SPI Core - * - * Copyright (c) 2010, Octasic semiconductor. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
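dw_spi_mid_init() above derives the controller's maximum bus clock from the CDIV field of the Moorestown clock-control register using max_freq = MRST_SPI_CLK_BASE / (clk_cdiv + 1). A quick worked instance of that formula, outside the driver and with an illustrative CDIV value:

	#define MRST_SPI_CLK_BASE	100000000	/* 100 MHz reference, as above */

	/* CDIV = 3 -> 100000000 / (3 + 1) = 25000000 Hz, i.e. a 25 MHz SPI clock */
	static unsigned long mrst_spi_max_freq(unsigned int clk_cdiv)
	{
		return MRST_SPI_CLK_BASE / (clk_cdiv + 1);
	}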
- */ - -#include -#include -#include -#include -#include -#include -#include - -#include "dw_spi.h" - -#define DRIVER_NAME "dw_spi_mmio" - -struct dw_spi_mmio { - struct dw_spi dws; - struct clk *clk; -}; - -static int __devinit dw_spi_mmio_probe(struct platform_device *pdev) -{ - struct dw_spi_mmio *dwsmmio; - struct dw_spi *dws; - struct resource *mem, *ioarea; - int ret; - - dwsmmio = kzalloc(sizeof(struct dw_spi_mmio), GFP_KERNEL); - if (!dwsmmio) { - ret = -ENOMEM; - goto err_end; - } - - dws = &dwsmmio->dws; - - /* Get basic io resource and map it */ - mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!mem) { - dev_err(&pdev->dev, "no mem resource?\n"); - ret = -EINVAL; - goto err_kfree; - } - - ioarea = request_mem_region(mem->start, resource_size(mem), - pdev->name); - if (!ioarea) { - dev_err(&pdev->dev, "SPI region already claimed\n"); - ret = -EBUSY; - goto err_kfree; - } - - dws->regs = ioremap_nocache(mem->start, resource_size(mem)); - if (!dws->regs) { - dev_err(&pdev->dev, "SPI region already mapped\n"); - ret = -ENOMEM; - goto err_release_reg; - } - - dws->irq = platform_get_irq(pdev, 0); - if (dws->irq < 0) { - dev_err(&pdev->dev, "no irq resource?\n"); - ret = dws->irq; /* -ENXIO */ - goto err_unmap; - } - - dwsmmio->clk = clk_get(&pdev->dev, NULL); - if (IS_ERR(dwsmmio->clk)) { - ret = PTR_ERR(dwsmmio->clk); - goto err_irq; - } - clk_enable(dwsmmio->clk); - - dws->parent_dev = &pdev->dev; - dws->bus_num = 0; - dws->num_cs = 4; - dws->max_freq = clk_get_rate(dwsmmio->clk); - - ret = dw_spi_add_host(dws); - if (ret) - goto err_clk; - - platform_set_drvdata(pdev, dwsmmio); - return 0; - -err_clk: - clk_disable(dwsmmio->clk); - clk_put(dwsmmio->clk); - dwsmmio->clk = NULL; -err_irq: - free_irq(dws->irq, dws); -err_unmap: - iounmap(dws->regs); -err_release_reg: - release_mem_region(mem->start, resource_size(mem)); -err_kfree: - kfree(dwsmmio); -err_end: - return ret; -} - -static int __devexit dw_spi_mmio_remove(struct platform_device *pdev) -{ - struct dw_spi_mmio *dwsmmio = platform_get_drvdata(pdev); - struct resource *mem; - - platform_set_drvdata(pdev, NULL); - - clk_disable(dwsmmio->clk); - clk_put(dwsmmio->clk); - dwsmmio->clk = NULL; - - free_irq(dwsmmio->dws.irq, &dwsmmio->dws); - dw_spi_remove_host(&dwsmmio->dws); - iounmap(dwsmmio->dws.regs); - kfree(dwsmmio); - - mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - release_mem_region(mem->start, resource_size(mem)); - return 0; -} - -static struct platform_driver dw_spi_mmio_driver = { - .remove = __devexit_p(dw_spi_mmio_remove), - .driver = { - .name = DRIVER_NAME, - .owner = THIS_MODULE, - }, -}; - -static int __init dw_spi_mmio_init(void) -{ - return platform_driver_probe(&dw_spi_mmio_driver, dw_spi_mmio_probe); -} -module_init(dw_spi_mmio_init); - -static void __exit dw_spi_mmio_exit(void) -{ - platform_driver_unregister(&dw_spi_mmio_driver); -} -module_exit(dw_spi_mmio_exit); - -MODULE_AUTHOR("Jean-Hugues Deschenes "); -MODULE_DESCRIPTION("Memory-mapped I/O interface driver for DW SPI Core"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/spi/dw_spi_pci.c b/drivers/spi/dw_spi_pci.c deleted file mode 100644 index ad260aa..0000000 --- a/drivers/spi/dw_spi_pci.c +++ /dev/null @@ -1,181 +0,0 @@ -/* - * dw_spi_pci.c - PCI interface driver for DW SPI Core - * - * Copyright (c) 2009, Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - */ - -#include -#include -#include -#include - -#include "dw_spi.h" - -#define DRIVER_NAME "dw_spi_pci" - -struct dw_spi_pci { - struct pci_dev *pdev; - struct dw_spi dws; -}; - -static int __devinit spi_pci_probe(struct pci_dev *pdev, - const struct pci_device_id *ent) -{ - struct dw_spi_pci *dwpci; - struct dw_spi *dws; - int pci_bar = 0; - int ret; - - printk(KERN_INFO "DW: found PCI SPI controller(ID: %04x:%04x)\n", - pdev->vendor, pdev->device); - - ret = pci_enable_device(pdev); - if (ret) - return ret; - - dwpci = kzalloc(sizeof(struct dw_spi_pci), GFP_KERNEL); - if (!dwpci) { - ret = -ENOMEM; - goto err_disable; - } - - dwpci->pdev = pdev; - dws = &dwpci->dws; - - /* Get basic io resource and map it */ - dws->paddr = pci_resource_start(pdev, pci_bar); - dws->iolen = pci_resource_len(pdev, pci_bar); - - ret = pci_request_region(pdev, pci_bar, dev_name(&pdev->dev)); - if (ret) - goto err_kfree; - - dws->regs = ioremap_nocache((unsigned long)dws->paddr, - pci_resource_len(pdev, pci_bar)); - if (!dws->regs) { - ret = -ENOMEM; - goto err_release_reg; - } - - dws->parent_dev = &pdev->dev; - dws->bus_num = 0; - dws->num_cs = 4; - dws->irq = pdev->irq; - - /* - * Specific handling for Intel MID paltforms, like dma setup, - * clock rate, FIFO depth. 
- */ - if (pdev->device == 0x0800) { - ret = dw_spi_mid_init(dws); - if (ret) - goto err_unmap; - } - - ret = dw_spi_add_host(dws); - if (ret) - goto err_unmap; - - /* PCI hook and SPI hook use the same drv data */ - pci_set_drvdata(pdev, dwpci); - return 0; - -err_unmap: - iounmap(dws->regs); -err_release_reg: - pci_release_region(pdev, pci_bar); -err_kfree: - kfree(dwpci); -err_disable: - pci_disable_device(pdev); - return ret; -} - -static void __devexit spi_pci_remove(struct pci_dev *pdev) -{ - struct dw_spi_pci *dwpci = pci_get_drvdata(pdev); - - pci_set_drvdata(pdev, NULL); - dw_spi_remove_host(&dwpci->dws); - iounmap(dwpci->dws.regs); - pci_release_region(pdev, 0); - kfree(dwpci); - pci_disable_device(pdev); -} - -#ifdef CONFIG_PM -static int spi_suspend(struct pci_dev *pdev, pm_message_t state) -{ - struct dw_spi_pci *dwpci = pci_get_drvdata(pdev); - int ret; - - ret = dw_spi_suspend_host(&dwpci->dws); - if (ret) - return ret; - pci_save_state(pdev); - pci_disable_device(pdev); - pci_set_power_state(pdev, pci_choose_state(pdev, state)); - return ret; -} - -static int spi_resume(struct pci_dev *pdev) -{ - struct dw_spi_pci *dwpci = pci_get_drvdata(pdev); - int ret; - - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); - ret = pci_enable_device(pdev); - if (ret) - return ret; - return dw_spi_resume_host(&dwpci->dws); -} -#else -#define spi_suspend NULL -#define spi_resume NULL -#endif - -static const struct pci_device_id pci_ids[] __devinitdata = { - /* Intel MID platform SPI controller 0 */ - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) }, - {}, -}; - -static struct pci_driver dw_spi_driver = { - .name = DRIVER_NAME, - .id_table = pci_ids, - .probe = spi_pci_probe, - .remove = __devexit_p(spi_pci_remove), - .suspend = spi_suspend, - .resume = spi_resume, -}; - -static int __init mrst_spi_init(void) -{ - return pci_register_driver(&dw_spi_driver); -} - -static void __exit mrst_spi_exit(void) -{ - pci_unregister_driver(&dw_spi_driver); -} - -module_init(mrst_spi_init); -module_exit(mrst_spi_exit); - -MODULE_AUTHOR("Feng Tang "); -MODULE_DESCRIPTION("PCI interface driver for DW SPI Core"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/spi/ep93xx_spi.c b/drivers/spi/ep93xx_spi.c deleted file mode 100644 index d357007..0000000 --- a/drivers/spi/ep93xx_spi.c +++ /dev/null @@ -1,938 +0,0 @@ -/* - * Driver for Cirrus Logic EP93xx SPI controller. - * - * Copyright (c) 2010 Mika Westerberg - * - * Explicit FIFO handling code was inspired by amba-pl022 driver. - * - * Chip select support using other than built-in GPIOs by H. Hartley Sweeten. - * - * For more information about the SPI controller see documentation on Cirrus - * Logic web site: - * http://www.cirrus.com/en/pubs/manual/EP93xx_Users_Guide_UM1.pdf - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#define SSPCR0 0x0000 -#define SSPCR0_MODE_SHIFT 6 -#define SSPCR0_SCR_SHIFT 8 - -#define SSPCR1 0x0004 -#define SSPCR1_RIE BIT(0) -#define SSPCR1_TIE BIT(1) -#define SSPCR1_RORIE BIT(2) -#define SSPCR1_LBM BIT(3) -#define SSPCR1_SSE BIT(4) -#define SSPCR1_MS BIT(5) -#define SSPCR1_SOD BIT(6) - -#define SSPDR 0x0008 - -#define SSPSR 0x000c -#define SSPSR_TFE BIT(0) -#define SSPSR_TNF BIT(1) -#define SSPSR_RNE BIT(2) -#define SSPSR_RFF BIT(3) -#define SSPSR_BSY BIT(4) -#define SSPCPSR 0x0010 - -#define SSPIIR 0x0014 -#define SSPIIR_RIS BIT(0) -#define SSPIIR_TIS BIT(1) -#define SSPIIR_RORIS BIT(2) -#define SSPICR SSPIIR - -/* timeout in milliseconds */ -#define SPI_TIMEOUT 5 -/* maximum depth of RX/TX FIFO */ -#define SPI_FIFO_SIZE 8 - -/** - * struct ep93xx_spi - EP93xx SPI controller structure - * @lock: spinlock that protects concurrent accesses to fields @running, - * @current_msg and @msg_queue - * @pdev: pointer to platform device - * @clk: clock for the controller - * @regs_base: pointer to ioremap()'d registers - * @irq: IRQ number used by the driver - * @min_rate: minimum clock rate (in Hz) supported by the controller - * @max_rate: maximum clock rate (in Hz) supported by the controller - * @running: is the queue running - * @wq: workqueue used by the driver - * @msg_work: work that is queued for the driver - * @wait: wait here until given transfer is completed - * @msg_queue: queue for the messages - * @current_msg: message that is currently processed (or %NULL if none) - * @tx: current byte in transfer to transmit - * @rx: current byte in transfer to receive - * @fifo_level: how full is FIFO (%0..%SPI_FIFO_SIZE - %1). Receiving one - * frame decreases this level and sending one frame increases it. - * - * This structure holds EP93xx SPI controller specific information. When - * @running is %true, driver accepts transfer requests from protocol drivers. - * @current_msg is used to hold pointer to the message that is currently - * processed. If @current_msg is %NULL, it means that no processing is going - * on. - * - * Most of the fields are only written once and they can be accessed without - * taking the @lock. Fields that are accessed concurrently are: @current_msg, - * @running, and @msg_queue. - */ -struct ep93xx_spi { - spinlock_t lock; - const struct platform_device *pdev; - struct clk *clk; - void __iomem *regs_base; - int irq; - unsigned long min_rate; - unsigned long max_rate; - bool running; - struct workqueue_struct *wq; - struct work_struct msg_work; - struct completion wait; - struct list_head msg_queue; - struct spi_message *current_msg; - size_t tx; - size_t rx; - size_t fifo_level; -}; - -/** - * struct ep93xx_spi_chip - SPI device hardware settings - * @spi: back pointer to the SPI device - * @rate: max rate in hz this chip supports - * @div_cpsr: cpsr (pre-scaler) divider - * @div_scr: scr divider - * @dss: bits per word (4 - 16 bits) - * @ops: private chip operations - * - * This structure is used to store hardware register specific settings for each - * SPI device. Settings are written to hardware by function - * ep93xx_spi_chip_setup(). 
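The @ops member is how a board plugs chip-select handling and per-chip setup/teardown into this driver, handed over through spi_board_info.controller_data. A hypothetical board-side definition assuming a GPIO-driven chip select; the callback signatures are inferred from how @ops is invoked in this file, and the GPIO line name is a placeholder:

	static int board_cs_setup(struct spi_device *spi)
	{
		/* claim the GPIO used as chip select for this slave */
		return gpio_request_one(EP93XX_GPIO_LINE_EGPIO9,
					GPIOF_OUT_INIT_HIGH, spi->modalias);
	}

	static void board_cs_cleanup(struct spi_device *spi)
	{
		gpio_free(EP93XX_GPIO_LINE_EGPIO9);
	}

	static void board_cs_control(struct spi_device *spi, int value)
	{
		/* value already accounts for SPI_CS_HIGH, see ep93xx_spi_cs_control() */
		gpio_set_value(EP93XX_GPIO_LINE_EGPIO9, value);
	}

	static struct ep93xx_spi_chip_ops board_spi_chip_ops = {
		.setup		= board_cs_setup,
		.cleanup	= board_cs_cleanup,
		.cs_control	= board_cs_control,
	};
	/* passed to the driver via spi_board_info.controller_data */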
- */ -struct ep93xx_spi_chip { - const struct spi_device *spi; - unsigned long rate; - u8 div_cpsr; - u8 div_scr; - u8 dss; - struct ep93xx_spi_chip_ops *ops; -}; - -/* converts bits per word to CR0.DSS value */ -#define bits_per_word_to_dss(bpw) ((bpw) - 1) - -static inline void -ep93xx_spi_write_u8(const struct ep93xx_spi *espi, u16 reg, u8 value) -{ - __raw_writeb(value, espi->regs_base + reg); -} - -static inline u8 -ep93xx_spi_read_u8(const struct ep93xx_spi *spi, u16 reg) -{ - return __raw_readb(spi->regs_base + reg); -} - -static inline void -ep93xx_spi_write_u16(const struct ep93xx_spi *espi, u16 reg, u16 value) -{ - __raw_writew(value, espi->regs_base + reg); -} - -static inline u16 -ep93xx_spi_read_u16(const struct ep93xx_spi *spi, u16 reg) -{ - return __raw_readw(spi->regs_base + reg); -} - -static int ep93xx_spi_enable(const struct ep93xx_spi *espi) -{ - u8 regval; - int err; - - err = clk_enable(espi->clk); - if (err) - return err; - - regval = ep93xx_spi_read_u8(espi, SSPCR1); - regval |= SSPCR1_SSE; - ep93xx_spi_write_u8(espi, SSPCR1, regval); - - return 0; -} - -static void ep93xx_spi_disable(const struct ep93xx_spi *espi) -{ - u8 regval; - - regval = ep93xx_spi_read_u8(espi, SSPCR1); - regval &= ~SSPCR1_SSE; - ep93xx_spi_write_u8(espi, SSPCR1, regval); - - clk_disable(espi->clk); -} - -static void ep93xx_spi_enable_interrupts(const struct ep93xx_spi *espi) -{ - u8 regval; - - regval = ep93xx_spi_read_u8(espi, SSPCR1); - regval |= (SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE); - ep93xx_spi_write_u8(espi, SSPCR1, regval); -} - -static void ep93xx_spi_disable_interrupts(const struct ep93xx_spi *espi) -{ - u8 regval; - - regval = ep93xx_spi_read_u8(espi, SSPCR1); - regval &= ~(SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE); - ep93xx_spi_write_u8(espi, SSPCR1, regval); -} - -/** - * ep93xx_spi_calc_divisors() - calculates SPI clock divisors - * @espi: ep93xx SPI controller struct - * @chip: divisors are calculated for this chip - * @rate: desired SPI output clock rate - * - * Function calculates cpsr (clock pre-scaler) and scr divisors based on - * given @rate and places them to @chip->div_cpsr and @chip->div_scr. If, - * for some reason, divisors cannot be calculated nothing is stored and - * %-EINVAL is returned. - */ -static int ep93xx_spi_calc_divisors(const struct ep93xx_spi *espi, - struct ep93xx_spi_chip *chip, - unsigned long rate) -{ - unsigned long spi_clk_rate = clk_get_rate(espi->clk); - int cpsr, scr; - - /* - * Make sure that max value is between values supported by the - * controller. Note that minimum value is already checked in - * ep93xx_spi_transfer(). - */ - rate = clamp(rate, espi->min_rate, espi->max_rate); - - /* - * Calculate divisors so that we can get speed according the - * following formula: - * rate = spi_clock_rate / (cpsr * (1 + scr)) - * - * cpsr must be even number and starts from 2, scr can be any number - * between 0 and 255. - */ - for (cpsr = 2; cpsr <= 254; cpsr += 2) { - for (scr = 0; scr <= 255; scr++) { - if ((spi_clk_rate / (cpsr * (scr + 1))) <= rate) { - chip->div_scr = (u8)scr; - chip->div_cpsr = (u8)cpsr; - return 0; - } - } - } - - return -EINVAL; -} - -static void ep93xx_spi_cs_control(struct spi_device *spi, bool control) -{ - struct ep93xx_spi_chip *chip = spi_get_ctldata(spi); - int value = (spi->mode & SPI_CS_HIGH) ? 
control : !control; - - if (chip->ops && chip->ops->cs_control) - chip->ops->cs_control(spi, value); -} - -/** - * ep93xx_spi_setup() - setup an SPI device - * @spi: SPI device to setup - * - * This function sets up SPI device mode, speed etc. Can be called multiple - * times for a single device. Returns %0 in case of success, negative error in - * case of failure. When this function returns success, the device is - * deselected. - */ -static int ep93xx_spi_setup(struct spi_device *spi) -{ - struct ep93xx_spi *espi = spi_master_get_devdata(spi->master); - struct ep93xx_spi_chip *chip; - - if (spi->bits_per_word < 4 || spi->bits_per_word > 16) { - dev_err(&espi->pdev->dev, "invalid bits per word %d\n", - spi->bits_per_word); - return -EINVAL; - } - - chip = spi_get_ctldata(spi); - if (!chip) { - dev_dbg(&espi->pdev->dev, "initial setup for %s\n", - spi->modalias); - - chip = kzalloc(sizeof(*chip), GFP_KERNEL); - if (!chip) - return -ENOMEM; - - chip->spi = spi; - chip->ops = spi->controller_data; - - if (chip->ops && chip->ops->setup) { - int ret = chip->ops->setup(spi); - if (ret) { - kfree(chip); - return ret; - } - } - - spi_set_ctldata(spi, chip); - } - - if (spi->max_speed_hz != chip->rate) { - int err; - - err = ep93xx_spi_calc_divisors(espi, chip, spi->max_speed_hz); - if (err != 0) { - spi_set_ctldata(spi, NULL); - kfree(chip); - return err; - } - chip->rate = spi->max_speed_hz; - } - - chip->dss = bits_per_word_to_dss(spi->bits_per_word); - - ep93xx_spi_cs_control(spi, false); - return 0; -} - -/** - * ep93xx_spi_transfer() - queue message to be transferred - * @spi: target SPI device - * @msg: message to be transferred - * - * This function is called by SPI device drivers when they are going to transfer - * a new message. It simply puts the message in the queue and schedules - * workqueue to perform the actual transfer later on. - * - * Returns %0 on success and negative error in case of failure. - */ -static int ep93xx_spi_transfer(struct spi_device *spi, struct spi_message *msg) -{ - struct ep93xx_spi *espi = spi_master_get_devdata(spi->master); - struct spi_transfer *t; - unsigned long flags; - - if (!msg || !msg->complete) - return -EINVAL; - - /* first validate each transfer */ - list_for_each_entry(t, &msg->transfers, transfer_list) { - if (t->bits_per_word) { - if (t->bits_per_word < 4 || t->bits_per_word > 16) - return -EINVAL; - } - if (t->speed_hz && t->speed_hz < espi->min_rate) - return -EINVAL; - } - - /* - * Now that we own the message, let's initialize it so that it is - * suitable for us. We use @msg->status to signal whether there was - * error in transfer and @msg->state is used to hold pointer to the - * current transfer (or %NULL if no active current transfer). - */ - msg->state = NULL; - msg->status = 0; - msg->actual_length = 0; - - spin_lock_irqsave(&espi->lock, flags); - if (!espi->running) { - spin_unlock_irqrestore(&espi->lock, flags); - return -ESHUTDOWN; - } - list_add_tail(&msg->queue, &espi->msg_queue); - queue_work(espi->wq, &espi->msg_work); - spin_unlock_irqrestore(&espi->lock, flags); - - return 0; -} - -/** - * ep93xx_spi_cleanup() - cleans up master controller specific state - * @spi: SPI device to cleanup - * - * This function releases master controller specific state for given @spi - * device. 
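The divisor search in ep93xx_spi_calc_divisors() above can be tried out on its own; the sketch below repeats the same brute-force scan over even cpsr (2..254) and scr (0..255) values in plain user-space C. The 14.7456 MHz clock and 1 MHz target are made-up example numbers.

#include <stdio.h>

/* rate = spi_clk / (cpsr * (1 + scr)); cpsr must be even, 2..254, scr 0..255 */
static int calc_divisors(unsigned long spi_clk, unsigned long rate,
			 unsigned int *cpsr_out, unsigned int *scr_out)
{
	unsigned int cpsr, scr;

	for (cpsr = 2; cpsr <= 254; cpsr += 2) {
		for (scr = 0; scr <= 255; scr++) {
			if (spi_clk / (cpsr * (scr + 1)) <= rate) {
				*cpsr_out = cpsr;
				*scr_out = scr;
				return 0;
			}
		}
	}
	return -1;
}

int main(void)
{
	unsigned int cpsr, scr;

	if (!calc_divisors(14745600, 1000000, &cpsr, &scr))
		printf("cpsr=%u scr=%u -> %lu Hz\n", cpsr, scr,
		       14745600UL / (cpsr * (scr + 1)));
	return 0;
}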
- */ -static void ep93xx_spi_cleanup(struct spi_device *spi) -{ - struct ep93xx_spi_chip *chip; - - chip = spi_get_ctldata(spi); - if (chip) { - if (chip->ops && chip->ops->cleanup) - chip->ops->cleanup(spi); - spi_set_ctldata(spi, NULL); - kfree(chip); - } -} - -/** - * ep93xx_spi_chip_setup() - configures hardware according to given @chip - * @espi: ep93xx SPI controller struct - * @chip: chip specific settings - * - * This function sets up the actual hardware registers with settings given in - * @chip. Note that no validation is done so make sure that callers validate - * settings before calling this. - */ -static void ep93xx_spi_chip_setup(const struct ep93xx_spi *espi, - const struct ep93xx_spi_chip *chip) -{ - u16 cr0; - - cr0 = chip->div_scr << SSPCR0_SCR_SHIFT; - cr0 |= (chip->spi->mode & (SPI_CPHA|SPI_CPOL)) << SSPCR0_MODE_SHIFT; - cr0 |= chip->dss; - - dev_dbg(&espi->pdev->dev, "setup: mode %d, cpsr %d, scr %d, dss %d\n", - chip->spi->mode, chip->div_cpsr, chip->div_scr, chip->dss); - dev_dbg(&espi->pdev->dev, "setup: cr0 %#x", cr0); - - ep93xx_spi_write_u8(espi, SSPCPSR, chip->div_cpsr); - ep93xx_spi_write_u16(espi, SSPCR0, cr0); -} - -static inline int bits_per_word(const struct ep93xx_spi *espi) -{ - struct spi_message *msg = espi->current_msg; - struct spi_transfer *t = msg->state; - - return t->bits_per_word ? t->bits_per_word : msg->spi->bits_per_word; -} - -static void ep93xx_do_write(struct ep93xx_spi *espi, struct spi_transfer *t) -{ - if (bits_per_word(espi) > 8) { - u16 tx_val = 0; - - if (t->tx_buf) - tx_val = ((u16 *)t->tx_buf)[espi->tx]; - ep93xx_spi_write_u16(espi, SSPDR, tx_val); - espi->tx += sizeof(tx_val); - } else { - u8 tx_val = 0; - - if (t->tx_buf) - tx_val = ((u8 *)t->tx_buf)[espi->tx]; - ep93xx_spi_write_u8(espi, SSPDR, tx_val); - espi->tx += sizeof(tx_val); - } -} - -static void ep93xx_do_read(struct ep93xx_spi *espi, struct spi_transfer *t) -{ - if (bits_per_word(espi) > 8) { - u16 rx_val; - - rx_val = ep93xx_spi_read_u16(espi, SSPDR); - if (t->rx_buf) - ((u16 *)t->rx_buf)[espi->rx] = rx_val; - espi->rx += sizeof(rx_val); - } else { - u8 rx_val; - - rx_val = ep93xx_spi_read_u8(espi, SSPDR); - if (t->rx_buf) - ((u8 *)t->rx_buf)[espi->rx] = rx_val; - espi->rx += sizeof(rx_val); - } -} - -/** - * ep93xx_spi_read_write() - perform next RX/TX transfer - * @espi: ep93xx SPI controller struct - * - * This function transfers next bytes (or half-words) to/from RX/TX FIFOs. If - * called several times, the whole transfer will be completed. Returns - * %-EINPROGRESS when current transfer was not yet completed otherwise %0. - * - * When this function is finished, RX FIFO should be empty and TX FIFO should be - * full. - */ -static int ep93xx_spi_read_write(struct ep93xx_spi *espi) -{ - struct spi_message *msg = espi->current_msg; - struct spi_transfer *t = msg->state; - - /* read as long as RX FIFO has frames in it */ - while ((ep93xx_spi_read_u8(espi, SSPSR) & SSPSR_RNE)) { - ep93xx_do_read(espi, t); - espi->fifo_level--; - } - - /* write as long as TX FIFO has room */ - while (espi->fifo_level < SPI_FIFO_SIZE && espi->tx < t->len) { - ep93xx_do_write(espi, t); - espi->fifo_level++; - } - - if (espi->rx == t->len) { - msg->actual_length += t->len; - return 0; - } - - return -EINPROGRESS; -} - -/** - * ep93xx_spi_process_transfer() - processes one SPI transfer - * @espi: ep93xx SPI controller struct - * @msg: current message - * @t: transfer to process - * - * This function processes one SPI transfer given in @t. 
Function waits until - * transfer is complete (may sleep) and updates @msg->status based on whether - * transfer was successfully processed or not. - */ -static void ep93xx_spi_process_transfer(struct ep93xx_spi *espi, - struct spi_message *msg, - struct spi_transfer *t) -{ - struct ep93xx_spi_chip *chip = spi_get_ctldata(msg->spi); - - msg->state = t; - - /* - * Handle any transfer specific settings if needed. We use - * temporary chip settings here and restore original later when - * the transfer is finished. - */ - if (t->speed_hz || t->bits_per_word) { - struct ep93xx_spi_chip tmp_chip = *chip; - - if (t->speed_hz) { - int err; - - err = ep93xx_spi_calc_divisors(espi, &tmp_chip, - t->speed_hz); - if (err) { - dev_err(&espi->pdev->dev, - "failed to adjust speed\n"); - msg->status = err; - return; - } - } - - if (t->bits_per_word) - tmp_chip.dss = bits_per_word_to_dss(t->bits_per_word); - - /* - * Set up temporary new hw settings for this transfer. - */ - ep93xx_spi_chip_setup(espi, &tmp_chip); - } - - espi->rx = 0; - espi->tx = 0; - - /* - * Now everything is set up for the current transfer. We prime the TX - * FIFO, enable interrupts, and wait for the transfer to complete. - */ - if (ep93xx_spi_read_write(espi)) { - ep93xx_spi_enable_interrupts(espi); - wait_for_completion(&espi->wait); - } - - /* - * In case of error during transmit, we bail out from processing - * the message. - */ - if (msg->status) - return; - - /* - * After this transfer is finished, perform any possible - * post-transfer actions requested by the protocol driver. - */ - if (t->delay_usecs) { - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(usecs_to_jiffies(t->delay_usecs)); - } - if (t->cs_change) { - if (!list_is_last(&t->transfer_list, &msg->transfers)) { - /* - * In case protocol driver is asking us to drop the - * chipselect briefly, we let the scheduler to handle - * any "delay" here. - */ - ep93xx_spi_cs_control(msg->spi, false); - cond_resched(); - ep93xx_spi_cs_control(msg->spi, true); - } - } - - if (t->speed_hz || t->bits_per_word) - ep93xx_spi_chip_setup(espi, chip); -} - -/* - * ep93xx_spi_process_message() - process one SPI message - * @espi: ep93xx SPI controller struct - * @msg: message to process - * - * This function processes a single SPI message. We go through all transfers in - * the message and pass them to ep93xx_spi_process_transfer(). Chipselect is - * asserted during the whole message (unless per transfer cs_change is set). - * - * @msg->status contains %0 in case of success or negative error code in case of - * failure. - */ -static void ep93xx_spi_process_message(struct ep93xx_spi *espi, - struct spi_message *msg) -{ - unsigned long timeout; - struct spi_transfer *t; - int err; - - /* - * Enable the SPI controller and its clock. - */ - err = ep93xx_spi_enable(espi); - if (err) { - dev_err(&espi->pdev->dev, "failed to enable SPI controller\n"); - msg->status = err; - return; - } - - /* - * Just to be sure: flush any data from RX FIFO. - */ - timeout = jiffies + msecs_to_jiffies(SPI_TIMEOUT); - while (ep93xx_spi_read_u16(espi, SSPSR) & SSPSR_RNE) { - if (time_after(jiffies, timeout)) { - dev_warn(&espi->pdev->dev, - "timeout while flushing RX FIFO\n"); - msg->status = -ETIMEDOUT; - return; - } - ep93xx_spi_read_u16(espi, SSPDR); - } - - /* - * We explicitly handle FIFO level. This way we don't have to check TX - * FIFO status using %SSPSR_TNF bit which may cause RX FIFO overruns. 
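The explicit FIFO accounting described here is the key trick in ep93xx_spi_read_write(): the driver never consults SSPSR_TNF, it only trusts its own fifo_level counter. The toy model below (a loopback buffer standing in for the SSP data register; all names are illustrative) follows the same drain-then-fill shape:

#include <stdio.h>
#include <string.h>

#define SPI_FIFO_SIZE	8

/* toy loopback FIFO standing in for the hardware data register */
static unsigned char hw_fifo[SPI_FIFO_SIZE];
static unsigned int hw_count, hw_rd, hw_wr;

static int hw_rx_not_empty(void) { return hw_count > 0; }
static unsigned char hw_read(void) { hw_count--; return hw_fifo[hw_rd++ % SPI_FIFO_SIZE]; }
static void hw_write(unsigned char c) { hw_fifo[hw_wr++ % SPI_FIFO_SIZE] = c; hw_count++; }

int main(void)
{
	const char *tx = "hello, ep93xx";
	char rx[32] = { 0 };
	size_t len = strlen(tx), tx_pos = 0, rx_pos = 0;
	unsigned int fifo_level = 0;	/* driver-side bookkeeping, as in struct ep93xx_spi */

	while (rx_pos < len) {
		/* read as long as the RX FIFO has frames in it */
		while (hw_rx_not_empty()) {
			rx[rx_pos++] = hw_read();
			fifo_level--;
		}
		/* write as long as our own accounting says there is room */
		while (fifo_level < SPI_FIFO_SIZE && tx_pos < len) {
			hw_write(tx[tx_pos++]);
			fifo_level++;
		}
	}
	printf("%s\n", rx);
	return 0;
}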
- */ - espi->fifo_level = 0; - - /* - * Update SPI controller registers according to spi device and assert - * the chipselect. - */ - ep93xx_spi_chip_setup(espi, spi_get_ctldata(msg->spi)); - ep93xx_spi_cs_control(msg->spi, true); - - list_for_each_entry(t, &msg->transfers, transfer_list) { - ep93xx_spi_process_transfer(espi, msg, t); - if (msg->status) - break; - } - - /* - * Now the whole message is transferred (or failed for some reason). We - * deselect the device and disable the SPI controller. - */ - ep93xx_spi_cs_control(msg->spi, false); - ep93xx_spi_disable(espi); -} - -#define work_to_espi(work) (container_of((work), struct ep93xx_spi, msg_work)) - -/** - * ep93xx_spi_work() - EP93xx SPI workqueue worker function - * @work: work struct - * - * Workqueue worker function. This function is called when there are new - * SPI messages to be processed. Message is taken out from the queue and then - * passed to ep93xx_spi_process_message(). - * - * After message is transferred, protocol driver is notified by calling - * @msg->complete(). In case of error, @msg->status is set to negative error - * number, otherwise it contains zero (and @msg->actual_length is updated). - */ -static void ep93xx_spi_work(struct work_struct *work) -{ - struct ep93xx_spi *espi = work_to_espi(work); - struct spi_message *msg; - - spin_lock_irq(&espi->lock); - if (!espi->running || espi->current_msg || - list_empty(&espi->msg_queue)) { - spin_unlock_irq(&espi->lock); - return; - } - msg = list_first_entry(&espi->msg_queue, struct spi_message, queue); - list_del_init(&msg->queue); - espi->current_msg = msg; - spin_unlock_irq(&espi->lock); - - ep93xx_spi_process_message(espi, msg); - - /* - * Update the current message and re-schedule ourselves if there are - * more messages in the queue. - */ - spin_lock_irq(&espi->lock); - espi->current_msg = NULL; - if (espi->running && !list_empty(&espi->msg_queue)) - queue_work(espi->wq, &espi->msg_work); - spin_unlock_irq(&espi->lock); - - /* notify the protocol driver that we are done with this message */ - msg->complete(msg->context); -} - -static irqreturn_t ep93xx_spi_interrupt(int irq, void *dev_id) -{ - struct ep93xx_spi *espi = dev_id; - u8 irq_status = ep93xx_spi_read_u8(espi, SSPIIR); - - /* - * If we got ROR (receive overrun) interrupt we know that something is - * wrong. Just abort the message. - */ - if (unlikely(irq_status & SSPIIR_RORIS)) { - /* clear the overrun interrupt */ - ep93xx_spi_write_u8(espi, SSPICR, 0); - dev_warn(&espi->pdev->dev, - "receive overrun, aborting the message\n"); - espi->current_msg->status = -EIO; - } else { - /* - * Interrupt is either RX (RIS) or TX (TIS). For both cases we - * simply execute next data transfer. - */ - if (ep93xx_spi_read_write(espi)) { - /* - * In normal case, there still is some processing left - * for current transfer. Let's wait for the next - * interrupt then. - */ - return IRQ_HANDLED; - } - } - - /* - * Current transfer is finished, either with error or with success. In - * any case we disable interrupts and notify the worker to handle - * any post-processing of the message. 
- */ - ep93xx_spi_disable_interrupts(espi); - complete(&espi->wait); - return IRQ_HANDLED; -} - -static int __init ep93xx_spi_probe(struct platform_device *pdev) -{ - struct spi_master *master; - struct ep93xx_spi_info *info; - struct ep93xx_spi *espi; - struct resource *res; - int error; - - info = pdev->dev.platform_data; - - master = spi_alloc_master(&pdev->dev, sizeof(*espi)); - if (!master) { - dev_err(&pdev->dev, "failed to allocate spi master\n"); - return -ENOMEM; - } - - master->setup = ep93xx_spi_setup; - master->transfer = ep93xx_spi_transfer; - master->cleanup = ep93xx_spi_cleanup; - master->bus_num = pdev->id; - master->num_chipselect = info->num_chipselect; - master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; - - platform_set_drvdata(pdev, master); - - espi = spi_master_get_devdata(master); - - espi->clk = clk_get(&pdev->dev, NULL); - if (IS_ERR(espi->clk)) { - dev_err(&pdev->dev, "unable to get spi clock\n"); - error = PTR_ERR(espi->clk); - goto fail_release_master; - } - - spin_lock_init(&espi->lock); - init_completion(&espi->wait); - - /* - * Calculate maximum and minimum supported clock rates - * for the controller. - */ - espi->max_rate = clk_get_rate(espi->clk) / 2; - espi->min_rate = clk_get_rate(espi->clk) / (254 * 256); - espi->pdev = pdev; - - espi->irq = platform_get_irq(pdev, 0); - if (espi->irq < 0) { - error = -EBUSY; - dev_err(&pdev->dev, "failed to get irq resources\n"); - goto fail_put_clock; - } - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(&pdev->dev, "unable to get iomem resource\n"); - error = -ENODEV; - goto fail_put_clock; - } - - res = request_mem_region(res->start, resource_size(res), pdev->name); - if (!res) { - dev_err(&pdev->dev, "unable to request iomem resources\n"); - error = -EBUSY; - goto fail_put_clock; - } - - espi->regs_base = ioremap(res->start, resource_size(res)); - if (!espi->regs_base) { - dev_err(&pdev->dev, "failed to map resources\n"); - error = -ENODEV; - goto fail_free_mem; - } - - error = request_irq(espi->irq, ep93xx_spi_interrupt, 0, - "ep93xx-spi", espi); - if (error) { - dev_err(&pdev->dev, "failed to request irq\n"); - goto fail_unmap_regs; - } - - espi->wq = create_singlethread_workqueue("ep93xx_spid"); - if (!espi->wq) { - dev_err(&pdev->dev, "unable to create workqueue\n"); - goto fail_free_irq; - } - INIT_WORK(&espi->msg_work, ep93xx_spi_work); - INIT_LIST_HEAD(&espi->msg_queue); - espi->running = true; - - /* make sure that the hardware is disabled */ - ep93xx_spi_write_u8(espi, SSPCR1, 0); - - error = spi_register_master(master); - if (error) { - dev_err(&pdev->dev, "failed to register SPI master\n"); - goto fail_free_queue; - } - - dev_info(&pdev->dev, "EP93xx SPI Controller at 0x%08lx irq %d\n", - (unsigned long)res->start, espi->irq); - - return 0; - -fail_free_queue: - destroy_workqueue(espi->wq); -fail_free_irq: - free_irq(espi->irq, espi); -fail_unmap_regs: - iounmap(espi->regs_base); -fail_free_mem: - release_mem_region(res->start, resource_size(res)); -fail_put_clock: - clk_put(espi->clk); -fail_release_master: - spi_master_put(master); - platform_set_drvdata(pdev, NULL); - - return error; -} - -static int __exit ep93xx_spi_remove(struct platform_device *pdev) -{ - struct spi_master *master = platform_get_drvdata(pdev); - struct ep93xx_spi *espi = spi_master_get_devdata(master); - struct resource *res; - - spin_lock_irq(&espi->lock); - espi->running = false; - spin_unlock_irq(&espi->lock); - - destroy_workqueue(espi->wq); - - /* - * Complete remaining messages with 
%-ESHUTDOWN status. - */ - spin_lock_irq(&espi->lock); - while (!list_empty(&espi->msg_queue)) { - struct spi_message *msg; - - msg = list_first_entry(&espi->msg_queue, - struct spi_message, queue); - list_del_init(&msg->queue); - msg->status = -ESHUTDOWN; - spin_unlock_irq(&espi->lock); - msg->complete(msg->context); - spin_lock_irq(&espi->lock); - } - spin_unlock_irq(&espi->lock); - - free_irq(espi->irq, espi); - iounmap(espi->regs_base); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - release_mem_region(res->start, resource_size(res)); - clk_put(espi->clk); - platform_set_drvdata(pdev, NULL); - - spi_unregister_master(master); - return 0; -} - -static struct platform_driver ep93xx_spi_driver = { - .driver = { - .name = "ep93xx-spi", - .owner = THIS_MODULE, - }, - .remove = __exit_p(ep93xx_spi_remove), -}; - -static int __init ep93xx_spi_init(void) -{ - return platform_driver_probe(&ep93xx_spi_driver, ep93xx_spi_probe); -} -module_init(ep93xx_spi_init); - -static void __exit ep93xx_spi_exit(void) -{ - platform_driver_unregister(&ep93xx_spi_driver); -} -module_exit(ep93xx_spi_exit); - -MODULE_DESCRIPTION("EP93xx SPI Controller driver"); -MODULE_AUTHOR("Mika Westerberg "); -MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:ep93xx-spi"); diff --git a/drivers/spi/mpc512x_psc_spi.c b/drivers/spi/mpc512x_psc_spi.c deleted file mode 100644 index 6a5b423..0000000 --- a/drivers/spi/mpc512x_psc_spi.c +++ /dev/null @@ -1,577 +0,0 @@ -/* - * MPC512x PSC in SPI mode driver. - * - * Copyright (C) 2007,2008 Freescale Semiconductor Inc. - * Original port from 52xx driver: - * Hongjun Chen - * - * Fork of mpc52xx_psc_spi.c: - * Copyright (C) 2006 TOPTICA Photonics AG., Dragos Carp - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -struct mpc512x_psc_spi { - void (*cs_control)(struct spi_device *spi, bool on); - u32 sysclk; - - /* driver internal data */ - struct mpc52xx_psc __iomem *psc; - struct mpc512x_psc_fifo __iomem *fifo; - unsigned int irq; - u8 bits_per_word; - u8 busy; - u32 mclk; - u8 eofbyte; - - struct workqueue_struct *workqueue; - struct work_struct work; - - struct list_head queue; - spinlock_t lock; /* Message queue lock */ - - struct completion done; -}; - -/* controller state */ -struct mpc512x_psc_spi_cs { - int bits_per_word; - int speed_hz; -}; - -/* set clock freq, clock ramp, bits per work - * if t is NULL then reset the values to the default values - */ -static int mpc512x_psc_spi_transfer_setup(struct spi_device *spi, - struct spi_transfer *t) -{ - struct mpc512x_psc_spi_cs *cs = spi->controller_state; - - cs->speed_hz = (t && t->speed_hz) - ? t->speed_hz : spi->max_speed_hz; - cs->bits_per_word = (t && t->bits_per_word) - ? 
t->bits_per_word : spi->bits_per_word; - cs->bits_per_word = ((cs->bits_per_word + 7) / 8) * 8; - return 0; -} - -static void mpc512x_psc_spi_activate_cs(struct spi_device *spi) -{ - struct mpc512x_psc_spi_cs *cs = spi->controller_state; - struct mpc512x_psc_spi *mps = spi_master_get_devdata(spi->master); - struct mpc52xx_psc __iomem *psc = mps->psc; - u32 sicr; - u32 ccr; - u16 bclkdiv; - - sicr = in_be32(&psc->sicr); - - /* Set clock phase and polarity */ - if (spi->mode & SPI_CPHA) - sicr |= 0x00001000; - else - sicr &= ~0x00001000; - - if (spi->mode & SPI_CPOL) - sicr |= 0x00002000; - else - sicr &= ~0x00002000; - - if (spi->mode & SPI_LSB_FIRST) - sicr |= 0x10000000; - else - sicr &= ~0x10000000; - out_be32(&psc->sicr, sicr); - - ccr = in_be32(&psc->ccr); - ccr &= 0xFF000000; - if (cs->speed_hz) - bclkdiv = (mps->mclk / cs->speed_hz) - 1; - else - bclkdiv = (mps->mclk / 1000000) - 1; /* default 1MHz */ - - ccr |= (((bclkdiv & 0xff) << 16) | (((bclkdiv >> 8) & 0xff) << 8)); - out_be32(&psc->ccr, ccr); - mps->bits_per_word = cs->bits_per_word; - - if (mps->cs_control) - mps->cs_control(spi, (spi->mode & SPI_CS_HIGH) ? 1 : 0); -} - -static void mpc512x_psc_spi_deactivate_cs(struct spi_device *spi) -{ - struct mpc512x_psc_spi *mps = spi_master_get_devdata(spi->master); - - if (mps->cs_control) - mps->cs_control(spi, (spi->mode & SPI_CS_HIGH) ? 0 : 1); - -} - -/* extract and scale size field in txsz or rxsz */ -#define MPC512x_PSC_FIFO_SZ(sz) ((sz & 0x7ff) << 2); - -#define EOFBYTE 1 - -static int mpc512x_psc_spi_transfer_rxtx(struct spi_device *spi, - struct spi_transfer *t) -{ - struct mpc512x_psc_spi *mps = spi_master_get_devdata(spi->master); - struct mpc52xx_psc __iomem *psc = mps->psc; - struct mpc512x_psc_fifo __iomem *fifo = mps->fifo; - size_t len = t->len; - u8 *tx_buf = (u8 *)t->tx_buf; - u8 *rx_buf = (u8 *)t->rx_buf; - - if (!tx_buf && !rx_buf && t->len) - return -EINVAL; - - /* Zero MR2 */ - in_8(&psc->mode); - out_8(&psc->mode, 0x0); - - while (len) { - int count; - int i; - u8 data; - size_t fifosz; - int rxcount; - - /* - * The number of bytes that can be sent at a time - * depends on the fifo size. - */ - fifosz = MPC512x_PSC_FIFO_SZ(in_be32(&fifo->txsz)); - count = min(fifosz, len); - - for (i = count; i > 0; i--) { - data = tx_buf ? 
*tx_buf++ : 0; - if (len == EOFBYTE) - setbits32(&fifo->txcmd, MPC512x_PSC_FIFO_EOF); - out_8(&fifo->txdata_8, data); - len--; - } - - INIT_COMPLETION(mps->done); - - /* interrupt on tx fifo empty */ - out_be32(&fifo->txisr, MPC512x_PSC_FIFO_EMPTY); - out_be32(&fifo->tximr, MPC512x_PSC_FIFO_EMPTY); - - /* enable transmiter/receiver */ - out_8(&psc->command, - MPC52xx_PSC_TX_ENABLE | MPC52xx_PSC_RX_ENABLE); - - wait_for_completion(&mps->done); - - mdelay(1); - - /* rx fifo should have count bytes in it */ - rxcount = in_be32(&fifo->rxcnt); - if (rxcount != count) - mdelay(1); - - rxcount = in_be32(&fifo->rxcnt); - if (rxcount != count) { - dev_warn(&spi->dev, "expected %d bytes in rx fifo " - "but got %d\n", count, rxcount); - } - - rxcount = min(rxcount, count); - for (i = rxcount; i > 0; i--) { - data = in_8(&fifo->rxdata_8); - if (rx_buf) - *rx_buf++ = data; - } - while (in_be32(&fifo->rxcnt)) { - in_8(&fifo->rxdata_8); - } - - out_8(&psc->command, - MPC52xx_PSC_TX_DISABLE | MPC52xx_PSC_RX_DISABLE); - } - /* disable transmiter/receiver and fifo interrupt */ - out_8(&psc->command, MPC52xx_PSC_TX_DISABLE | MPC52xx_PSC_RX_DISABLE); - out_be32(&fifo->tximr, 0); - return 0; -} - -static void mpc512x_psc_spi_work(struct work_struct *work) -{ - struct mpc512x_psc_spi *mps = container_of(work, - struct mpc512x_psc_spi, - work); - - spin_lock_irq(&mps->lock); - mps->busy = 1; - while (!list_empty(&mps->queue)) { - struct spi_message *m; - struct spi_device *spi; - struct spi_transfer *t = NULL; - unsigned cs_change; - int status; - - m = container_of(mps->queue.next, struct spi_message, queue); - list_del_init(&m->queue); - spin_unlock_irq(&mps->lock); - - spi = m->spi; - cs_change = 1; - status = 0; - list_for_each_entry(t, &m->transfers, transfer_list) { - if (t->bits_per_word || t->speed_hz) { - status = mpc512x_psc_spi_transfer_setup(spi, t); - if (status < 0) - break; - } - - if (cs_change) - mpc512x_psc_spi_activate_cs(spi); - cs_change = t->cs_change; - - status = mpc512x_psc_spi_transfer_rxtx(spi, t); - if (status) - break; - m->actual_length += t->len; - - if (t->delay_usecs) - udelay(t->delay_usecs); - - if (cs_change) - mpc512x_psc_spi_deactivate_cs(spi); - } - - m->status = status; - m->complete(m->context); - - if (status || !cs_change) - mpc512x_psc_spi_deactivate_cs(spi); - - mpc512x_psc_spi_transfer_setup(spi, NULL); - - spin_lock_irq(&mps->lock); - } - mps->busy = 0; - spin_unlock_irq(&mps->lock); -} - -static int mpc512x_psc_spi_setup(struct spi_device *spi) -{ - struct mpc512x_psc_spi *mps = spi_master_get_devdata(spi->master); - struct mpc512x_psc_spi_cs *cs = spi->controller_state; - unsigned long flags; - - if (spi->bits_per_word % 8) - return -EINVAL; - - if (!cs) { - cs = kzalloc(sizeof *cs, GFP_KERNEL); - if (!cs) - return -ENOMEM; - spi->controller_state = cs; - } - - cs->bits_per_word = spi->bits_per_word; - cs->speed_hz = spi->max_speed_hz; - - spin_lock_irqsave(&mps->lock, flags); - if (!mps->busy) - mpc512x_psc_spi_deactivate_cs(spi); - spin_unlock_irqrestore(&mps->lock, flags); - - return 0; -} - -static int mpc512x_psc_spi_transfer(struct spi_device *spi, - struct spi_message *m) -{ - struct mpc512x_psc_spi *mps = spi_master_get_devdata(spi->master); - unsigned long flags; - - m->actual_length = 0; - m->status = -EINPROGRESS; - - spin_lock_irqsave(&mps->lock, flags); - list_add_tail(&m->queue, &mps->queue); - queue_work(mps->workqueue, &mps->work); - spin_unlock_irqrestore(&mps->lock, flags); - - return 0; -} - -static void mpc512x_psc_spi_cleanup(struct spi_device 
*spi) -{ - kfree(spi->controller_state); -} - -static int mpc512x_psc_spi_port_config(struct spi_master *master, - struct mpc512x_psc_spi *mps) -{ - struct mpc52xx_psc __iomem *psc = mps->psc; - struct mpc512x_psc_fifo __iomem *fifo = mps->fifo; - struct clk *spiclk; - int ret = 0; - char name[32]; - u32 sicr; - u32 ccr; - u16 bclkdiv; - - sprintf(name, "psc%d_mclk", master->bus_num); - spiclk = clk_get(&master->dev, name); - clk_enable(spiclk); - mps->mclk = clk_get_rate(spiclk); - clk_put(spiclk); - - /* Reset the PSC into a known state */ - out_8(&psc->command, MPC52xx_PSC_RST_RX); - out_8(&psc->command, MPC52xx_PSC_RST_TX); - out_8(&psc->command, MPC52xx_PSC_TX_DISABLE | MPC52xx_PSC_RX_DISABLE); - - /* Disable psc interrupts all useful interrupts are in fifo */ - out_be16(&psc->isr_imr.imr, 0); - - /* Disable fifo interrupts, will be enabled later */ - out_be32(&fifo->tximr, 0); - out_be32(&fifo->rximr, 0); - - /* Setup fifo slice address and size */ - /*out_be32(&fifo->txsz, 0x0fe00004);*/ - /*out_be32(&fifo->rxsz, 0x0ff00004);*/ - - sicr = 0x01000000 | /* SIM = 0001 -- 8 bit */ - 0x00800000 | /* GenClk = 1 -- internal clk */ - 0x00008000 | /* SPI = 1 */ - 0x00004000 | /* MSTR = 1 -- SPI master */ - 0x00000800; /* UseEOF = 1 -- SS low until EOF */ - - out_be32(&psc->sicr, sicr); - - ccr = in_be32(&psc->ccr); - ccr &= 0xFF000000; - bclkdiv = (mps->mclk / 1000000) - 1; /* default 1MHz */ - ccr |= (((bclkdiv & 0xff) << 16) | (((bclkdiv >> 8) & 0xff) << 8)); - out_be32(&psc->ccr, ccr); - - /* Set 2ms DTL delay */ - out_8(&psc->ctur, 0x00); - out_8(&psc->ctlr, 0x82); - - /* we don't use the alarms */ - out_be32(&fifo->rxalarm, 0xfff); - out_be32(&fifo->txalarm, 0); - - /* Enable FIFO slices for Rx/Tx */ - out_be32(&fifo->rxcmd, - MPC512x_PSC_FIFO_ENABLE_SLICE | MPC512x_PSC_FIFO_ENABLE_DMA); - out_be32(&fifo->txcmd, - MPC512x_PSC_FIFO_ENABLE_SLICE | MPC512x_PSC_FIFO_ENABLE_DMA); - - mps->bits_per_word = 8; - - return ret; -} - -static irqreturn_t mpc512x_psc_spi_isr(int irq, void *dev_id) -{ - struct mpc512x_psc_spi *mps = (struct mpc512x_psc_spi *)dev_id; - struct mpc512x_psc_fifo __iomem *fifo = mps->fifo; - - /* clear interrupt and wake up the work queue */ - if (in_be32(&fifo->txisr) & - in_be32(&fifo->tximr) & MPC512x_PSC_FIFO_EMPTY) { - out_be32(&fifo->txisr, MPC512x_PSC_FIFO_EMPTY); - out_be32(&fifo->tximr, 0); - complete(&mps->done); - return IRQ_HANDLED; - } - return IRQ_NONE; -} - -/* bus_num is used only for the case dev->platform_data == NULL */ -static int __devinit mpc512x_psc_spi_do_probe(struct device *dev, u32 regaddr, - u32 size, unsigned int irq, - s16 bus_num) -{ - struct fsl_spi_platform_data *pdata = dev->platform_data; - struct mpc512x_psc_spi *mps; - struct spi_master *master; - int ret; - void *tempp; - - master = spi_alloc_master(dev, sizeof *mps); - if (master == NULL) - return -ENOMEM; - - dev_set_drvdata(dev, master); - mps = spi_master_get_devdata(master); - mps->irq = irq; - - if (pdata == NULL) { - dev_err(dev, "probe called without platform data, no " - "cs_control function will be called\n"); - mps->cs_control = NULL; - mps->sysclk = 0; - master->bus_num = bus_num; - master->num_chipselect = 255; - } else { - mps->cs_control = pdata->cs_control; - mps->sysclk = pdata->sysclk; - master->bus_num = pdata->bus_num; - master->num_chipselect = pdata->max_chipselect; - } - - master->setup = mpc512x_psc_spi_setup; - master->transfer = mpc512x_psc_spi_transfer; - master->cleanup = mpc512x_psc_spi_cleanup; - master->dev.of_node = dev->of_node; - - tempp = 
ioremap(regaddr, size); - if (!tempp) { - dev_err(dev, "could not ioremap I/O port range\n"); - ret = -EFAULT; - goto free_master; - } - mps->psc = tempp; - mps->fifo = - (struct mpc512x_psc_fifo *)(tempp + sizeof(struct mpc52xx_psc)); - - ret = request_irq(mps->irq, mpc512x_psc_spi_isr, IRQF_SHARED, - "mpc512x-psc-spi", mps); - if (ret) - goto free_master; - - ret = mpc512x_psc_spi_port_config(master, mps); - if (ret < 0) - goto free_irq; - - spin_lock_init(&mps->lock); - init_completion(&mps->done); - INIT_WORK(&mps->work, mpc512x_psc_spi_work); - INIT_LIST_HEAD(&mps->queue); - - mps->workqueue = - create_singlethread_workqueue(dev_name(master->dev.parent)); - if (mps->workqueue == NULL) { - ret = -EBUSY; - goto free_irq; - } - - ret = spi_register_master(master); - if (ret < 0) - goto unreg_master; - - return ret; - -unreg_master: - destroy_workqueue(mps->workqueue); -free_irq: - free_irq(mps->irq, mps); -free_master: - if (mps->psc) - iounmap(mps->psc); - spi_master_put(master); - - return ret; -} - -static int __devexit mpc512x_psc_spi_do_remove(struct device *dev) -{ - struct spi_master *master = dev_get_drvdata(dev); - struct mpc512x_psc_spi *mps = spi_master_get_devdata(master); - - flush_workqueue(mps->workqueue); - destroy_workqueue(mps->workqueue); - spi_unregister_master(master); - free_irq(mps->irq, mps); - if (mps->psc) - iounmap(mps->psc); - - return 0; -} - -static int __devinit mpc512x_psc_spi_of_probe(struct platform_device *op) -{ - const u32 *regaddr_p; - u64 regaddr64, size64; - s16 id = -1; - - regaddr_p = of_get_address(op->dev.of_node, 0, &size64, NULL); - if (!regaddr_p) { - dev_err(&op->dev, "Invalid PSC address\n"); - return -EINVAL; - } - regaddr64 = of_translate_address(op->dev.of_node, regaddr_p); - - /* get PSC id (0..11, used by port_config) */ - if (op->dev.platform_data == NULL) { - const u32 *psc_nump; - - psc_nump = of_get_property(op->dev.of_node, "cell-index", NULL); - if (!psc_nump || *psc_nump > 11) { - dev_err(&op->dev, "mpc512x_psc_spi: Device node %s " - "has invalid cell-index property\n", - op->dev.of_node->full_name); - return -EINVAL; - } - id = *psc_nump; - } - - return mpc512x_psc_spi_do_probe(&op->dev, (u32) regaddr64, (u32) size64, - irq_of_parse_and_map(op->dev.of_node, 0), id); -} - -static int __devexit mpc512x_psc_spi_of_remove(struct platform_device *op) -{ - return mpc512x_psc_spi_do_remove(&op->dev); -} - -static struct of_device_id mpc512x_psc_spi_of_match[] = { - { .compatible = "fsl,mpc5121-psc-spi", }, - {}, -}; - -MODULE_DEVICE_TABLE(of, mpc512x_psc_spi_of_match); - -static struct platform_driver mpc512x_psc_spi_of_driver = { - .probe = mpc512x_psc_spi_of_probe, - .remove = __devexit_p(mpc512x_psc_spi_of_remove), - .driver = { - .name = "mpc512x-psc-spi", - .owner = THIS_MODULE, - .of_match_table = mpc512x_psc_spi_of_match, - }, -}; - -static int __init mpc512x_psc_spi_init(void) -{ - return platform_driver_register(&mpc512x_psc_spi_of_driver); -} -module_init(mpc512x_psc_spi_init); - -static void __exit mpc512x_psc_spi_exit(void) -{ - platform_driver_unregister(&mpc512x_psc_spi_of_driver); -} -module_exit(mpc512x_psc_spi_exit); - -MODULE_AUTHOR("John Rigby"); -MODULE_DESCRIPTION("MPC512x PSC SPI Driver"); -MODULE_LICENSE("GPL"); diff --git a/drivers/spi/mpc52xx_psc_spi.c b/drivers/spi/mpc52xx_psc_spi.c deleted file mode 100644 index e30baf0..0000000 --- a/drivers/spi/mpc52xx_psc_spi.c +++ /dev/null @@ -1,529 +0,0 @@ -/* - * MPC52xx PSC in SPI mode driver. 
- * - * Maintainer: Dragos Carp - * - * Copyright (C) 2006 TOPTICA Photonics AG. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#define MCLK 20000000 /* PSC port MClk in hz */ - -struct mpc52xx_psc_spi { - /* fsl_spi_platform data */ - void (*cs_control)(struct spi_device *spi, bool on); - u32 sysclk; - - /* driver internal data */ - struct mpc52xx_psc __iomem *psc; - struct mpc52xx_psc_fifo __iomem *fifo; - unsigned int irq; - u8 bits_per_word; - u8 busy; - - struct workqueue_struct *workqueue; - struct work_struct work; - - struct list_head queue; - spinlock_t lock; - - struct completion done; -}; - -/* controller state */ -struct mpc52xx_psc_spi_cs { - int bits_per_word; - int speed_hz; -}; - -/* set clock freq, clock ramp, bits per work - * if t is NULL then reset the values to the default values - */ -static int mpc52xx_psc_spi_transfer_setup(struct spi_device *spi, - struct spi_transfer *t) -{ - struct mpc52xx_psc_spi_cs *cs = spi->controller_state; - - cs->speed_hz = (t && t->speed_hz) - ? t->speed_hz : spi->max_speed_hz; - cs->bits_per_word = (t && t->bits_per_word) - ? t->bits_per_word : spi->bits_per_word; - cs->bits_per_word = ((cs->bits_per_word + 7) / 8) * 8; - return 0; -} - -static void mpc52xx_psc_spi_activate_cs(struct spi_device *spi) -{ - struct mpc52xx_psc_spi_cs *cs = spi->controller_state; - struct mpc52xx_psc_spi *mps = spi_master_get_devdata(spi->master); - struct mpc52xx_psc __iomem *psc = mps->psc; - u32 sicr; - u16 ccr; - - sicr = in_be32(&psc->sicr); - - /* Set clock phase and polarity */ - if (spi->mode & SPI_CPHA) - sicr |= 0x00001000; - else - sicr &= ~0x00001000; - if (spi->mode & SPI_CPOL) - sicr |= 0x00002000; - else - sicr &= ~0x00002000; - - if (spi->mode & SPI_LSB_FIRST) - sicr |= 0x10000000; - else - sicr &= ~0x10000000; - out_be32(&psc->sicr, sicr); - - /* Set clock frequency and bits per word - * Because psc->ccr is defined as 16bit register instead of 32bit - * just set the lower byte of BitClkDiv - */ - ccr = in_be16((u16 __iomem *)&psc->ccr); - ccr &= 0xFF00; - if (cs->speed_hz) - ccr |= (MCLK / cs->speed_hz - 1) & 0xFF; - else /* by default SPI Clk 1MHz */ - ccr |= (MCLK / 1000000 - 1) & 0xFF; - out_be16((u16 __iomem *)&psc->ccr, ccr); - mps->bits_per_word = cs->bits_per_word; - - if (mps->cs_control) - mps->cs_control(spi, (spi->mode & SPI_CS_HIGH) ? 1 : 0); -} - -static void mpc52xx_psc_spi_deactivate_cs(struct spi_device *spi) -{ - struct mpc52xx_psc_spi *mps = spi_master_get_devdata(spi->master); - - if (mps->cs_control) - mps->cs_control(spi, (spi->mode & SPI_CS_HIGH) ? 
0 : 1); -} - -#define MPC52xx_PSC_BUFSIZE (MPC52xx_PSC_RFNUM_MASK + 1) -/* wake up when 80% fifo full */ -#define MPC52xx_PSC_RFALARM (MPC52xx_PSC_BUFSIZE * 20 / 100) - -static int mpc52xx_psc_spi_transfer_rxtx(struct spi_device *spi, - struct spi_transfer *t) -{ - struct mpc52xx_psc_spi *mps = spi_master_get_devdata(spi->master); - struct mpc52xx_psc __iomem *psc = mps->psc; - struct mpc52xx_psc_fifo __iomem *fifo = mps->fifo; - unsigned rb = 0; /* number of bytes receieved */ - unsigned sb = 0; /* number of bytes sent */ - unsigned char *rx_buf = (unsigned char *)t->rx_buf; - unsigned char *tx_buf = (unsigned char *)t->tx_buf; - unsigned rfalarm; - unsigned send_at_once = MPC52xx_PSC_BUFSIZE; - unsigned recv_at_once; - int last_block = 0; - - if (!t->tx_buf && !t->rx_buf && t->len) - return -EINVAL; - - /* enable transmiter/receiver */ - out_8(&psc->command, MPC52xx_PSC_TX_ENABLE | MPC52xx_PSC_RX_ENABLE); - while (rb < t->len) { - if (t->len - rb > MPC52xx_PSC_BUFSIZE) { - rfalarm = MPC52xx_PSC_RFALARM; - last_block = 0; - } else { - send_at_once = t->len - sb; - rfalarm = MPC52xx_PSC_BUFSIZE - (t->len - rb); - last_block = 1; - } - - dev_dbg(&spi->dev, "send %d bytes...\n", send_at_once); - for (; send_at_once; sb++, send_at_once--) { - /* set EOF flag before the last word is sent */ - if (send_at_once == 1 && last_block) - out_8(&psc->ircr2, 0x01); - - if (tx_buf) - out_8(&psc->mpc52xx_psc_buffer_8, tx_buf[sb]); - else - out_8(&psc->mpc52xx_psc_buffer_8, 0); - } - - - /* enable interrupts and wait for wake up - * if just one byte is expected the Rx FIFO genererates no - * FFULL interrupt, so activate the RxRDY interrupt - */ - out_8(&psc->command, MPC52xx_PSC_SEL_MODE_REG_1); - if (t->len - rb == 1) { - out_8(&psc->mode, 0); - } else { - out_8(&psc->mode, MPC52xx_PSC_MODE_FFULL); - out_be16(&fifo->rfalarm, rfalarm); - } - out_be16(&psc->mpc52xx_psc_imr, MPC52xx_PSC_IMR_RXRDY); - wait_for_completion(&mps->done); - recv_at_once = in_be16(&fifo->rfnum); - dev_dbg(&spi->dev, "%d bytes received\n", recv_at_once); - - send_at_once = recv_at_once; - if (rx_buf) { - for (; recv_at_once; rb++, recv_at_once--) - rx_buf[rb] = in_8(&psc->mpc52xx_psc_buffer_8); - } else { - for (; recv_at_once; rb++, recv_at_once--) - in_8(&psc->mpc52xx_psc_buffer_8); - } - } - /* disable transmiter/receiver */ - out_8(&psc->command, MPC52xx_PSC_TX_DISABLE | MPC52xx_PSC_RX_DISABLE); - - return 0; -} - -static void mpc52xx_psc_spi_work(struct work_struct *work) -{ - struct mpc52xx_psc_spi *mps = - container_of(work, struct mpc52xx_psc_spi, work); - - spin_lock_irq(&mps->lock); - mps->busy = 1; - while (!list_empty(&mps->queue)) { - struct spi_message *m; - struct spi_device *spi; - struct spi_transfer *t = NULL; - unsigned cs_change; - int status; - - m = container_of(mps->queue.next, struct spi_message, queue); - list_del_init(&m->queue); - spin_unlock_irq(&mps->lock); - - spi = m->spi; - cs_change = 1; - status = 0; - list_for_each_entry (t, &m->transfers, transfer_list) { - if (t->bits_per_word || t->speed_hz) { - status = mpc52xx_psc_spi_transfer_setup(spi, t); - if (status < 0) - break; - } - - if (cs_change) - mpc52xx_psc_spi_activate_cs(spi); - cs_change = t->cs_change; - - status = mpc52xx_psc_spi_transfer_rxtx(spi, t); - if (status) - break; - m->actual_length += t->len; - - if (t->delay_usecs) - udelay(t->delay_usecs); - - if (cs_change) - mpc52xx_psc_spi_deactivate_cs(spi); - } - - m->status = status; - m->complete(m->context); - - if (status || !cs_change) - mpc52xx_psc_spi_deactivate_cs(spi); - - 
mpc52xx_psc_spi_transfer_setup(spi, NULL); - - spin_lock_irq(&mps->lock); - } - mps->busy = 0; - spin_unlock_irq(&mps->lock); -} - -static int mpc52xx_psc_spi_setup(struct spi_device *spi) -{ - struct mpc52xx_psc_spi *mps = spi_master_get_devdata(spi->master); - struct mpc52xx_psc_spi_cs *cs = spi->controller_state; - unsigned long flags; - - if (spi->bits_per_word%8) - return -EINVAL; - - if (!cs) { - cs = kzalloc(sizeof *cs, GFP_KERNEL); - if (!cs) - return -ENOMEM; - spi->controller_state = cs; - } - - cs->bits_per_word = spi->bits_per_word; - cs->speed_hz = spi->max_speed_hz; - - spin_lock_irqsave(&mps->lock, flags); - if (!mps->busy) - mpc52xx_psc_spi_deactivate_cs(spi); - spin_unlock_irqrestore(&mps->lock, flags); - - return 0; -} - -static int mpc52xx_psc_spi_transfer(struct spi_device *spi, - struct spi_message *m) -{ - struct mpc52xx_psc_spi *mps = spi_master_get_devdata(spi->master); - unsigned long flags; - - m->actual_length = 0; - m->status = -EINPROGRESS; - - spin_lock_irqsave(&mps->lock, flags); - list_add_tail(&m->queue, &mps->queue); - queue_work(mps->workqueue, &mps->work); - spin_unlock_irqrestore(&mps->lock, flags); - - return 0; -} - -static void mpc52xx_psc_spi_cleanup(struct spi_device *spi) -{ - kfree(spi->controller_state); -} - -static int mpc52xx_psc_spi_port_config(int psc_id, struct mpc52xx_psc_spi *mps) -{ - struct mpc52xx_psc __iomem *psc = mps->psc; - struct mpc52xx_psc_fifo __iomem *fifo = mps->fifo; - u32 mclken_div; - int ret; - - /* default sysclk is 512MHz */ - mclken_div = (mps->sysclk ? mps->sysclk : 512000000) / MCLK; - ret = mpc52xx_set_psc_clkdiv(psc_id, mclken_div); - if (ret) - return ret; - - /* Reset the PSC into a known state */ - out_8(&psc->command, MPC52xx_PSC_RST_RX); - out_8(&psc->command, MPC52xx_PSC_RST_TX); - out_8(&psc->command, MPC52xx_PSC_TX_DISABLE | MPC52xx_PSC_RX_DISABLE); - - /* Disable interrupts, interrupts are based on alarm level */ - out_be16(&psc->mpc52xx_psc_imr, 0); - out_8(&psc->command, MPC52xx_PSC_SEL_MODE_REG_1); - out_8(&fifo->rfcntl, 0); - out_8(&psc->mode, MPC52xx_PSC_MODE_FFULL); - - /* Configure 8bit codec mode as a SPI master and use EOF flags */ - /* SICR_SIM_CODEC8|SICR_GENCLK|SICR_SPI|SICR_MSTR|SICR_USEEOF */ - out_be32(&psc->sicr, 0x0180C800); - out_be16((u16 __iomem *)&psc->ccr, 0x070F); /* default SPI Clk 1MHz */ - - /* Set 2ms DTL delay */ - out_8(&psc->ctur, 0x00); - out_8(&psc->ctlr, 0x84); - - mps->bits_per_word = 8; - - return 0; -} - -static irqreturn_t mpc52xx_psc_spi_isr(int irq, void *dev_id) -{ - struct mpc52xx_psc_spi *mps = (struct mpc52xx_psc_spi *)dev_id; - struct mpc52xx_psc __iomem *psc = mps->psc; - - /* disable interrupt and wake up the work queue */ - if (in_be16(&psc->mpc52xx_psc_isr) & MPC52xx_PSC_IMR_RXRDY) { - out_be16(&psc->mpc52xx_psc_imr, 0); - complete(&mps->done); - return IRQ_HANDLED; - } - return IRQ_NONE; -} - -/* bus_num is used only for the case dev->platform_data == NULL */ -static int __devinit mpc52xx_psc_spi_do_probe(struct device *dev, u32 regaddr, - u32 size, unsigned int irq, s16 bus_num) -{ - struct fsl_spi_platform_data *pdata = dev->platform_data; - struct mpc52xx_psc_spi *mps; - struct spi_master *master; - int ret; - - master = spi_alloc_master(dev, sizeof *mps); - if (master == NULL) - return -ENOMEM; - - dev_set_drvdata(dev, master); - mps = spi_master_get_devdata(master); - - /* the spi->mode bits understood by this driver: */ - master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST; - - mps->irq = irq; - if (pdata == NULL) { - dev_warn(dev, 
"probe called without platform data, no " - "cs_control function will be called\n"); - mps->cs_control = NULL; - mps->sysclk = 0; - master->bus_num = bus_num; - master->num_chipselect = 255; - } else { - mps->cs_control = pdata->cs_control; - mps->sysclk = pdata->sysclk; - master->bus_num = pdata->bus_num; - master->num_chipselect = pdata->max_chipselect; - } - master->setup = mpc52xx_psc_spi_setup; - master->transfer = mpc52xx_psc_spi_transfer; - master->cleanup = mpc52xx_psc_spi_cleanup; - master->dev.of_node = dev->of_node; - - mps->psc = ioremap(regaddr, size); - if (!mps->psc) { - dev_err(dev, "could not ioremap I/O port range\n"); - ret = -EFAULT; - goto free_master; - } - /* On the 5200, fifo regs are immediately ajacent to the psc regs */ - mps->fifo = ((void __iomem *)mps->psc) + sizeof(struct mpc52xx_psc); - - ret = request_irq(mps->irq, mpc52xx_psc_spi_isr, 0, "mpc52xx-psc-spi", - mps); - if (ret) - goto free_master; - - ret = mpc52xx_psc_spi_port_config(master->bus_num, mps); - if (ret < 0) { - dev_err(dev, "can't configure PSC! Is it capable of SPI?\n"); - goto free_irq; - } - - spin_lock_init(&mps->lock); - init_completion(&mps->done); - INIT_WORK(&mps->work, mpc52xx_psc_spi_work); - INIT_LIST_HEAD(&mps->queue); - - mps->workqueue = create_singlethread_workqueue( - dev_name(master->dev.parent)); - if (mps->workqueue == NULL) { - ret = -EBUSY; - goto free_irq; - } - - ret = spi_register_master(master); - if (ret < 0) - goto unreg_master; - - return ret; - -unreg_master: - destroy_workqueue(mps->workqueue); -free_irq: - free_irq(mps->irq, mps); -free_master: - if (mps->psc) - iounmap(mps->psc); - spi_master_put(master); - - return ret; -} - -static int __devinit mpc52xx_psc_spi_of_probe(struct platform_device *op) -{ - const u32 *regaddr_p; - u64 regaddr64, size64; - s16 id = -1; - - regaddr_p = of_get_address(op->dev.of_node, 0, &size64, NULL); - if (!regaddr_p) { - dev_err(&op->dev, "Invalid PSC address\n"); - return -EINVAL; - } - regaddr64 = of_translate_address(op->dev.of_node, regaddr_p); - - /* get PSC id (1..6, used by port_config) */ - if (op->dev.platform_data == NULL) { - const u32 *psc_nump; - - psc_nump = of_get_property(op->dev.of_node, "cell-index", NULL); - if (!psc_nump || *psc_nump > 5) { - dev_err(&op->dev, "Invalid cell-index property\n"); - return -EINVAL; - } - id = *psc_nump + 1; - } - - return mpc52xx_psc_spi_do_probe(&op->dev, (u32)regaddr64, (u32)size64, - irq_of_parse_and_map(op->dev.of_node, 0), id); -} - -static int __devexit mpc52xx_psc_spi_of_remove(struct platform_device *op) -{ - struct spi_master *master = dev_get_drvdata(&op->dev); - struct mpc52xx_psc_spi *mps = spi_master_get_devdata(master); - - flush_workqueue(mps->workqueue); - destroy_workqueue(mps->workqueue); - spi_unregister_master(master); - free_irq(mps->irq, mps); - if (mps->psc) - iounmap(mps->psc); - - return 0; -} - -static const struct of_device_id mpc52xx_psc_spi_of_match[] = { - { .compatible = "fsl,mpc5200-psc-spi", }, - { .compatible = "mpc5200-psc-spi", }, /* old */ - {} -}; - -MODULE_DEVICE_TABLE(of, mpc52xx_psc_spi_of_match); - -static struct platform_driver mpc52xx_psc_spi_of_driver = { - .probe = mpc52xx_psc_spi_of_probe, - .remove = __devexit_p(mpc52xx_psc_spi_of_remove), - .driver = { - .name = "mpc52xx-psc-spi", - .owner = THIS_MODULE, - .of_match_table = mpc52xx_psc_spi_of_match, - }, -}; - -static int __init mpc52xx_psc_spi_init(void) -{ - return platform_driver_register(&mpc52xx_psc_spi_of_driver); -} -module_init(mpc52xx_psc_spi_init); - -static void __exit 
mpc52xx_psc_spi_exit(void) -{ - platform_driver_unregister(&mpc52xx_psc_spi_of_driver); -} -module_exit(mpc52xx_psc_spi_exit); - -MODULE_AUTHOR("Dragos Carp"); -MODULE_DESCRIPTION("MPC52xx PSC SPI Driver"); -MODULE_LICENSE("GPL"); diff --git a/drivers/spi/mpc52xx_spi.c b/drivers/spi/mpc52xx_spi.c deleted file mode 100644 index 015a974..0000000 --- a/drivers/spi/mpc52xx_spi.c +++ /dev/null @@ -1,579 +0,0 @@ -/* - * MPC52xx SPI bus driver. - * - * Copyright (C) 2008 Secret Lab Technologies Ltd. - * - * This file is released under the GPLv2 - * - * This is the driver for the MPC5200's dedicated SPI controller. - * - * Note: this driver does not support the MPC5200 PSC in SPI mode. For - * that driver see drivers/spi/mpc52xx_psc_spi.c - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -MODULE_AUTHOR("Grant Likely "); -MODULE_DESCRIPTION("MPC52xx SPI (non-PSC) Driver"); -MODULE_LICENSE("GPL"); - -/* Register offsets */ -#define SPI_CTRL1 0x00 -#define SPI_CTRL1_SPIE (1 << 7) -#define SPI_CTRL1_SPE (1 << 6) -#define SPI_CTRL1_MSTR (1 << 4) -#define SPI_CTRL1_CPOL (1 << 3) -#define SPI_CTRL1_CPHA (1 << 2) -#define SPI_CTRL1_SSOE (1 << 1) -#define SPI_CTRL1_LSBFE (1 << 0) - -#define SPI_CTRL2 0x01 -#define SPI_BRR 0x04 - -#define SPI_STATUS 0x05 -#define SPI_STATUS_SPIF (1 << 7) -#define SPI_STATUS_WCOL (1 << 6) -#define SPI_STATUS_MODF (1 << 4) - -#define SPI_DATA 0x09 -#define SPI_PORTDATA 0x0d -#define SPI_DATADIR 0x10 - -/* FSM state return values */ -#define FSM_STOP 0 /* Nothing more for the state machine to */ - /* do. If something interesting happens */ - /* then an IRQ will be received */ -#define FSM_POLL 1 /* need to poll for completion, an IRQ is */ - /* not expected */ -#define FSM_CONTINUE 2 /* Keep iterating the state machine */ - -/* Driver internal data */ -struct mpc52xx_spi { - struct spi_master *master; - void __iomem *regs; - int irq0; /* MODF irq */ - int irq1; /* SPIF irq */ - unsigned int ipb_freq; - - /* Statistics; not used now, but will be reintroduced for debugfs */ - int msg_count; - int wcol_count; - int wcol_ticks; - u32 wcol_tx_timestamp; - int modf_count; - int byte_count; - - struct list_head queue; /* queue of pending messages */ - spinlock_t lock; - struct work_struct work; - - /* Details of current transfer (length, and buffer pointers) */ - struct spi_message *message; /* current message */ - struct spi_transfer *transfer; /* current transfer */ - int (*state)(int irq, struct mpc52xx_spi *ms, u8 status, u8 data); - int len; - int timestamp; - u8 *rx_buf; - const u8 *tx_buf; - int cs_change; - int gpio_cs_count; - unsigned int *gpio_cs; -}; - -/* - * CS control function - */ -static void mpc52xx_spi_chipsel(struct mpc52xx_spi *ms, int value) -{ - int cs; - - if (ms->gpio_cs_count > 0) { - cs = ms->message->spi->chip_select; - gpio_set_value(ms->gpio_cs[cs], value ? 0 : 1); - } else - out_8(ms->regs + SPI_PORTDATA, value ? 0 : 0x08); -} - -/* - * Start a new transfer. This is called both by the idle state - * for the first transfer in a message, and by the wait state when the - * previous transfer in a message is complete. 
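The FSM_* return codes and the state function pointer above drive the whole driver: mpc52xx_spi_fsm_process() below simply keeps calling the current state handler while it returns FSM_CONTINUE. A stand-alone toy version of that pattern (toy_spi, state_idle and state_transfer are made-up names, and the real handlers also take irq/status/data arguments):

#include <stdio.h>

#define FSM_STOP	0	/* nothing more to do until an IRQ arrives */
#define FSM_POLL	1	/* completion must be polled for */
#define FSM_CONTINUE	2	/* keep iterating the state machine */

struct toy_spi;
typedef int (*fsm_state_t)(struct toy_spi *ms);

struct toy_spi {
	fsm_state_t state;
	int remaining;
};

static int state_transfer(struct toy_spi *ms);

static int state_idle(struct toy_spi *ms)
{
	if (!ms->remaining)
		return FSM_STOP;
	ms->state = state_transfer;
	return FSM_CONTINUE;
}

static int state_transfer(struct toy_spi *ms)
{
	printf("transfer byte, %d left\n", ms->remaining--);
	if (!ms->remaining)
		ms->state = state_idle;
	return FSM_CONTINUE;
}

/* same driving loop as mpc52xx_spi_fsm_process() */
static void fsm_process(struct toy_spi *ms)
{
	int rc = FSM_CONTINUE;

	while (rc == FSM_CONTINUE)
		rc = ms->state(ms);
}

int main(void)
{
	struct toy_spi ms = { .state = state_idle, .remaining = 3 };

	fsm_process(&ms);
	return 0;
}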
- */ -static void mpc52xx_spi_start_transfer(struct mpc52xx_spi *ms) -{ - ms->rx_buf = ms->transfer->rx_buf; - ms->tx_buf = ms->transfer->tx_buf; - ms->len = ms->transfer->len; - - /* Activate the chip select */ - if (ms->cs_change) - mpc52xx_spi_chipsel(ms, 1); - ms->cs_change = ms->transfer->cs_change; - - /* Write out the first byte */ - ms->wcol_tx_timestamp = get_tbl(); - if (ms->tx_buf) - out_8(ms->regs + SPI_DATA, *ms->tx_buf++); - else - out_8(ms->regs + SPI_DATA, 0); -} - -/* Forward declaration of state handlers */ -static int mpc52xx_spi_fsmstate_transfer(int irq, struct mpc52xx_spi *ms, - u8 status, u8 data); -static int mpc52xx_spi_fsmstate_wait(int irq, struct mpc52xx_spi *ms, - u8 status, u8 data); - -/* - * IDLE state - * - * No transfers are in progress; if another transfer is pending then retrieve - * it and kick it off. Otherwise, stop processing the state machine - */ -static int -mpc52xx_spi_fsmstate_idle(int irq, struct mpc52xx_spi *ms, u8 status, u8 data) -{ - struct spi_device *spi; - int spr, sppr; - u8 ctrl1; - - if (status && (irq != NO_IRQ)) - dev_err(&ms->master->dev, "spurious irq, status=0x%.2x\n", - status); - - /* Check if there is another transfer waiting. */ - if (list_empty(&ms->queue)) - return FSM_STOP; - - /* get the head of the queue */ - ms->message = list_first_entry(&ms->queue, struct spi_message, queue); - list_del_init(&ms->message->queue); - - /* Setup the controller parameters */ - ctrl1 = SPI_CTRL1_SPIE | SPI_CTRL1_SPE | SPI_CTRL1_MSTR; - spi = ms->message->spi; - if (spi->mode & SPI_CPHA) - ctrl1 |= SPI_CTRL1_CPHA; - if (spi->mode & SPI_CPOL) - ctrl1 |= SPI_CTRL1_CPOL; - if (spi->mode & SPI_LSB_FIRST) - ctrl1 |= SPI_CTRL1_LSBFE; - out_8(ms->regs + SPI_CTRL1, ctrl1); - - /* Setup the controller speed */ - /* minimum divider is '2'. Also, add '1' to force rounding the - * divider up. */ - sppr = ((ms->ipb_freq / ms->message->spi->max_speed_hz) + 1) >> 1; - spr = 0; - if (sppr < 1) - sppr = 1; - while (((sppr - 1) & ~0x7) != 0) { - sppr = (sppr + 1) >> 1; /* add '1' to force rounding up */ - spr++; - } - sppr--; /* sppr quantity in register is offset by 1 */ - if (spr > 7) { - /* Don't overrun limits of SPI baudrate register */ - spr = 7; - sppr = 7; - } - out_8(ms->regs + SPI_BRR, sppr << 4 | spr); /* Set speed */ - - ms->cs_change = 1; - ms->transfer = container_of(ms->message->transfers.next, - struct spi_transfer, transfer_list); - - mpc52xx_spi_start_transfer(ms); - ms->state = mpc52xx_spi_fsmstate_transfer; - - return FSM_CONTINUE; -} - -/* - * TRANSFER state - * - * In the middle of a transfer. If the SPI core has completed processing - * a byte, then read out the received data and write out the next byte - * (unless this transfer is finished; in which case go on to the wait - * state) - */ -static int mpc52xx_spi_fsmstate_transfer(int irq, struct mpc52xx_spi *ms, - u8 status, u8 data) -{ - if (!status) - return ms->irq0 ? FSM_STOP : FSM_POLL; - - if (status & SPI_STATUS_WCOL) { - /* The SPI controller is stoopid. At slower speeds, it may - * raise the SPIF flag before the state machine is actually - * finished, which causes a collision (internal to the state - * machine only). The manual recommends inserting a delay - * between receiving the interrupt and sending the next byte, - * but it can also be worked around simply by retrying the - * transfer which is what we do here. 
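The divider setup in mpc52xx_spi_fsmstate_idle() above packs a prescaler (SPPR) and a power-of-two divider (SPR) into SPI_BRR so that the resulting SCK is roughly ipb_freq / ((SPPR + 1) * 2^(SPR + 1)), always rounding the divider up so the requested rate is never exceeded. A stand-alone sketch of that arithmetic (the 66 MHz IPB clock and 1 MHz request are example values only):

#include <stdio.h>

/* SPI_BRR layout: SPPR in bits 7:4, SPR in bits 2:0 */
static unsigned char mpc52xx_spi_brr(unsigned long ipb_freq, unsigned long speed_hz)
{
	unsigned int sppr, spr = 0;

	/* minimum divider is 2; the extra +1 forces rounding up */
	sppr = ((ipb_freq / speed_hz) + 1) >> 1;
	if (sppr < 1)
		sppr = 1;
	while (((sppr - 1) & ~0x7) != 0) {
		sppr = (sppr + 1) >> 1;		/* halve, again rounding up */
		spr++;
	}
	sppr--;					/* register field is offset by one */
	if (spr > 7)				/* don't overrun the register limits */
		sppr = spr = 7;

	return (unsigned char)(sppr << 4 | spr);
}

int main(void)
{
	unsigned char brr = mpc52xx_spi_brr(66000000, 1000000);

	printf("SPI_BRR = %#x (SPPR=%d SPR=%d)\n",
	       (unsigned int)brr, brr >> 4, brr & 0x7);
	return 0;
}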
*/ - ms->wcol_count++; - ms->wcol_ticks += get_tbl() - ms->wcol_tx_timestamp; - ms->wcol_tx_timestamp = get_tbl(); - data = 0; - if (ms->tx_buf) - data = *(ms->tx_buf - 1); - out_8(ms->regs + SPI_DATA, data); /* try again */ - return FSM_CONTINUE; - } else if (status & SPI_STATUS_MODF) { - ms->modf_count++; - dev_err(&ms->master->dev, "mode fault\n"); - mpc52xx_spi_chipsel(ms, 0); - ms->message->status = -EIO; - ms->message->complete(ms->message->context); - ms->state = mpc52xx_spi_fsmstate_idle; - return FSM_CONTINUE; - } - - /* Read data out of the spi device */ - ms->byte_count++; - if (ms->rx_buf) - *ms->rx_buf++ = data; - - /* Is the transfer complete? */ - ms->len--; - if (ms->len == 0) { - ms->timestamp = get_tbl(); - ms->timestamp += ms->transfer->delay_usecs * tb_ticks_per_usec; - ms->state = mpc52xx_spi_fsmstate_wait; - return FSM_CONTINUE; - } - - /* Write out the next byte */ - ms->wcol_tx_timestamp = get_tbl(); - if (ms->tx_buf) - out_8(ms->regs + SPI_DATA, *ms->tx_buf++); - else - out_8(ms->regs + SPI_DATA, 0); - - return FSM_CONTINUE; -} - -/* - * WAIT state - * - * A transfer has completed; need to wait for the delay period to complete - * before starting the next transfer - */ -static int -mpc52xx_spi_fsmstate_wait(int irq, struct mpc52xx_spi *ms, u8 status, u8 data) -{ - if (status && irq) - dev_err(&ms->master->dev, "spurious irq, status=0x%.2x\n", - status); - - if (((int)get_tbl()) - ms->timestamp < 0) - return FSM_POLL; - - ms->message->actual_length += ms->transfer->len; - - /* Check if there is another transfer in this message. If there - * aren't then deactivate CS, notify sender, and drop back to idle - * to start the next message. */ - if (ms->transfer->transfer_list.next == &ms->message->transfers) { - ms->msg_count++; - mpc52xx_spi_chipsel(ms, 0); - ms->message->status = 0; - ms->message->complete(ms->message->context); - ms->state = mpc52xx_spi_fsmstate_idle; - return FSM_CONTINUE; - } - - /* There is another transfer; kick it off */ - - if (ms->cs_change) - mpc52xx_spi_chipsel(ms, 0); - - ms->transfer = container_of(ms->transfer->transfer_list.next, - struct spi_transfer, transfer_list); - mpc52xx_spi_start_transfer(ms); - ms->state = mpc52xx_spi_fsmstate_transfer; - return FSM_CONTINUE; -} - -/** - * mpc52xx_spi_fsm_process - Finite State Machine iteration function - * @irq: irq number that triggered the FSM or 0 for polling - * @ms: pointer to mpc52xx_spi driver data - */ -static void mpc52xx_spi_fsm_process(int irq, struct mpc52xx_spi *ms) -{ - int rc = FSM_CONTINUE; - u8 status, data; - - while (rc == FSM_CONTINUE) { - /* Interrupt cleared by read of STATUS followed by - * read of DATA registers */ - status = in_8(ms->regs + SPI_STATUS); - data = in_8(ms->regs + SPI_DATA); - rc = ms->state(irq, ms, status, data); - } - - if (rc == FSM_POLL) - schedule_work(&ms->work); -} - -/** - * mpc52xx_spi_irq - IRQ handler - */ -static irqreturn_t mpc52xx_spi_irq(int irq, void *_ms) -{ - struct mpc52xx_spi *ms = _ms; - spin_lock(&ms->lock); - mpc52xx_spi_fsm_process(irq, ms); - spin_unlock(&ms->lock); - return IRQ_HANDLED; -} - -/** - * mpc52xx_spi_wq - Workqueue function for polling the state machine - */ -static void mpc52xx_spi_wq(struct work_struct *work) -{ - struct mpc52xx_spi *ms = container_of(work, struct mpc52xx_spi, work); - unsigned long flags; - - spin_lock_irqsave(&ms->lock, flags); - mpc52xx_spi_fsm_process(0, ms); - spin_unlock_irqrestore(&ms->lock, flags); -} - -/* - * spi_master ops - */ - -static int mpc52xx_spi_setup(struct spi_device *spi) -{ - 
if (spi->bits_per_word % 8) - return -EINVAL; - - if (spi->mode & ~(SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST)) - return -EINVAL; - - if (spi->chip_select >= spi->master->num_chipselect) - return -EINVAL; - - return 0; -} - -static int mpc52xx_spi_transfer(struct spi_device *spi, struct spi_message *m) -{ - struct mpc52xx_spi *ms = spi_master_get_devdata(spi->master); - unsigned long flags; - - m->actual_length = 0; - m->status = -EINPROGRESS; - - spin_lock_irqsave(&ms->lock, flags); - list_add_tail(&m->queue, &ms->queue); - spin_unlock_irqrestore(&ms->lock, flags); - schedule_work(&ms->work); - - return 0; -} - -/* - * OF Platform Bus Binding - */ -static int __devinit mpc52xx_spi_probe(struct platform_device *op) -{ - struct spi_master *master; - struct mpc52xx_spi *ms; - void __iomem *regs; - u8 ctrl1; - int rc, i = 0; - int gpio_cs; - - /* MMIO registers */ - dev_dbg(&op->dev, "probing mpc5200 SPI device\n"); - regs = of_iomap(op->dev.of_node, 0); - if (!regs) - return -ENODEV; - - /* initialize the device */ - ctrl1 = SPI_CTRL1_SPIE | SPI_CTRL1_SPE | SPI_CTRL1_MSTR; - out_8(regs + SPI_CTRL1, ctrl1); - out_8(regs + SPI_CTRL2, 0x0); - out_8(regs + SPI_DATADIR, 0xe); /* Set output pins */ - out_8(regs + SPI_PORTDATA, 0x8); /* Deassert /SS signal */ - - /* Clear the status register and re-read it to check for a MODF - * failure. This driver cannot currently handle multiple masters - * on the SPI bus. This fault will also occur if the SPI signals - * are not connected to any pins (port_config setting) */ - in_8(regs + SPI_STATUS); - out_8(regs + SPI_CTRL1, ctrl1); - - in_8(regs + SPI_DATA); - if (in_8(regs + SPI_STATUS) & SPI_STATUS_MODF) { - dev_err(&op->dev, "mode fault; is port_config correct?\n"); - rc = -EIO; - goto err_init; - } - - dev_dbg(&op->dev, "allocating spi_master struct\n"); - master = spi_alloc_master(&op->dev, sizeof *ms); - if (!master) { - rc = -ENOMEM; - goto err_alloc; - } - - master->bus_num = -1; - master->setup = mpc52xx_spi_setup; - master->transfer = mpc52xx_spi_transfer; - master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST; - master->dev.of_node = op->dev.of_node; - - dev_set_drvdata(&op->dev, master); - - ms = spi_master_get_devdata(master); - ms->master = master; - ms->regs = regs; - ms->irq0 = irq_of_parse_and_map(op->dev.of_node, 0); - ms->irq1 = irq_of_parse_and_map(op->dev.of_node, 1); - ms->state = mpc52xx_spi_fsmstate_idle; - ms->ipb_freq = mpc5xxx_get_bus_frequency(op->dev.of_node); - ms->gpio_cs_count = of_gpio_count(op->dev.of_node); - if (ms->gpio_cs_count > 0) { - master->num_chipselect = ms->gpio_cs_count; - ms->gpio_cs = kmalloc(ms->gpio_cs_count * sizeof(unsigned int), - GFP_KERNEL); - if (!ms->gpio_cs) { - rc = -ENOMEM; - goto err_alloc; - } - - for (i = 0; i < ms->gpio_cs_count; i++) { - gpio_cs = of_get_gpio(op->dev.of_node, i); - if (gpio_cs < 0) { - dev_err(&op->dev, - "could not parse the gpio field " - "in oftree\n"); - rc = -ENODEV; - goto err_gpio; - } - - rc = gpio_request(gpio_cs, dev_name(&op->dev)); - if (rc) { - dev_err(&op->dev, - "can't request spi cs gpio #%d " - "on gpio line %d\n", i, gpio_cs); - goto err_gpio; - } - - gpio_direction_output(gpio_cs, 1); - ms->gpio_cs[i] = gpio_cs; - } - } else { - master->num_chipselect = 1; - } - - spin_lock_init(&ms->lock); - INIT_LIST_HEAD(&ms->queue); - INIT_WORK(&ms->work, mpc52xx_spi_wq); - - /* Decide if interrupts can be used */ - if (ms->irq0 && ms->irq1) { - rc = request_irq(ms->irq0, mpc52xx_spi_irq, 0, - "mpc5200-spi-modf", ms); - rc |= request_irq(ms->irq1, mpc52xx_spi_irq, 0, - 
"mpc5200-spi-spif", ms); - if (rc) { - free_irq(ms->irq0, ms); - free_irq(ms->irq1, ms); - ms->irq0 = ms->irq1 = 0; - } - } else { - /* operate in polled mode */ - ms->irq0 = ms->irq1 = 0; - } - - if (!ms->irq0) - dev_info(&op->dev, "using polled mode\n"); - - dev_dbg(&op->dev, "registering spi_master struct\n"); - rc = spi_register_master(master); - if (rc) - goto err_register; - - dev_info(&ms->master->dev, "registered MPC5200 SPI bus\n"); - - return rc; - - err_register: - dev_err(&ms->master->dev, "initialization failed\n"); - spi_master_put(master); - err_gpio: - while (i-- > 0) - gpio_free(ms->gpio_cs[i]); - - kfree(ms->gpio_cs); - err_alloc: - err_init: - iounmap(regs); - return rc; -} - -static int __devexit mpc52xx_spi_remove(struct platform_device *op) -{ - struct spi_master *master = dev_get_drvdata(&op->dev); - struct mpc52xx_spi *ms = spi_master_get_devdata(master); - int i; - - free_irq(ms->irq0, ms); - free_irq(ms->irq1, ms); - - for (i = 0; i < ms->gpio_cs_count; i++) - gpio_free(ms->gpio_cs[i]); - - kfree(ms->gpio_cs); - spi_unregister_master(master); - spi_master_put(master); - iounmap(ms->regs); - - return 0; -} - -static const struct of_device_id mpc52xx_spi_match[] __devinitconst = { - { .compatible = "fsl,mpc5200-spi", }, - {} -}; -MODULE_DEVICE_TABLE(of, mpc52xx_spi_match); - -static struct platform_driver mpc52xx_spi_of_driver = { - .driver = { - .name = "mpc52xx-spi", - .owner = THIS_MODULE, - .of_match_table = mpc52xx_spi_match, - }, - .probe = mpc52xx_spi_probe, - .remove = __devexit_p(mpc52xx_spi_remove), -}; - -static int __init mpc52xx_spi_init(void) -{ - return platform_driver_register(&mpc52xx_spi_of_driver); -} -module_init(mpc52xx_spi_init); - -static void __exit mpc52xx_spi_exit(void) -{ - platform_driver_unregister(&mpc52xx_spi_of_driver); -} -module_exit(mpc52xx_spi_exit); - diff --git a/drivers/spi/omap2_mcspi.c b/drivers/spi/omap2_mcspi.c deleted file mode 100644 index 969cdd2..0000000 --- a/drivers/spi/omap2_mcspi.c +++ /dev/null @@ -1,1293 +0,0 @@ -/* - * OMAP2 McSPI controller driver - * - * Copyright (C) 2005, 2006 Nokia Corporation - * Author: Samuel Ortiz and - * Juha Yrj�l� - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include -#include -#include - -#define OMAP2_MCSPI_MAX_FREQ 48000000 - -/* OMAP2 has 3 SPI controllers, while OMAP3 has 4 */ -#define OMAP2_MCSPI_MAX_CTRL 4 - -#define OMAP2_MCSPI_REVISION 0x00 -#define OMAP2_MCSPI_SYSSTATUS 0x14 -#define OMAP2_MCSPI_IRQSTATUS 0x18 -#define OMAP2_MCSPI_IRQENABLE 0x1c -#define OMAP2_MCSPI_WAKEUPENABLE 0x20 -#define OMAP2_MCSPI_SYST 0x24 -#define OMAP2_MCSPI_MODULCTRL 0x28 - -/* per-channel banks, 0x14 bytes each, first is: */ -#define OMAP2_MCSPI_CHCONF0 0x2c -#define OMAP2_MCSPI_CHSTAT0 0x30 -#define OMAP2_MCSPI_CHCTRL0 0x34 -#define OMAP2_MCSPI_TX0 0x38 -#define OMAP2_MCSPI_RX0 0x3c - -/* per-register bitmasks: */ - -#define OMAP2_MCSPI_MODULCTRL_SINGLE BIT(0) -#define OMAP2_MCSPI_MODULCTRL_MS BIT(2) -#define OMAP2_MCSPI_MODULCTRL_STEST BIT(3) - -#define OMAP2_MCSPI_CHCONF_PHA BIT(0) -#define OMAP2_MCSPI_CHCONF_POL BIT(1) -#define OMAP2_MCSPI_CHCONF_CLKD_MASK (0x0f << 2) -#define OMAP2_MCSPI_CHCONF_EPOL BIT(6) -#define OMAP2_MCSPI_CHCONF_WL_MASK (0x1f << 7) -#define OMAP2_MCSPI_CHCONF_TRM_RX_ONLY BIT(12) -#define OMAP2_MCSPI_CHCONF_TRM_TX_ONLY BIT(13) -#define OMAP2_MCSPI_CHCONF_TRM_MASK (0x03 << 12) -#define OMAP2_MCSPI_CHCONF_DMAW BIT(14) -#define OMAP2_MCSPI_CHCONF_DMAR BIT(15) -#define OMAP2_MCSPI_CHCONF_DPE0 BIT(16) -#define OMAP2_MCSPI_CHCONF_DPE1 BIT(17) -#define OMAP2_MCSPI_CHCONF_IS BIT(18) -#define OMAP2_MCSPI_CHCONF_TURBO BIT(19) -#define OMAP2_MCSPI_CHCONF_FORCE BIT(20) - -#define OMAP2_MCSPI_CHSTAT_RXS BIT(0) -#define OMAP2_MCSPI_CHSTAT_TXS BIT(1) -#define OMAP2_MCSPI_CHSTAT_EOT BIT(2) - -#define OMAP2_MCSPI_CHCTRL_EN BIT(0) - -#define OMAP2_MCSPI_WAKEUPENABLE_WKEN BIT(0) - -/* We have 2 DMA channels per CS, one for RX and one for TX */ -struct omap2_mcspi_dma { - int dma_tx_channel; - int dma_rx_channel; - - int dma_tx_sync_dev; - int dma_rx_sync_dev; - - struct completion dma_tx_completion; - struct completion dma_rx_completion; -}; - -/* use PIO for small transfers, avoiding DMA setup/teardown overhead and - * cache operations; better heuristics consider wordsize and bitrate. - */ -#define DMA_MIN_BYTES 160 - - -struct omap2_mcspi { - struct work_struct work; - /* lock protects queue and registers */ - spinlock_t lock; - struct list_head msg_queue; - struct spi_master *master; - /* Virtual base address of the controller */ - void __iomem *base; - unsigned long phys; - /* SPI1 has 4 channels, while SPI2 has 2 */ - struct omap2_mcspi_dma *dma_channels; - struct device *dev; -}; - -struct omap2_mcspi_cs { - void __iomem *base; - unsigned long phys; - int word_len; - struct list_head node; - /* Context save and restore shadow register */ - u32 chconf0; -}; - -/* used for context save and restore, structure members to be updated whenever - * corresponding registers are modified. 
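/*
 * Illustration only -- a minimal sketch of how the CHCONF0 masks above are
 * used, assuming the 48 MHz ceiling from OMAP2_MCSPI_MAX_FREQ; the ex_*
 * helper below is invented for this example and is not a symbol from the
 * driver.  The word-length field stores "bits - 1" starting at bit 7, and
 * the clock divider picked further down (the first div with
 * speed_hz >= 48 MHz >> div) lands in the CLKD field at bit 2.  For a
 * 16-bit word at a requested 10 MHz, div = 3 is chosen, giving an actual
 * SCK of 48 MHz / 2^3 = 6 MHz.
 */
static inline u32 ex_mcspi_pack_chconf0(unsigned int word_len, unsigned int div)
{
	u32 l = 0;

	l |= (word_len - 1) << 7;	/* WL field, bits 7..11 */
	l |= div << 2;			/* CLKD field, bits 2..5 */
	return l;			/* word_len = 16, div = 3 -> 0x78c */
}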
- */ -struct omap2_mcspi_regs { - u32 modulctrl; - u32 wakeupenable; - struct list_head cs; -}; - -static struct omap2_mcspi_regs omap2_mcspi_ctx[OMAP2_MCSPI_MAX_CTRL]; - -static struct workqueue_struct *omap2_mcspi_wq; - -#define MOD_REG_BIT(val, mask, set) do { \ - if (set) \ - val |= mask; \ - else \ - val &= ~mask; \ -} while (0) - -static inline void mcspi_write_reg(struct spi_master *master, - int idx, u32 val) -{ - struct omap2_mcspi *mcspi = spi_master_get_devdata(master); - - __raw_writel(val, mcspi->base + idx); -} - -static inline u32 mcspi_read_reg(struct spi_master *master, int idx) -{ - struct omap2_mcspi *mcspi = spi_master_get_devdata(master); - - return __raw_readl(mcspi->base + idx); -} - -static inline void mcspi_write_cs_reg(const struct spi_device *spi, - int idx, u32 val) -{ - struct omap2_mcspi_cs *cs = spi->controller_state; - - __raw_writel(val, cs->base + idx); -} - -static inline u32 mcspi_read_cs_reg(const struct spi_device *spi, int idx) -{ - struct omap2_mcspi_cs *cs = spi->controller_state; - - return __raw_readl(cs->base + idx); -} - -static inline u32 mcspi_cached_chconf0(const struct spi_device *spi) -{ - struct omap2_mcspi_cs *cs = spi->controller_state; - - return cs->chconf0; -} - -static inline void mcspi_write_chconf0(const struct spi_device *spi, u32 val) -{ - struct omap2_mcspi_cs *cs = spi->controller_state; - - cs->chconf0 = val; - mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCONF0, val); - mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0); -} - -static void omap2_mcspi_set_dma_req(const struct spi_device *spi, - int is_read, int enable) -{ - u32 l, rw; - - l = mcspi_cached_chconf0(spi); - - if (is_read) /* 1 is read, 0 write */ - rw = OMAP2_MCSPI_CHCONF_DMAR; - else - rw = OMAP2_MCSPI_CHCONF_DMAW; - - MOD_REG_BIT(l, rw, enable); - mcspi_write_chconf0(spi, l); -} - -static void omap2_mcspi_set_enable(const struct spi_device *spi, int enable) -{ - u32 l; - - l = enable ? 
OMAP2_MCSPI_CHCTRL_EN : 0; - mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCTRL0, l); - /* Flash post-writes */ - mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCTRL0); -} - -static void omap2_mcspi_force_cs(struct spi_device *spi, int cs_active) -{ - u32 l; - - l = mcspi_cached_chconf0(spi); - MOD_REG_BIT(l, OMAP2_MCSPI_CHCONF_FORCE, cs_active); - mcspi_write_chconf0(spi, l); -} - -static void omap2_mcspi_set_master_mode(struct spi_master *master) -{ - u32 l; - - /* setup when switching from (reset default) slave mode - * to single-channel master mode - */ - l = mcspi_read_reg(master, OMAP2_MCSPI_MODULCTRL); - MOD_REG_BIT(l, OMAP2_MCSPI_MODULCTRL_STEST, 0); - MOD_REG_BIT(l, OMAP2_MCSPI_MODULCTRL_MS, 0); - MOD_REG_BIT(l, OMAP2_MCSPI_MODULCTRL_SINGLE, 1); - mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, l); - - omap2_mcspi_ctx[master->bus_num - 1].modulctrl = l; -} - -static void omap2_mcspi_restore_ctx(struct omap2_mcspi *mcspi) -{ - struct spi_master *spi_cntrl; - struct omap2_mcspi_cs *cs; - spi_cntrl = mcspi->master; - - /* McSPI: context restore */ - mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_MODULCTRL, - omap2_mcspi_ctx[spi_cntrl->bus_num - 1].modulctrl); - - mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_WAKEUPENABLE, - omap2_mcspi_ctx[spi_cntrl->bus_num - 1].wakeupenable); - - list_for_each_entry(cs, &omap2_mcspi_ctx[spi_cntrl->bus_num - 1].cs, - node) - __raw_writel(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0); -} -static void omap2_mcspi_disable_clocks(struct omap2_mcspi *mcspi) -{ - pm_runtime_put_sync(mcspi->dev); -} - -static int omap2_mcspi_enable_clocks(struct omap2_mcspi *mcspi) -{ - return pm_runtime_get_sync(mcspi->dev); -} - -static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit) -{ - unsigned long timeout; - - timeout = jiffies + msecs_to_jiffies(1000); - while (!(__raw_readl(reg) & bit)) { - if (time_after(jiffies, timeout)) - return -1; - cpu_relax(); - } - return 0; -} - -static unsigned -omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer) -{ - struct omap2_mcspi *mcspi; - struct omap2_mcspi_cs *cs = spi->controller_state; - struct omap2_mcspi_dma *mcspi_dma; - unsigned int count, c; - unsigned long base, tx_reg, rx_reg; - int word_len, data_type, element_count; - int elements = 0; - u32 l; - u8 * rx; - const u8 * tx; - void __iomem *chstat_reg; - - mcspi = spi_master_get_devdata(spi->master); - mcspi_dma = &mcspi->dma_channels[spi->chip_select]; - l = mcspi_cached_chconf0(spi); - - chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0; - - count = xfer->len; - c = count; - word_len = cs->word_len; - - base = cs->phys; - tx_reg = base + OMAP2_MCSPI_TX0; - rx_reg = base + OMAP2_MCSPI_RX0; - rx = xfer->rx_buf; - tx = xfer->tx_buf; - - if (word_len <= 8) { - data_type = OMAP_DMA_DATA_TYPE_S8; - element_count = count; - } else if (word_len <= 16) { - data_type = OMAP_DMA_DATA_TYPE_S16; - element_count = count >> 1; - } else /* word_len <= 32 */ { - data_type = OMAP_DMA_DATA_TYPE_S32; - element_count = count >> 2; - } - - if (tx != NULL) { - omap_set_dma_transfer_params(mcspi_dma->dma_tx_channel, - data_type, element_count, 1, - OMAP_DMA_SYNC_ELEMENT, - mcspi_dma->dma_tx_sync_dev, 0); - - omap_set_dma_dest_params(mcspi_dma->dma_tx_channel, 0, - OMAP_DMA_AMODE_CONSTANT, - tx_reg, 0, 0); - - omap_set_dma_src_params(mcspi_dma->dma_tx_channel, 0, - OMAP_DMA_AMODE_POST_INC, - xfer->tx_dma, 0, 0); - } - - if (rx != NULL) { - elements = element_count - 1; - if (l & OMAP2_MCSPI_CHCONF_TURBO) - elements--; - - omap_set_dma_transfer_params(mcspi_dma->dma_rx_channel, - data_type, 
elements, 1, - OMAP_DMA_SYNC_ELEMENT, - mcspi_dma->dma_rx_sync_dev, 1); - - omap_set_dma_src_params(mcspi_dma->dma_rx_channel, 0, - OMAP_DMA_AMODE_CONSTANT, - rx_reg, 0, 0); - - omap_set_dma_dest_params(mcspi_dma->dma_rx_channel, 0, - OMAP_DMA_AMODE_POST_INC, - xfer->rx_dma, 0, 0); - } - - if (tx != NULL) { - omap_start_dma(mcspi_dma->dma_tx_channel); - omap2_mcspi_set_dma_req(spi, 0, 1); - } - - if (rx != NULL) { - omap_start_dma(mcspi_dma->dma_rx_channel); - omap2_mcspi_set_dma_req(spi, 1, 1); - } - - if (tx != NULL) { - wait_for_completion(&mcspi_dma->dma_tx_completion); - dma_unmap_single(&spi->dev, xfer->tx_dma, count, DMA_TO_DEVICE); - - /* for TX_ONLY mode, be sure all words have shifted out */ - if (rx == NULL) { - if (mcspi_wait_for_reg_bit(chstat_reg, - OMAP2_MCSPI_CHSTAT_TXS) < 0) - dev_err(&spi->dev, "TXS timed out\n"); - else if (mcspi_wait_for_reg_bit(chstat_reg, - OMAP2_MCSPI_CHSTAT_EOT) < 0) - dev_err(&spi->dev, "EOT timed out\n"); - } - } - - if (rx != NULL) { - wait_for_completion(&mcspi_dma->dma_rx_completion); - dma_unmap_single(&spi->dev, xfer->rx_dma, count, DMA_FROM_DEVICE); - omap2_mcspi_set_enable(spi, 0); - - if (l & OMAP2_MCSPI_CHCONF_TURBO) { - - if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0) - & OMAP2_MCSPI_CHSTAT_RXS)) { - u32 w; - - w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0); - if (word_len <= 8) - ((u8 *)xfer->rx_buf)[elements++] = w; - else if (word_len <= 16) - ((u16 *)xfer->rx_buf)[elements++] = w; - else /* word_len <= 32 */ - ((u32 *)xfer->rx_buf)[elements++] = w; - } else { - dev_err(&spi->dev, - "DMA RX penultimate word empty"); - count -= (word_len <= 8) ? 2 : - (word_len <= 16) ? 4 : - /* word_len <= 32 */ 8; - omap2_mcspi_set_enable(spi, 1); - return count; - } - } - - if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0) - & OMAP2_MCSPI_CHSTAT_RXS)) { - u32 w; - - w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0); - if (word_len <= 8) - ((u8 *)xfer->rx_buf)[elements] = w; - else if (word_len <= 16) - ((u16 *)xfer->rx_buf)[elements] = w; - else /* word_len <= 32 */ - ((u32 *)xfer->rx_buf)[elements] = w; - } else { - dev_err(&spi->dev, "DMA RX last word empty"); - count -= (word_len <= 8) ? 1 : - (word_len <= 16) ? 2 : - /* word_len <= 32 */ 4; - } - omap2_mcspi_set_enable(spi, 1); - } - return count; -} - -static unsigned -omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) -{ - struct omap2_mcspi *mcspi; - struct omap2_mcspi_cs *cs = spi->controller_state; - unsigned int count, c; - u32 l; - void __iomem *base = cs->base; - void __iomem *tx_reg; - void __iomem *rx_reg; - void __iomem *chstat_reg; - int word_len; - - mcspi = spi_master_get_devdata(spi->master); - count = xfer->len; - c = count; - word_len = cs->word_len; - - l = mcspi_cached_chconf0(spi); - - /* We store the pre-calculated register addresses on stack to speed - * up the transfer loop. 
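/*
 * Illustration only -- the DMA path above converts the transfer's byte
 * count into DMA elements according to the channel word length before
 * programming omap_set_dma_transfer_params().  A minimal sketch of that
 * mapping (the ex_* name is invented for this example):
 */
static void ex_mcspi_dma_sizing(int word_len, unsigned int len_bytes,
				int *bytes_per_element, unsigned int *elements)
{
	if (word_len <= 8) {			/* S8: one byte per element */
		*bytes_per_element = 1;
		*elements = len_bytes;
	} else if (word_len <= 16) {		/* S16: two bytes per element */
		*bytes_per_element = 2;
		*elements = len_bytes >> 1;
	} else {				/* word_len <= 32: S32 */
		*bytes_per_element = 4;
		*elements = len_bytes >> 2;	/* e.g. 160 bytes -> 40 elements */
	}
}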
*/ - tx_reg = base + OMAP2_MCSPI_TX0; - rx_reg = base + OMAP2_MCSPI_RX0; - chstat_reg = base + OMAP2_MCSPI_CHSTAT0; - - if (c < (word_len>>3)) - return 0; - - if (word_len <= 8) { - u8 *rx; - const u8 *tx; - - rx = xfer->rx_buf; - tx = xfer->tx_buf; - - do { - c -= 1; - if (tx != NULL) { - if (mcspi_wait_for_reg_bit(chstat_reg, - OMAP2_MCSPI_CHSTAT_TXS) < 0) { - dev_err(&spi->dev, "TXS timed out\n"); - goto out; - } - dev_vdbg(&spi->dev, "write-%d %02x\n", - word_len, *tx); - __raw_writel(*tx++, tx_reg); - } - if (rx != NULL) { - if (mcspi_wait_for_reg_bit(chstat_reg, - OMAP2_MCSPI_CHSTAT_RXS) < 0) { - dev_err(&spi->dev, "RXS timed out\n"); - goto out; - } - - if (c == 1 && tx == NULL && - (l & OMAP2_MCSPI_CHCONF_TURBO)) { - omap2_mcspi_set_enable(spi, 0); - *rx++ = __raw_readl(rx_reg); - dev_vdbg(&spi->dev, "read-%d %02x\n", - word_len, *(rx - 1)); - if (mcspi_wait_for_reg_bit(chstat_reg, - OMAP2_MCSPI_CHSTAT_RXS) < 0) { - dev_err(&spi->dev, - "RXS timed out\n"); - goto out; - } - c = 0; - } else if (c == 0 && tx == NULL) { - omap2_mcspi_set_enable(spi, 0); - } - - *rx++ = __raw_readl(rx_reg); - dev_vdbg(&spi->dev, "read-%d %02x\n", - word_len, *(rx - 1)); - } - } while (c); - } else if (word_len <= 16) { - u16 *rx; - const u16 *tx; - - rx = xfer->rx_buf; - tx = xfer->tx_buf; - do { - c -= 2; - if (tx != NULL) { - if (mcspi_wait_for_reg_bit(chstat_reg, - OMAP2_MCSPI_CHSTAT_TXS) < 0) { - dev_err(&spi->dev, "TXS timed out\n"); - goto out; - } - dev_vdbg(&spi->dev, "write-%d %04x\n", - word_len, *tx); - __raw_writel(*tx++, tx_reg); - } - if (rx != NULL) { - if (mcspi_wait_for_reg_bit(chstat_reg, - OMAP2_MCSPI_CHSTAT_RXS) < 0) { - dev_err(&spi->dev, "RXS timed out\n"); - goto out; - } - - if (c == 2 && tx == NULL && - (l & OMAP2_MCSPI_CHCONF_TURBO)) { - omap2_mcspi_set_enable(spi, 0); - *rx++ = __raw_readl(rx_reg); - dev_vdbg(&spi->dev, "read-%d %04x\n", - word_len, *(rx - 1)); - if (mcspi_wait_for_reg_bit(chstat_reg, - OMAP2_MCSPI_CHSTAT_RXS) < 0) { - dev_err(&spi->dev, - "RXS timed out\n"); - goto out; - } - c = 0; - } else if (c == 0 && tx == NULL) { - omap2_mcspi_set_enable(spi, 0); - } - - *rx++ = __raw_readl(rx_reg); - dev_vdbg(&spi->dev, "read-%d %04x\n", - word_len, *(rx - 1)); - } - } while (c >= 2); - } else if (word_len <= 32) { - u32 *rx; - const u32 *tx; - - rx = xfer->rx_buf; - tx = xfer->tx_buf; - do { - c -= 4; - if (tx != NULL) { - if (mcspi_wait_for_reg_bit(chstat_reg, - OMAP2_MCSPI_CHSTAT_TXS) < 0) { - dev_err(&spi->dev, "TXS timed out\n"); - goto out; - } - dev_vdbg(&spi->dev, "write-%d %08x\n", - word_len, *tx); - __raw_writel(*tx++, tx_reg); - } - if (rx != NULL) { - if (mcspi_wait_for_reg_bit(chstat_reg, - OMAP2_MCSPI_CHSTAT_RXS) < 0) { - dev_err(&spi->dev, "RXS timed out\n"); - goto out; - } - - if (c == 4 && tx == NULL && - (l & OMAP2_MCSPI_CHCONF_TURBO)) { - omap2_mcspi_set_enable(spi, 0); - *rx++ = __raw_readl(rx_reg); - dev_vdbg(&spi->dev, "read-%d %08x\n", - word_len, *(rx - 1)); - if (mcspi_wait_for_reg_bit(chstat_reg, - OMAP2_MCSPI_CHSTAT_RXS) < 0) { - dev_err(&spi->dev, - "RXS timed out\n"); - goto out; - } - c = 0; - } else if (c == 0 && tx == NULL) { - omap2_mcspi_set_enable(spi, 0); - } - - *rx++ = __raw_readl(rx_reg); - dev_vdbg(&spi->dev, "read-%d %08x\n", - word_len, *(rx - 1)); - } - } while (c >= 4); - } - - /* for TX_ONLY mode, be sure all words have shifted out */ - if (xfer->rx_buf == NULL) { - if (mcspi_wait_for_reg_bit(chstat_reg, - OMAP2_MCSPI_CHSTAT_TXS) < 0) { - dev_err(&spi->dev, "TXS timed out\n"); - } else if 
(mcspi_wait_for_reg_bit(chstat_reg, - OMAP2_MCSPI_CHSTAT_EOT) < 0) - dev_err(&spi->dev, "EOT timed out\n"); - - /* disable chan to purge rx datas received in TX_ONLY transfer, - * otherwise these rx datas will affect the direct following - * RX_ONLY transfer. - */ - omap2_mcspi_set_enable(spi, 0); - } -out: - omap2_mcspi_set_enable(spi, 1); - return count - c; -} - -static u32 omap2_mcspi_calc_divisor(u32 speed_hz) -{ - u32 div; - - for (div = 0; div < 15; div++) - if (speed_hz >= (OMAP2_MCSPI_MAX_FREQ >> div)) - return div; - - return 15; -} - -/* called only when no transfer is active to this device */ -static int omap2_mcspi_setup_transfer(struct spi_device *spi, - struct spi_transfer *t) -{ - struct omap2_mcspi_cs *cs = spi->controller_state; - struct omap2_mcspi *mcspi; - struct spi_master *spi_cntrl; - u32 l = 0, div = 0; - u8 word_len = spi->bits_per_word; - u32 speed_hz = spi->max_speed_hz; - - mcspi = spi_master_get_devdata(spi->master); - spi_cntrl = mcspi->master; - - if (t != NULL && t->bits_per_word) - word_len = t->bits_per_word; - - cs->word_len = word_len; - - if (t && t->speed_hz) - speed_hz = t->speed_hz; - - speed_hz = min_t(u32, speed_hz, OMAP2_MCSPI_MAX_FREQ); - div = omap2_mcspi_calc_divisor(speed_hz); - - l = mcspi_cached_chconf0(spi); - - /* standard 4-wire master mode: SCK, MOSI/out, MISO/in, nCS - * REVISIT: this controller could support SPI_3WIRE mode. - */ - l &= ~(OMAP2_MCSPI_CHCONF_IS|OMAP2_MCSPI_CHCONF_DPE1); - l |= OMAP2_MCSPI_CHCONF_DPE0; - - /* wordlength */ - l &= ~OMAP2_MCSPI_CHCONF_WL_MASK; - l |= (word_len - 1) << 7; - - /* set chipselect polarity; manage with FORCE */ - if (!(spi->mode & SPI_CS_HIGH)) - l |= OMAP2_MCSPI_CHCONF_EPOL; /* active-low; normal */ - else - l &= ~OMAP2_MCSPI_CHCONF_EPOL; - - /* set clock divisor */ - l &= ~OMAP2_MCSPI_CHCONF_CLKD_MASK; - l |= div << 2; - - /* set SPI mode 0..3 */ - if (spi->mode & SPI_CPOL) - l |= OMAP2_MCSPI_CHCONF_POL; - else - l &= ~OMAP2_MCSPI_CHCONF_POL; - if (spi->mode & SPI_CPHA) - l |= OMAP2_MCSPI_CHCONF_PHA; - else - l &= ~OMAP2_MCSPI_CHCONF_PHA; - - mcspi_write_chconf0(spi, l); - - dev_dbg(&spi->dev, "setup: speed %d, sample %s edge, clk %s\n", - OMAP2_MCSPI_MAX_FREQ >> div, - (spi->mode & SPI_CPHA) ? "trailing" : "leading", - (spi->mode & SPI_CPOL) ? 
"inverted" : "normal"); - - return 0; -} - -static void omap2_mcspi_dma_rx_callback(int lch, u16 ch_status, void *data) -{ - struct spi_device *spi = data; - struct omap2_mcspi *mcspi; - struct omap2_mcspi_dma *mcspi_dma; - - mcspi = spi_master_get_devdata(spi->master); - mcspi_dma = &(mcspi->dma_channels[spi->chip_select]); - - complete(&mcspi_dma->dma_rx_completion); - - /* We must disable the DMA RX request */ - omap2_mcspi_set_dma_req(spi, 1, 0); -} - -static void omap2_mcspi_dma_tx_callback(int lch, u16 ch_status, void *data) -{ - struct spi_device *spi = data; - struct omap2_mcspi *mcspi; - struct omap2_mcspi_dma *mcspi_dma; - - mcspi = spi_master_get_devdata(spi->master); - mcspi_dma = &(mcspi->dma_channels[spi->chip_select]); - - complete(&mcspi_dma->dma_tx_completion); - - /* We must disable the DMA TX request */ - omap2_mcspi_set_dma_req(spi, 0, 0); -} - -static int omap2_mcspi_request_dma(struct spi_device *spi) -{ - struct spi_master *master = spi->master; - struct omap2_mcspi *mcspi; - struct omap2_mcspi_dma *mcspi_dma; - - mcspi = spi_master_get_devdata(master); - mcspi_dma = mcspi->dma_channels + spi->chip_select; - - if (omap_request_dma(mcspi_dma->dma_rx_sync_dev, "McSPI RX", - omap2_mcspi_dma_rx_callback, spi, - &mcspi_dma->dma_rx_channel)) { - dev_err(&spi->dev, "no RX DMA channel for McSPI\n"); - return -EAGAIN; - } - - if (omap_request_dma(mcspi_dma->dma_tx_sync_dev, "McSPI TX", - omap2_mcspi_dma_tx_callback, spi, - &mcspi_dma->dma_tx_channel)) { - omap_free_dma(mcspi_dma->dma_rx_channel); - mcspi_dma->dma_rx_channel = -1; - dev_err(&spi->dev, "no TX DMA channel for McSPI\n"); - return -EAGAIN; - } - - init_completion(&mcspi_dma->dma_rx_completion); - init_completion(&mcspi_dma->dma_tx_completion); - - return 0; -} - -static int omap2_mcspi_setup(struct spi_device *spi) -{ - int ret; - struct omap2_mcspi *mcspi; - struct omap2_mcspi_dma *mcspi_dma; - struct omap2_mcspi_cs *cs = spi->controller_state; - - if (spi->bits_per_word < 4 || spi->bits_per_word > 32) { - dev_dbg(&spi->dev, "setup: unsupported %d bit words\n", - spi->bits_per_word); - return -EINVAL; - } - - mcspi = spi_master_get_devdata(spi->master); - mcspi_dma = &mcspi->dma_channels[spi->chip_select]; - - if (!cs) { - cs = kzalloc(sizeof *cs, GFP_KERNEL); - if (!cs) - return -ENOMEM; - cs->base = mcspi->base + spi->chip_select * 0x14; - cs->phys = mcspi->phys + spi->chip_select * 0x14; - cs->chconf0 = 0; - spi->controller_state = cs; - /* Link this to context save list */ - list_add_tail(&cs->node, - &omap2_mcspi_ctx[mcspi->master->bus_num - 1].cs); - } - - if (mcspi_dma->dma_rx_channel == -1 - || mcspi_dma->dma_tx_channel == -1) { - ret = omap2_mcspi_request_dma(spi); - if (ret < 0) - return ret; - } - - ret = omap2_mcspi_enable_clocks(mcspi); - if (ret < 0) - return ret; - - ret = omap2_mcspi_setup_transfer(spi, NULL); - omap2_mcspi_disable_clocks(mcspi); - - return ret; -} - -static void omap2_mcspi_cleanup(struct spi_device *spi) -{ - struct omap2_mcspi *mcspi; - struct omap2_mcspi_dma *mcspi_dma; - struct omap2_mcspi_cs *cs; - - mcspi = spi_master_get_devdata(spi->master); - - if (spi->controller_state) { - /* Unlink controller state from context save list */ - cs = spi->controller_state; - list_del(&cs->node); - - kfree(spi->controller_state); - } - - if (spi->chip_select < spi->master->num_chipselect) { - mcspi_dma = &mcspi->dma_channels[spi->chip_select]; - - if (mcspi_dma->dma_rx_channel != -1) { - omap_free_dma(mcspi_dma->dma_rx_channel); - mcspi_dma->dma_rx_channel = -1; - } - if 
(mcspi_dma->dma_tx_channel != -1) { - omap_free_dma(mcspi_dma->dma_tx_channel); - mcspi_dma->dma_tx_channel = -1; - } - } -} - -static void omap2_mcspi_work(struct work_struct *work) -{ - struct omap2_mcspi *mcspi; - - mcspi = container_of(work, struct omap2_mcspi, work); - - if (omap2_mcspi_enable_clocks(mcspi) < 0) - return; - - spin_lock_irq(&mcspi->lock); - - /* We only enable one channel at a time -- the one whose message is - * at the head of the queue -- although this controller would gladly - * arbitrate among multiple channels. This corresponds to "single - * channel" master mode. As a side effect, we need to manage the - * chipselect with the FORCE bit ... CS != channel enable. - */ - while (!list_empty(&mcspi->msg_queue)) { - struct spi_message *m; - struct spi_device *spi; - struct spi_transfer *t = NULL; - int cs_active = 0; - struct omap2_mcspi_cs *cs; - struct omap2_mcspi_device_config *cd; - int par_override = 0; - int status = 0; - u32 chconf; - - m = container_of(mcspi->msg_queue.next, struct spi_message, - queue); - - list_del_init(&m->queue); - spin_unlock_irq(&mcspi->lock); - - spi = m->spi; - cs = spi->controller_state; - cd = spi->controller_data; - - omap2_mcspi_set_enable(spi, 1); - list_for_each_entry(t, &m->transfers, transfer_list) { - if (t->tx_buf == NULL && t->rx_buf == NULL && t->len) { - status = -EINVAL; - break; - } - if (par_override || t->speed_hz || t->bits_per_word) { - par_override = 1; - status = omap2_mcspi_setup_transfer(spi, t); - if (status < 0) - break; - if (!t->speed_hz && !t->bits_per_word) - par_override = 0; - } - - if (!cs_active) { - omap2_mcspi_force_cs(spi, 1); - cs_active = 1; - } - - chconf = mcspi_cached_chconf0(spi); - chconf &= ~OMAP2_MCSPI_CHCONF_TRM_MASK; - chconf &= ~OMAP2_MCSPI_CHCONF_TURBO; - - if (t->tx_buf == NULL) - chconf |= OMAP2_MCSPI_CHCONF_TRM_RX_ONLY; - else if (t->rx_buf == NULL) - chconf |= OMAP2_MCSPI_CHCONF_TRM_TX_ONLY; - - if (cd && cd->turbo_mode && t->tx_buf == NULL) { - /* Turbo mode is for more than one word */ - if (t->len > ((cs->word_len + 7) >> 3)) - chconf |= OMAP2_MCSPI_CHCONF_TURBO; - } - - mcspi_write_chconf0(spi, chconf); - - if (t->len) { - unsigned count; - - /* RX_ONLY mode needs dummy data in TX reg */ - if (t->tx_buf == NULL) - __raw_writel(0, cs->base - + OMAP2_MCSPI_TX0); - - if (m->is_dma_mapped || t->len >= DMA_MIN_BYTES) - count = omap2_mcspi_txrx_dma(spi, t); - else - count = omap2_mcspi_txrx_pio(spi, t); - m->actual_length += count; - - if (count != t->len) { - status = -EIO; - break; - } - } - - if (t->delay_usecs) - udelay(t->delay_usecs); - - /* ignore the "leave it on after last xfer" hint */ - if (t->cs_change) { - omap2_mcspi_force_cs(spi, 0); - cs_active = 0; - } - } - - /* Restore defaults if they were overriden */ - if (par_override) { - par_override = 0; - status = omap2_mcspi_setup_transfer(spi, NULL); - } - - if (cs_active) - omap2_mcspi_force_cs(spi, 0); - - omap2_mcspi_set_enable(spi, 0); - - m->status = status; - m->complete(m->context); - - spin_lock_irq(&mcspi->lock); - } - - spin_unlock_irq(&mcspi->lock); - - omap2_mcspi_disable_clocks(mcspi); -} - -static int omap2_mcspi_transfer(struct spi_device *spi, struct spi_message *m) -{ - struct omap2_mcspi *mcspi; - unsigned long flags; - struct spi_transfer *t; - - m->actual_length = 0; - m->status = 0; - - /* reject invalid messages and transfers */ - if (list_empty(&m->transfers) || !m->complete) - return -EINVAL; - list_for_each_entry(t, &m->transfers, transfer_list) { - const void *tx_buf = t->tx_buf; - void *rx_buf = 
t->rx_buf; - unsigned len = t->len; - - if (t->speed_hz > OMAP2_MCSPI_MAX_FREQ - || (len && !(rx_buf || tx_buf)) - || (t->bits_per_word && - ( t->bits_per_word < 4 - || t->bits_per_word > 32))) { - dev_dbg(&spi->dev, "transfer: %d Hz, %d %s%s, %d bpw\n", - t->speed_hz, - len, - tx_buf ? "tx" : "", - rx_buf ? "rx" : "", - t->bits_per_word); - return -EINVAL; - } - if (t->speed_hz && t->speed_hz < (OMAP2_MCSPI_MAX_FREQ >> 15)) { - dev_dbg(&spi->dev, "speed_hz %d below minimum %d Hz\n", - t->speed_hz, - OMAP2_MCSPI_MAX_FREQ >> 15); - return -EINVAL; - } - - if (m->is_dma_mapped || len < DMA_MIN_BYTES) - continue; - - if (tx_buf != NULL) { - t->tx_dma = dma_map_single(&spi->dev, (void *) tx_buf, - len, DMA_TO_DEVICE); - if (dma_mapping_error(&spi->dev, t->tx_dma)) { - dev_dbg(&spi->dev, "dma %cX %d bytes error\n", - 'T', len); - return -EINVAL; - } - } - if (rx_buf != NULL) { - t->rx_dma = dma_map_single(&spi->dev, rx_buf, t->len, - DMA_FROM_DEVICE); - if (dma_mapping_error(&spi->dev, t->rx_dma)) { - dev_dbg(&spi->dev, "dma %cX %d bytes error\n", - 'R', len); - if (tx_buf != NULL) - dma_unmap_single(&spi->dev, t->tx_dma, - len, DMA_TO_DEVICE); - return -EINVAL; - } - } - } - - mcspi = spi_master_get_devdata(spi->master); - - spin_lock_irqsave(&mcspi->lock, flags); - list_add_tail(&m->queue, &mcspi->msg_queue); - queue_work(omap2_mcspi_wq, &mcspi->work); - spin_unlock_irqrestore(&mcspi->lock, flags); - - return 0; -} - -static int __init omap2_mcspi_master_setup(struct omap2_mcspi *mcspi) -{ - struct spi_master *master = mcspi->master; - u32 tmp; - int ret = 0; - - ret = omap2_mcspi_enable_clocks(mcspi); - if (ret < 0) - return ret; - - tmp = OMAP2_MCSPI_WAKEUPENABLE_WKEN; - mcspi_write_reg(master, OMAP2_MCSPI_WAKEUPENABLE, tmp); - omap2_mcspi_ctx[master->bus_num - 1].wakeupenable = tmp; - - omap2_mcspi_set_master_mode(master); - omap2_mcspi_disable_clocks(mcspi); - return 0; -} - -static int omap_mcspi_runtime_resume(struct device *dev) -{ - struct omap2_mcspi *mcspi; - struct spi_master *master; - - master = dev_get_drvdata(dev); - mcspi = spi_master_get_devdata(master); - omap2_mcspi_restore_ctx(mcspi); - - return 0; -} - - -static int __init omap2_mcspi_probe(struct platform_device *pdev) -{ - struct spi_master *master; - struct omap2_mcspi_platform_config *pdata = pdev->dev.platform_data; - struct omap2_mcspi *mcspi; - struct resource *r; - int status = 0, i; - - master = spi_alloc_master(&pdev->dev, sizeof *mcspi); - if (master == NULL) { - dev_dbg(&pdev->dev, "master allocation failed\n"); - return -ENOMEM; - } - - /* the spi->mode bits understood by this driver: */ - master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; - - if (pdev->id != -1) - master->bus_num = pdev->id; - - master->setup = omap2_mcspi_setup; - master->transfer = omap2_mcspi_transfer; - master->cleanup = omap2_mcspi_cleanup; - master->num_chipselect = pdata->num_cs; - - dev_set_drvdata(&pdev->dev, master); - - mcspi = spi_master_get_devdata(master); - mcspi->master = master; - - r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (r == NULL) { - status = -ENODEV; - goto err1; - } - if (!request_mem_region(r->start, (r->end - r->start) + 1, - dev_name(&pdev->dev))) { - status = -EBUSY; - goto err1; - } - - r->start += pdata->regs_offset; - r->end += pdata->regs_offset; - mcspi->phys = r->start; - mcspi->base = ioremap(r->start, r->end - r->start + 1); - if (!mcspi->base) { - dev_dbg(&pdev->dev, "can't ioremap MCSPI\n"); - status = -ENOMEM; - goto err2; - } - - mcspi->dev = &pdev->dev; - INIT_WORK(&mcspi->work, 
omap2_mcspi_work); - - spin_lock_init(&mcspi->lock); - INIT_LIST_HEAD(&mcspi->msg_queue); - INIT_LIST_HEAD(&omap2_mcspi_ctx[master->bus_num - 1].cs); - - mcspi->dma_channels = kcalloc(master->num_chipselect, - sizeof(struct omap2_mcspi_dma), - GFP_KERNEL); - - if (mcspi->dma_channels == NULL) - goto err2; - - for (i = 0; i < master->num_chipselect; i++) { - char dma_ch_name[14]; - struct resource *dma_res; - - sprintf(dma_ch_name, "rx%d", i); - dma_res = platform_get_resource_byname(pdev, IORESOURCE_DMA, - dma_ch_name); - if (!dma_res) { - dev_dbg(&pdev->dev, "cannot get DMA RX channel\n"); - status = -ENODEV; - break; - } - - mcspi->dma_channels[i].dma_rx_channel = -1; - mcspi->dma_channels[i].dma_rx_sync_dev = dma_res->start; - sprintf(dma_ch_name, "tx%d", i); - dma_res = platform_get_resource_byname(pdev, IORESOURCE_DMA, - dma_ch_name); - if (!dma_res) { - dev_dbg(&pdev->dev, "cannot get DMA TX channel\n"); - status = -ENODEV; - break; - } - - mcspi->dma_channels[i].dma_tx_channel = -1; - mcspi->dma_channels[i].dma_tx_sync_dev = dma_res->start; - } - - pm_runtime_enable(&pdev->dev); - - if (status || omap2_mcspi_master_setup(mcspi) < 0) - goto err3; - - status = spi_register_master(master); - if (status < 0) - goto err4; - - return status; - -err4: - spi_master_put(master); -err3: - kfree(mcspi->dma_channels); -err2: - release_mem_region(r->start, (r->end - r->start) + 1); - iounmap(mcspi->base); -err1: - return status; -} - -static int __exit omap2_mcspi_remove(struct platform_device *pdev) -{ - struct spi_master *master; - struct omap2_mcspi *mcspi; - struct omap2_mcspi_dma *dma_channels; - struct resource *r; - void __iomem *base; - - master = dev_get_drvdata(&pdev->dev); - mcspi = spi_master_get_devdata(master); - dma_channels = mcspi->dma_channels; - - omap2_mcspi_disable_clocks(mcspi); - r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - release_mem_region(r->start, (r->end - r->start) + 1); - - base = mcspi->base; - spi_unregister_master(master); - iounmap(base); - kfree(dma_channels); - - return 0; -} - -/* work with hotplug and coldplug */ -MODULE_ALIAS("platform:omap2_mcspi"); - -#ifdef CONFIG_SUSPEND -/* - * When SPI wake up from off-mode, CS is in activate state. If it was in - * unactive state when driver was suspend, then force it to unactive state at - * wake up. - */ -static int omap2_mcspi_resume(struct device *dev) -{ - struct spi_master *master = dev_get_drvdata(dev); - struct omap2_mcspi *mcspi = spi_master_get_devdata(master); - struct omap2_mcspi_cs *cs; - - omap2_mcspi_enable_clocks(mcspi); - list_for_each_entry(cs, &omap2_mcspi_ctx[master->bus_num - 1].cs, - node) { - if ((cs->chconf0 & OMAP2_MCSPI_CHCONF_FORCE) == 0) { - - /* - * We need to toggle CS state for OMAP take this - * change in account. 
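/*
 * Illustration only -- the probe code above expects its DMA request lines
 * as named IORESOURCE_DMA platform resources ("rx<n>"/"tx<n>", one pair
 * per chip select) and records resource->start as the sync device.  A
 * hypothetical board-side table matching that lookup; the request-line
 * numbers 35 and 34 are invented for the example, not real OMAP values
 * (assumes <linux/ioport.h>):
 */
static struct resource ex_mcspi1_dma_resources[] = {
	{ .start = 35, .end = 35, .name = "rx0", .flags = IORESOURCE_DMA },
	{ .start = 34, .end = 34, .name = "tx0", .flags = IORESOURCE_DMA },
};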
- */ - MOD_REG_BIT(cs->chconf0, OMAP2_MCSPI_CHCONF_FORCE, 1); - __raw_writel(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0); - MOD_REG_BIT(cs->chconf0, OMAP2_MCSPI_CHCONF_FORCE, 0); - __raw_writel(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0); - } - } - omap2_mcspi_disable_clocks(mcspi); - return 0; -} -#else -#define omap2_mcspi_resume NULL -#endif - -static const struct dev_pm_ops omap2_mcspi_pm_ops = { - .resume = omap2_mcspi_resume, - .runtime_resume = omap_mcspi_runtime_resume, -}; - -static struct platform_driver omap2_mcspi_driver = { - .driver = { - .name = "omap2_mcspi", - .owner = THIS_MODULE, - .pm = &omap2_mcspi_pm_ops - }, - .remove = __exit_p(omap2_mcspi_remove), -}; - - -static int __init omap2_mcspi_init(void) -{ - omap2_mcspi_wq = create_singlethread_workqueue( - omap2_mcspi_driver.driver.name); - if (omap2_mcspi_wq == NULL) - return -1; - return platform_driver_probe(&omap2_mcspi_driver, omap2_mcspi_probe); -} -subsys_initcall(omap2_mcspi_init); - -static void __exit omap2_mcspi_exit(void) -{ - platform_driver_unregister(&omap2_mcspi_driver); - - destroy_workqueue(omap2_mcspi_wq); -} -module_exit(omap2_mcspi_exit); - -MODULE_LICENSE("GPL"); diff --git a/drivers/spi/omap_spi_100k.c b/drivers/spi/omap_spi_100k.c deleted file mode 100644 index 9bd1c92..0000000 --- a/drivers/spi/omap_spi_100k.c +++ /dev/null @@ -1,637 +0,0 @@ -/* - * OMAP7xx SPI 100k controller driver - * Author: Fabrice Crohas - * from original omap1_mcspi driver - * - * Copyright (C) 2005, 2006 Nokia Corporation - * Author: Samuel Ortiz and - * Juha Yrj�l� - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include - -#define OMAP1_SPI100K_MAX_FREQ 48000000 - -#define ICR_SPITAS (OMAP7XX_ICR_BASE + 0x12) - -#define SPI_SETUP1 0x00 -#define SPI_SETUP2 0x02 -#define SPI_CTRL 0x04 -#define SPI_STATUS 0x06 -#define SPI_TX_LSB 0x08 -#define SPI_TX_MSB 0x0a -#define SPI_RX_LSB 0x0c -#define SPI_RX_MSB 0x0e - -#define SPI_SETUP1_INT_READ_ENABLE (1UL << 5) -#define SPI_SETUP1_INT_WRITE_ENABLE (1UL << 4) -#define SPI_SETUP1_CLOCK_DIVISOR(x) ((x) << 1) -#define SPI_SETUP1_CLOCK_ENABLE (1UL << 0) - -#define SPI_SETUP2_ACTIVE_EDGE_FALLING (0UL << 0) -#define SPI_SETUP2_ACTIVE_EDGE_RISING (1UL << 0) -#define SPI_SETUP2_NEGATIVE_LEVEL (0UL << 5) -#define SPI_SETUP2_POSITIVE_LEVEL (1UL << 5) -#define SPI_SETUP2_LEVEL_TRIGGER (0UL << 10) -#define SPI_SETUP2_EDGE_TRIGGER (1UL << 10) - -#define SPI_CTRL_SEN(x) ((x) << 7) -#define SPI_CTRL_WORD_SIZE(x) (((x) - 1) << 2) -#define SPI_CTRL_WR (1UL << 1) -#define SPI_CTRL_RD (1UL << 0) - -#define SPI_STATUS_WE (1UL << 1) -#define SPI_STATUS_RD (1UL << 0) - -#define WRITE 0 -#define READ 1 - - -/* use PIO for small transfers, avoiding DMA setup/teardown overhead and - * cache operations; better heuristics consider wordsize and bitrate. - */ -#define DMA_MIN_BYTES 8 - -#define SPI_RUNNING 0 -#define SPI_SHUTDOWN 1 - -struct omap1_spi100k { - struct work_struct work; - - /* lock protects queue and registers */ - spinlock_t lock; - struct list_head msg_queue; - struct spi_master *master; - struct clk *ick; - struct clk *fck; - - /* Virtual base address of the controller */ - void __iomem *base; - - /* State of the SPI */ - unsigned int state; -}; - -struct omap1_spi100k_cs { - void __iomem *base; - int word_len; -}; - -static struct workqueue_struct *omap1_spi100k_wq; - -#define MOD_REG_BIT(val, mask, set) do { \ - if (set) \ - val |= mask; \ - else \ - val &= ~mask; \ -} while (0) - -static void spi100k_enable_clock(struct spi_master *master) -{ - unsigned int val; - struct omap1_spi100k *spi100k = spi_master_get_devdata(master); - - /* enable SPI */ - val = readw(spi100k->base + SPI_SETUP1); - val |= SPI_SETUP1_CLOCK_ENABLE; - writew(val, spi100k->base + SPI_SETUP1); -} - -static void spi100k_disable_clock(struct spi_master *master) -{ - unsigned int val; - struct omap1_spi100k *spi100k = spi_master_get_devdata(master); - - /* disable SPI */ - val = readw(spi100k->base + SPI_SETUP1); - val &= ~SPI_SETUP1_CLOCK_ENABLE; - writew(val, spi100k->base + SPI_SETUP1); -} - -static void spi100k_write_data(struct spi_master *master, int len, int data) -{ - struct omap1_spi100k *spi100k = spi_master_get_devdata(master); - - /* write 16-bit word, shifting 8-bit data if necessary */ - if (len <= 8) { - data <<= 8; - len = 16; - } - - spi100k_enable_clock(master); - writew( data , spi100k->base + SPI_TX_MSB); - - writew(SPI_CTRL_SEN(0) | - SPI_CTRL_WORD_SIZE(len) | - SPI_CTRL_WR, - spi100k->base + SPI_CTRL); - - /* Wait for bit ack send change */ - while((readw(spi100k->base + SPI_STATUS) & SPI_STATUS_WE) != SPI_STATUS_WE); - udelay(1000); - - spi100k_disable_clock(master); -} - -static int spi100k_read_data(struct spi_master *master, int len) -{ - int dataH,dataL; - struct omap1_spi100k *spi100k = 
spi_master_get_devdata(master); - - /* Always do at least 16 bits */ - if (len <= 8) - len = 16; - - spi100k_enable_clock(master); - writew(SPI_CTRL_SEN(0) | - SPI_CTRL_WORD_SIZE(len) | - SPI_CTRL_RD, - spi100k->base + SPI_CTRL); - - while((readw(spi100k->base + SPI_STATUS) & SPI_STATUS_RD) != SPI_STATUS_RD); - udelay(1000); - - dataL = readw(spi100k->base + SPI_RX_LSB); - dataH = readw(spi100k->base + SPI_RX_MSB); - spi100k_disable_clock(master); - - return dataL; -} - -static void spi100k_open(struct spi_master *master) -{ - /* get control of SPI */ - struct omap1_spi100k *spi100k = spi_master_get_devdata(master); - - writew(SPI_SETUP1_INT_READ_ENABLE | - SPI_SETUP1_INT_WRITE_ENABLE | - SPI_SETUP1_CLOCK_DIVISOR(0), spi100k->base + SPI_SETUP1); - - /* configure clock and interrupts */ - writew(SPI_SETUP2_ACTIVE_EDGE_FALLING | - SPI_SETUP2_NEGATIVE_LEVEL | - SPI_SETUP2_LEVEL_TRIGGER, spi100k->base + SPI_SETUP2); -} - -static void omap1_spi100k_force_cs(struct omap1_spi100k *spi100k, int enable) -{ - if (enable) - writew(0x05fc, spi100k->base + SPI_CTRL); - else - writew(0x05fd, spi100k->base + SPI_CTRL); -} - -static unsigned -omap1_spi100k_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) -{ - struct omap1_spi100k *spi100k; - struct omap1_spi100k_cs *cs = spi->controller_state; - unsigned int count, c; - int word_len; - - spi100k = spi_master_get_devdata(spi->master); - count = xfer->len; - c = count; - word_len = cs->word_len; - - if (word_len <= 8) { - u8 *rx; - const u8 *tx; - - rx = xfer->rx_buf; - tx = xfer->tx_buf; - do { - c-=1; - if (xfer->tx_buf != NULL) - spi100k_write_data(spi->master, word_len, *tx++); - if (xfer->rx_buf != NULL) - *rx++ = spi100k_read_data(spi->master, word_len); - } while(c); - } else if (word_len <= 16) { - u16 *rx; - const u16 *tx; - - rx = xfer->rx_buf; - tx = xfer->tx_buf; - do { - c-=2; - if (xfer->tx_buf != NULL) - spi100k_write_data(spi->master,word_len, *tx++); - if (xfer->rx_buf != NULL) - *rx++ = spi100k_read_data(spi->master,word_len); - } while(c); - } else if (word_len <= 32) { - u32 *rx; - const u32 *tx; - - rx = xfer->rx_buf; - tx = xfer->tx_buf; - do { - c-=4; - if (xfer->tx_buf != NULL) - spi100k_write_data(spi->master,word_len, *tx); - if (xfer->rx_buf != NULL) - *rx = spi100k_read_data(spi->master,word_len); - } while(c); - } - return count - c; -} - -/* called only when no transfer is active to this device */ -static int omap1_spi100k_setup_transfer(struct spi_device *spi, - struct spi_transfer *t) -{ - struct omap1_spi100k *spi100k = spi_master_get_devdata(spi->master); - struct omap1_spi100k_cs *cs = spi->controller_state; - u8 word_len = spi->bits_per_word; - - if (t != NULL && t->bits_per_word) - word_len = t->bits_per_word; - if (!word_len) - word_len = 8; - - if (spi->bits_per_word > 32) - return -EINVAL; - cs->word_len = word_len; - - /* SPI init before transfer */ - writew(0x3e , spi100k->base + SPI_SETUP1); - writew(0x00 , spi100k->base + SPI_STATUS); - writew(0x3e , spi100k->base + SPI_CTRL); - - return 0; -} - -/* the spi->mode bits understood by this driver: */ -#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH) - -static int omap1_spi100k_setup(struct spi_device *spi) -{ - int ret; - struct omap1_spi100k *spi100k; - struct omap1_spi100k_cs *cs = spi->controller_state; - - if (spi->bits_per_word < 4 || spi->bits_per_word > 32) { - dev_dbg(&spi->dev, "setup: unsupported %d bit words\n", - spi->bits_per_word); - return -EINVAL; - } - - spi100k = spi_master_get_devdata(spi->master); - - if (!cs) { - cs = 
kzalloc(sizeof *cs, GFP_KERNEL); - if (!cs) - return -ENOMEM; - cs->base = spi100k->base + spi->chip_select * 0x14; - spi->controller_state = cs; - } - - spi100k_open(spi->master); - - clk_enable(spi100k->ick); - clk_enable(spi100k->fck); - - ret = omap1_spi100k_setup_transfer(spi, NULL); - - clk_disable(spi100k->ick); - clk_disable(spi100k->fck); - - return ret; -} - -static void omap1_spi100k_work(struct work_struct *work) -{ - struct omap1_spi100k *spi100k; - int status = 0; - - spi100k = container_of(work, struct omap1_spi100k, work); - spin_lock_irq(&spi100k->lock); - - clk_enable(spi100k->ick); - clk_enable(spi100k->fck); - - /* We only enable one channel at a time -- the one whose message is - * at the head of the queue -- although this controller would gladly - * arbitrate among multiple channels. This corresponds to "single - * channel" master mode. As a side effect, we need to manage the - * chipselect with the FORCE bit ... CS != channel enable. - */ - while (!list_empty(&spi100k->msg_queue)) { - struct spi_message *m; - struct spi_device *spi; - struct spi_transfer *t = NULL; - int cs_active = 0; - struct omap1_spi100k_cs *cs; - int par_override = 0; - - m = container_of(spi100k->msg_queue.next, struct spi_message, - queue); - - list_del_init(&m->queue); - spin_unlock_irq(&spi100k->lock); - - spi = m->spi; - cs = spi->controller_state; - - list_for_each_entry(t, &m->transfers, transfer_list) { - if (t->tx_buf == NULL && t->rx_buf == NULL && t->len) { - status = -EINVAL; - break; - } - if (par_override || t->speed_hz || t->bits_per_word) { - par_override = 1; - status = omap1_spi100k_setup_transfer(spi, t); - if (status < 0) - break; - if (!t->speed_hz && !t->bits_per_word) - par_override = 0; - } - - if (!cs_active) { - omap1_spi100k_force_cs(spi100k, 1); - cs_active = 1; - } - - if (t->len) { - unsigned count; - - count = omap1_spi100k_txrx_pio(spi, t); - m->actual_length += count; - - if (count != t->len) { - status = -EIO; - break; - } - } - - if (t->delay_usecs) - udelay(t->delay_usecs); - - /* ignore the "leave it on after last xfer" hint */ - - if (t->cs_change) { - omap1_spi100k_force_cs(spi100k, 0); - cs_active = 0; - } - } - - /* Restore defaults if they were overriden */ - if (par_override) { - par_override = 0; - status = omap1_spi100k_setup_transfer(spi, NULL); - } - - if (cs_active) - omap1_spi100k_force_cs(spi100k, 0); - - m->status = status; - m->complete(m->context); - - spin_lock_irq(&spi100k->lock); - } - - clk_disable(spi100k->ick); - clk_disable(spi100k->fck); - spin_unlock_irq(&spi100k->lock); - - if (status < 0) - printk(KERN_WARNING "spi transfer failed with %d\n", status); -} - -static int omap1_spi100k_transfer(struct spi_device *spi, struct spi_message *m) -{ - struct omap1_spi100k *spi100k; - unsigned long flags; - struct spi_transfer *t; - - m->actual_length = 0; - m->status = -EINPROGRESS; - - spi100k = spi_master_get_devdata(spi->master); - - /* Don't accept new work if we're shutting down */ - if (spi100k->state == SPI_SHUTDOWN) - return -ESHUTDOWN; - - /* reject invalid messages and transfers */ - if (list_empty(&m->transfers) || !m->complete) - return -EINVAL; - - list_for_each_entry(t, &m->transfers, transfer_list) { - const void *tx_buf = t->tx_buf; - void *rx_buf = t->rx_buf; - unsigned len = t->len; - - if (t->speed_hz > OMAP1_SPI100K_MAX_FREQ - || (len && !(rx_buf || tx_buf)) - || (t->bits_per_word && - ( t->bits_per_word < 4 - || t->bits_per_word > 32))) { - dev_dbg(&spi->dev, "transfer: %d Hz, %d %s%s, %d bpw\n", - t->speed_hz, - len, - 
tx_buf ? "tx" : "", - rx_buf ? "rx" : "", - t->bits_per_word); - return -EINVAL; - } - - if (t->speed_hz && t->speed_hz < OMAP1_SPI100K_MAX_FREQ/(1<<16)) { - dev_dbg(&spi->dev, "%d Hz max exceeds %d\n", - t->speed_hz, - OMAP1_SPI100K_MAX_FREQ/(1<<16)); - return -EINVAL; - } - - } - - spin_lock_irqsave(&spi100k->lock, flags); - list_add_tail(&m->queue, &spi100k->msg_queue); - queue_work(omap1_spi100k_wq, &spi100k->work); - spin_unlock_irqrestore(&spi100k->lock, flags); - - return 0; -} - -static int __init omap1_spi100k_reset(struct omap1_spi100k *spi100k) -{ - return 0; -} - -static int __devinit omap1_spi100k_probe(struct platform_device *pdev) -{ - struct spi_master *master; - struct omap1_spi100k *spi100k; - int status = 0; - - if (!pdev->id) - return -EINVAL; - - master = spi_alloc_master(&pdev->dev, sizeof *spi100k); - if (master == NULL) { - dev_dbg(&pdev->dev, "master allocation failed\n"); - return -ENOMEM; - } - - if (pdev->id != -1) - master->bus_num = pdev->id; - - master->setup = omap1_spi100k_setup; - master->transfer = omap1_spi100k_transfer; - master->cleanup = NULL; - master->num_chipselect = 2; - master->mode_bits = MODEBITS; - - dev_set_drvdata(&pdev->dev, master); - - spi100k = spi_master_get_devdata(master); - spi100k->master = master; - - /* - * The memory region base address is taken as the platform_data. - * You should allocate this with ioremap() before initializing - * the SPI. - */ - spi100k->base = (void __iomem *) pdev->dev.platform_data; - - INIT_WORK(&spi100k->work, omap1_spi100k_work); - - spin_lock_init(&spi100k->lock); - INIT_LIST_HEAD(&spi100k->msg_queue); - spi100k->ick = clk_get(&pdev->dev, "ick"); - if (IS_ERR(spi100k->ick)) { - dev_dbg(&pdev->dev, "can't get spi100k_ick\n"); - status = PTR_ERR(spi100k->ick); - goto err1; - } - - spi100k->fck = clk_get(&pdev->dev, "fck"); - if (IS_ERR(spi100k->fck)) { - dev_dbg(&pdev->dev, "can't get spi100k_fck\n"); - status = PTR_ERR(spi100k->fck); - goto err2; - } - - if (omap1_spi100k_reset(spi100k) < 0) - goto err3; - - status = spi_register_master(master); - if (status < 0) - goto err3; - - spi100k->state = SPI_RUNNING; - - return status; - -err3: - clk_put(spi100k->fck); -err2: - clk_put(spi100k->ick); -err1: - spi_master_put(master); - return status; -} - -static int __exit omap1_spi100k_remove(struct platform_device *pdev) -{ - struct spi_master *master; - struct omap1_spi100k *spi100k; - struct resource *r; - unsigned limit = 500; - unsigned long flags; - int status = 0; - - master = dev_get_drvdata(&pdev->dev); - spi100k = spi_master_get_devdata(master); - - spin_lock_irqsave(&spi100k->lock, flags); - - spi100k->state = SPI_SHUTDOWN; - while (!list_empty(&spi100k->msg_queue) && limit--) { - spin_unlock_irqrestore(&spi100k->lock, flags); - msleep(10); - spin_lock_irqsave(&spi100k->lock, flags); - } - - if (!list_empty(&spi100k->msg_queue)) - status = -EBUSY; - - spin_unlock_irqrestore(&spi100k->lock, flags); - - if (status != 0) - return status; - - clk_put(spi100k->fck); - clk_put(spi100k->ick); - - r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - - spi_unregister_master(master); - - return 0; -} - -static struct platform_driver omap1_spi100k_driver = { - .driver = { - .name = "omap1_spi100k", - .owner = THIS_MODULE, - }, - .remove = __exit_p(omap1_spi100k_remove), -}; - - -static int __init omap1_spi100k_init(void) -{ - omap1_spi100k_wq = create_singlethread_workqueue( - omap1_spi100k_driver.driver.name); - - if (omap1_spi100k_wq == NULL) - return -1; - - return 
platform_driver_probe(&omap1_spi100k_driver, omap1_spi100k_probe); -} - -static void __exit omap1_spi100k_exit(void) -{ - platform_driver_unregister(&omap1_spi100k_driver); - - destroy_workqueue(omap1_spi100k_wq); -} - -module_init(omap1_spi100k_init); -module_exit(omap1_spi100k_exit); - -MODULE_DESCRIPTION("OMAP7xx SPI 100k controller driver"); -MODULE_AUTHOR("Fabrice Crohas "); -MODULE_LICENSE("GPL"); - diff --git a/drivers/spi/omap_uwire.c b/drivers/spi/omap_uwire.c deleted file mode 100644 index 160d326..0000000 --- a/drivers/spi/omap_uwire.c +++ /dev/null @@ -1,593 +0,0 @@ -/* - * omap_uwire.c -- MicroWire interface driver for OMAP - * - * Copyright 2003 MontaVista Software Inc. - * - * Ported to 2.6 OMAP uwire interface. - * Copyright (C) 2004 Texas Instruments. - * - * Generalization patches by Juha Yrjola - * - * Copyright (C) 2005 David Brownell (ported to 2.6 SPI interface) - * Copyright (C) 2006 Nokia - * - * Many updates by Imre Deak - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - * - * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF - * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON - * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 675 Mass Ave, Cambridge, MA 02139, USA. - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include -#include -#include -#include -#include - -#include -#include /* OMAP7XX_IO_CONF registers */ - - -/* FIXME address is now a platform device resource, - * and irqs should show there too... - */ -#define UWIRE_BASE_PHYS 0xFFFB3000 - -/* uWire Registers: */ -#define UWIRE_IO_SIZE 0x20 -#define UWIRE_TDR 0x00 -#define UWIRE_RDR 0x00 -#define UWIRE_CSR 0x01 -#define UWIRE_SR1 0x02 -#define UWIRE_SR2 0x03 -#define UWIRE_SR3 0x04 -#define UWIRE_SR4 0x05 -#define UWIRE_SR5 0x06 - -/* CSR bits */ -#define RDRB (1 << 15) -#define CSRB (1 << 14) -#define START (1 << 13) -#define CS_CMD (1 << 12) - -/* SR1 or SR2 bits */ -#define UWIRE_READ_FALLING_EDGE 0x0001 -#define UWIRE_READ_RISING_EDGE 0x0000 -#define UWIRE_WRITE_FALLING_EDGE 0x0000 -#define UWIRE_WRITE_RISING_EDGE 0x0002 -#define UWIRE_CS_ACTIVE_LOW 0x0000 -#define UWIRE_CS_ACTIVE_HIGH 0x0004 -#define UWIRE_FREQ_DIV_2 0x0000 -#define UWIRE_FREQ_DIV_4 0x0008 -#define UWIRE_FREQ_DIV_8 0x0010 -#define UWIRE_CHK_READY 0x0020 -#define UWIRE_CLK_INVERTED 0x0040 - - -struct uwire_spi { - struct spi_bitbang bitbang; - struct clk *ck; -}; - -struct uwire_state { - unsigned bits_per_word; - unsigned div1_idx; -}; - -/* REVISIT compile time constant for idx_shift? 
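/*
 * Illustration only -- the uWire register indices above are word indices,
 * not byte offsets; the access helpers just below scale them by
 * uwire_idx_shift (set to 1 on OMAP7xx and 2 otherwise in the probe code
 * further down).  Worked example for UWIRE_SR3 (index 0x04): byte offset
 * 0x04 << 1 = 0x08 with 16-bit register spacing, or 0x04 << 2 = 0x10 with
 * 32-bit spacing.  The ex_* helper is invented for this example:
 */
static inline unsigned long ex_uwire_reg_offset(int idx, unsigned int idx_shift)
{
	return (unsigned long)idx << idx_shift;	/* e.g. 0x04 << 2 = 0x10 */
}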
*/ -/* - * Or, put it in a structure which is used throughout the driver; - * that avoids having to issue two loads for each bit of static data. - */ -static unsigned int uwire_idx_shift; -static void __iomem *uwire_base; - -static inline void uwire_write_reg(int idx, u16 val) -{ - __raw_writew(val, uwire_base + (idx << uwire_idx_shift)); -} - -static inline u16 uwire_read_reg(int idx) -{ - return __raw_readw(uwire_base + (idx << uwire_idx_shift)); -} - -static inline void omap_uwire_configure_mode(u8 cs, unsigned long flags) -{ - u16 w, val = 0; - int shift, reg; - - if (flags & UWIRE_CLK_INVERTED) - val ^= 0x03; - val = flags & 0x3f; - if (cs & 1) - shift = 6; - else - shift = 0; - if (cs <= 1) - reg = UWIRE_SR1; - else - reg = UWIRE_SR2; - - w = uwire_read_reg(reg); - w &= ~(0x3f << shift); - w |= val << shift; - uwire_write_reg(reg, w); -} - -static int wait_uwire_csr_flag(u16 mask, u16 val, int might_not_catch) -{ - u16 w; - int c = 0; - unsigned long max_jiffies = jiffies + HZ; - - for (;;) { - w = uwire_read_reg(UWIRE_CSR); - if ((w & mask) == val) - break; - if (time_after(jiffies, max_jiffies)) { - printk(KERN_ERR "%s: timeout. reg=%#06x " - "mask=%#06x val=%#06x\n", - __func__, w, mask, val); - return -1; - } - c++; - if (might_not_catch && c > 64) - break; - } - return 0; -} - -static void uwire_set_clk1_div(int div1_idx) -{ - u16 w; - - w = uwire_read_reg(UWIRE_SR3); - w &= ~(0x03 << 1); - w |= div1_idx << 1; - uwire_write_reg(UWIRE_SR3, w); -} - -static void uwire_chipselect(struct spi_device *spi, int value) -{ - struct uwire_state *ust = spi->controller_state; - u16 w; - int old_cs; - - - BUG_ON(wait_uwire_csr_flag(CSRB, 0, 0)); - - w = uwire_read_reg(UWIRE_CSR); - old_cs = (w >> 10) & 0x03; - if (value == BITBANG_CS_INACTIVE || old_cs != spi->chip_select) { - /* Deselect this CS, or the previous CS */ - w &= ~CS_CMD; - uwire_write_reg(UWIRE_CSR, w); - } - /* activate specfied chipselect */ - if (value == BITBANG_CS_ACTIVE) { - uwire_set_clk1_div(ust->div1_idx); - /* invert clock? */ - if (spi->mode & SPI_CPOL) - uwire_write_reg(UWIRE_SR4, 1); - else - uwire_write_reg(UWIRE_SR4, 0); - - w = spi->chip_select << 10; - w |= CS_CMD; - uwire_write_reg(UWIRE_CSR, w); - } -} - -static int uwire_txrx(struct spi_device *spi, struct spi_transfer *t) -{ - struct uwire_state *ust = spi->controller_state; - unsigned len = t->len; - unsigned bits = ust->bits_per_word; - unsigned bytes; - u16 val, w; - int status = 0; - - if (!t->tx_buf && !t->rx_buf) - return 0; - - /* Microwire doesn't read and write concurrently */ - if (t->tx_buf && t->rx_buf) - return -EPERM; - - w = spi->chip_select << 10; - w |= CS_CMD; - - if (t->tx_buf) { - const u8 *buf = t->tx_buf; - - /* NOTE: DMA could be used for TX transfers */ - - /* write one or two bytes at a time */ - while (len >= 1) { - /* tx bit 15 is first sent; we byteswap multibyte words - * (msb-first) on the way out from memory. - */ - val = *buf++; - if (bits > 8) { - bytes = 2; - val |= *buf++ << 8; - } else - bytes = 1; - val <<= 16 - bits; - -#ifdef VERBOSE - pr_debug("%s: write-%d =%04x\n", - dev_name(&spi->dev), bits, val); -#endif - if (wait_uwire_csr_flag(CSRB, 0, 0)) - goto eio; - - uwire_write_reg(UWIRE_TDR, val); - - /* start write */ - val = START | w | (bits << 5); - - uwire_write_reg(UWIRE_CSR, val); - len -= bytes; - - /* Wait till write actually starts. - * This is needed with MPU clock 60+ MHz. - * REVISIT: we may not have time to catch it... 
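/*
 * Illustration only -- the transmit loop above left-justifies each word so
 * its most significant bit ends up in TDR bit 15, which is shifted out
 * first.  Worked example for a 12-bit word stored as the bytes 0xbc, 0x0a
 * (low byte first in memory); the ex_* helper is invented for this example:
 */
static u16 ex_uwire_pack_tx(const u8 *buf, unsigned int bits)
{
	u16 val = buf[0];

	if (bits > 8)
		val |= buf[1] << 8;	/* 0xbc | 0x0a00 = 0x0abc */
	val <<= 16 - bits;		/* 0x0abc << 4  = 0xabc0  */
	return val;			/* bit 15 of 0xabc0 goes out first */
}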
- */ - if (wait_uwire_csr_flag(CSRB, CSRB, 1)) - goto eio; - - status += bytes; - } - - /* REVISIT: save this for later to get more i/o overlap */ - if (wait_uwire_csr_flag(CSRB, 0, 0)) - goto eio; - - } else if (t->rx_buf) { - u8 *buf = t->rx_buf; - - /* read one or two bytes at a time */ - while (len) { - if (bits > 8) { - bytes = 2; - } else - bytes = 1; - - /* start read */ - val = START | w | (bits << 0); - uwire_write_reg(UWIRE_CSR, val); - len -= bytes; - - /* Wait till read actually starts */ - (void) wait_uwire_csr_flag(CSRB, CSRB, 1); - - if (wait_uwire_csr_flag(RDRB | CSRB, - RDRB, 0)) - goto eio; - - /* rx bit 0 is last received; multibyte words will - * be properly byteswapped on the way to memory. - */ - val = uwire_read_reg(UWIRE_RDR); - val &= (1 << bits) - 1; - *buf++ = (u8) val; - if (bytes == 2) - *buf++ = val >> 8; - status += bytes; -#ifdef VERBOSE - pr_debug("%s: read-%d =%04x\n", - dev_name(&spi->dev), bits, val); -#endif - - } - } - return status; -eio: - return -EIO; -} - -static int uwire_setup_transfer(struct spi_device *spi, struct spi_transfer *t) -{ - struct uwire_state *ust = spi->controller_state; - struct uwire_spi *uwire; - unsigned flags = 0; - unsigned bits; - unsigned hz; - unsigned long rate; - int div1_idx; - int div1; - int div2; - int status; - - uwire = spi_master_get_devdata(spi->master); - - if (spi->chip_select > 3) { - pr_debug("%s: cs%d?\n", dev_name(&spi->dev), spi->chip_select); - status = -ENODEV; - goto done; - } - - bits = spi->bits_per_word; - if (t != NULL && t->bits_per_word) - bits = t->bits_per_word; - - if (bits > 16) { - pr_debug("%s: wordsize %d?\n", dev_name(&spi->dev), bits); - status = -ENODEV; - goto done; - } - ust->bits_per_word = bits; - - /* mode 0..3, clock inverted separately; - * standard nCS signaling; - * don't treat DI=high as "not ready" - */ - if (spi->mode & SPI_CS_HIGH) - flags |= UWIRE_CS_ACTIVE_HIGH; - - if (spi->mode & SPI_CPOL) - flags |= UWIRE_CLK_INVERTED; - - switch (spi->mode & (SPI_CPOL | SPI_CPHA)) { - case SPI_MODE_0: - case SPI_MODE_3: - flags |= UWIRE_WRITE_FALLING_EDGE | UWIRE_READ_RISING_EDGE; - break; - case SPI_MODE_1: - case SPI_MODE_2: - flags |= UWIRE_WRITE_RISING_EDGE | UWIRE_READ_FALLING_EDGE; - break; - } - - /* assume it's already enabled */ - rate = clk_get_rate(uwire->ck); - - hz = spi->max_speed_hz; - if (t != NULL && t->speed_hz) - hz = t->speed_hz; - - if (!hz) { - pr_debug("%s: zero speed?\n", dev_name(&spi->dev)); - status = -EINVAL; - goto done; - } - - /* F_INT = mpu_xor_clk / DIV1 */ - for (div1_idx = 0; div1_idx < 4; div1_idx++) { - switch (div1_idx) { - case 0: - div1 = 2; - break; - case 1: - div1 = 4; - break; - case 2: - div1 = 7; - break; - default: - case 3: - div1 = 10; - break; - } - div2 = (rate / div1 + hz - 1) / hz; - if (div2 <= 8) - break; - } - if (div1_idx == 4) { - pr_debug("%s: lowest clock %ld, need %d\n", - dev_name(&spi->dev), rate / 10 / 8, hz); - status = -EDOM; - goto done; - } - - /* we have to cache this and reset in uwire_chipselect as this is a - * global parameter and another uwire device can change it under - * us */ - ust->div1_idx = div1_idx; - uwire_set_clk1_div(div1_idx); - - rate /= div1; - - switch (div2) { - case 0: - case 1: - case 2: - flags |= UWIRE_FREQ_DIV_2; - rate /= 2; - break; - case 3: - case 4: - flags |= UWIRE_FREQ_DIV_4; - rate /= 4; - break; - case 5: - case 6: - case 7: - case 8: - flags |= UWIRE_FREQ_DIV_8; - rate /= 8; - break; - } - omap_uwire_configure_mode(spi->chip_select, flags); - pr_debug("%s: uwire flags %02x, armxor 
%lu KHz, SCK %lu KHz\n", - __func__, flags, - clk_get_rate(uwire->ck) / 1000, - rate / 1000); - status = 0; -done: - return status; -} - -static int uwire_setup(struct spi_device *spi) -{ - struct uwire_state *ust = spi->controller_state; - - if (ust == NULL) { - ust = kzalloc(sizeof(*ust), GFP_KERNEL); - if (ust == NULL) - return -ENOMEM; - spi->controller_state = ust; - } - - return uwire_setup_transfer(spi, NULL); -} - -static void uwire_cleanup(struct spi_device *spi) -{ - kfree(spi->controller_state); -} - -static void uwire_off(struct uwire_spi *uwire) -{ - uwire_write_reg(UWIRE_SR3, 0); - clk_disable(uwire->ck); - clk_put(uwire->ck); - spi_master_put(uwire->bitbang.master); -} - -static int __init uwire_probe(struct platform_device *pdev) -{ - struct spi_master *master; - struct uwire_spi *uwire; - int status; - - master = spi_alloc_master(&pdev->dev, sizeof *uwire); - if (!master) - return -ENODEV; - - uwire = spi_master_get_devdata(master); - - uwire_base = ioremap(UWIRE_BASE_PHYS, UWIRE_IO_SIZE); - if (!uwire_base) { - dev_dbg(&pdev->dev, "can't ioremap UWIRE\n"); - spi_master_put(master); - return -ENOMEM; - } - - dev_set_drvdata(&pdev->dev, uwire); - - uwire->ck = clk_get(&pdev->dev, "fck"); - if (IS_ERR(uwire->ck)) { - status = PTR_ERR(uwire->ck); - dev_dbg(&pdev->dev, "no functional clock?\n"); - spi_master_put(master); - return status; - } - clk_enable(uwire->ck); - - if (cpu_is_omap7xx()) - uwire_idx_shift = 1; - else - uwire_idx_shift = 2; - - uwire_write_reg(UWIRE_SR3, 1); - - /* the spi->mode bits understood by this driver: */ - master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; - - master->flags = SPI_MASTER_HALF_DUPLEX; - - master->bus_num = 2; /* "official" */ - master->num_chipselect = 4; - master->setup = uwire_setup; - master->cleanup = uwire_cleanup; - - uwire->bitbang.master = master; - uwire->bitbang.chipselect = uwire_chipselect; - uwire->bitbang.setup_transfer = uwire_setup_transfer; - uwire->bitbang.txrx_bufs = uwire_txrx; - - status = spi_bitbang_start(&uwire->bitbang); - if (status < 0) { - uwire_off(uwire); - iounmap(uwire_base); - } - return status; -} - -static int __exit uwire_remove(struct platform_device *pdev) -{ - struct uwire_spi *uwire = dev_get_drvdata(&pdev->dev); - int status; - - // FIXME remove all child devices, somewhere ... - - status = spi_bitbang_stop(&uwire->bitbang); - uwire_off(uwire); - iounmap(uwire_base); - return status; -} - -/* work with hotplug and coldplug */ -MODULE_ALIAS("platform:omap_uwire"); - -static struct platform_driver uwire_driver = { - .driver = { - .name = "omap_uwire", - .owner = THIS_MODULE, - }, - .remove = __exit_p(uwire_remove), - // suspend ... unuse ck - // resume ... use ck -}; - -static int __init omap_uwire_init(void) -{ - /* FIXME move these into the relevant board init code. also, include - * H3 support; it uses tsc2101 like H2 (on a different chipselect). 
- */ - - if (machine_is_omap_h2()) { - /* defaults: W21 SDO, U18 SDI, V19 SCL */ - omap_cfg_reg(N14_1610_UWIRE_CS0); - omap_cfg_reg(N15_1610_UWIRE_CS1); - } - if (machine_is_omap_perseus2()) { - /* configure pins: MPU_UW_nSCS1, MPU_UW_SDO, MPU_UW_SCLK */ - int val = omap_readl(OMAP7XX_IO_CONF_9) & ~0x00EEE000; - omap_writel(val | 0x00AAA000, OMAP7XX_IO_CONF_9); - } - - return platform_driver_probe(&uwire_driver, uwire_probe); -} - -static void __exit omap_uwire_exit(void) -{ - platform_driver_unregister(&uwire_driver); -} - -subsys_initcall(omap_uwire_init); -module_exit(omap_uwire_exit); - -MODULE_LICENSE("GPL"); - diff --git a/drivers/spi/orion_spi.c b/drivers/spi/orion_spi.c deleted file mode 100644 index 0b677dc..0000000 --- a/drivers/spi/orion_spi.c +++ /dev/null @@ -1,573 +0,0 @@ -/* - * orion_spi.c -- Marvell Orion SPI controller driver - * - * Author: Shadi Ammouri - * Copyright (C) 2007-2008 Marvell Ltd. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define DRIVER_NAME "orion_spi" - -#define ORION_NUM_CHIPSELECTS 1 /* only one slave is supported*/ -#define ORION_SPI_WAIT_RDY_MAX_LOOP 2000 /* in usec */ - -#define ORION_SPI_IF_CTRL_REG 0x00 -#define ORION_SPI_IF_CONFIG_REG 0x04 -#define ORION_SPI_DATA_OUT_REG 0x08 -#define ORION_SPI_DATA_IN_REG 0x0c -#define ORION_SPI_INT_CAUSE_REG 0x10 - -#define ORION_SPI_IF_8_16_BIT_MODE (1 << 5) -#define ORION_SPI_CLK_PRESCALE_MASK 0x1F - -struct orion_spi { - struct work_struct work; - - /* Lock access to transfer list. */ - spinlock_t lock; - - struct list_head msg_queue; - struct spi_master *master; - void __iomem *base; - unsigned int max_speed; - unsigned int min_speed; - struct orion_spi_info *spi_info; -}; - -static struct workqueue_struct *orion_spi_wq; - -static inline void __iomem *spi_reg(struct orion_spi *orion_spi, u32 reg) -{ - return orion_spi->base + reg; -} - -static inline void -orion_spi_setbits(struct orion_spi *orion_spi, u32 reg, u32 mask) -{ - void __iomem *reg_addr = spi_reg(orion_spi, reg); - u32 val; - - val = readl(reg_addr); - val |= mask; - writel(val, reg_addr); -} - -static inline void -orion_spi_clrbits(struct orion_spi *orion_spi, u32 reg, u32 mask) -{ - void __iomem *reg_addr = spi_reg(orion_spi, reg); - u32 val; - - val = readl(reg_addr); - val &= ~mask; - writel(val, reg_addr); -} - -static int orion_spi_set_transfer_size(struct orion_spi *orion_spi, int size) -{ - if (size == 16) { - orion_spi_setbits(orion_spi, ORION_SPI_IF_CONFIG_REG, - ORION_SPI_IF_8_16_BIT_MODE); - } else if (size == 8) { - orion_spi_clrbits(orion_spi, ORION_SPI_IF_CONFIG_REG, - ORION_SPI_IF_8_16_BIT_MODE); - } else { - pr_debug("Bad bits per word value %d (only 8 or 16 are " - "allowed).\n", size); - return -EINVAL; - } - - return 0; -} - -static int orion_spi_baudrate_set(struct spi_device *spi, unsigned int speed) -{ - u32 tclk_hz; - u32 rate; - u32 prescale; - u32 reg; - struct orion_spi *orion_spi; - - orion_spi = spi_master_get_devdata(spi->master); - - tclk_hz = orion_spi->spi_info->tclk; - - /* - * the supported rates are: 4,6,8...30 - * round up as we look for equal or less speed - */ - rate = DIV_ROUND_UP(tclk_hz, speed); - rate = roundup(rate, 2); - - /* check if requested speed is too small */ - if (rate > 30) - return -EINVAL; - - if (rate < 4) - rate = 4; - - /* Convert the rate 
to SPI clock divisor value. */ - prescale = 0x10 + rate/2; - - reg = readl(spi_reg(orion_spi, ORION_SPI_IF_CONFIG_REG)); - reg = ((reg & ~ORION_SPI_CLK_PRESCALE_MASK) | prescale); - writel(reg, spi_reg(orion_spi, ORION_SPI_IF_CONFIG_REG)); - - return 0; -} - -/* - * called only when no transfer is active on the bus - */ -static int -orion_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t) -{ - struct orion_spi *orion_spi; - unsigned int speed = spi->max_speed_hz; - unsigned int bits_per_word = spi->bits_per_word; - int rc; - - orion_spi = spi_master_get_devdata(spi->master); - - if ((t != NULL) && t->speed_hz) - speed = t->speed_hz; - - if ((t != NULL) && t->bits_per_word) - bits_per_word = t->bits_per_word; - - rc = orion_spi_baudrate_set(spi, speed); - if (rc) - return rc; - - return orion_spi_set_transfer_size(orion_spi, bits_per_word); -} - -static void orion_spi_set_cs(struct orion_spi *orion_spi, int enable) -{ - if (enable) - orion_spi_setbits(orion_spi, ORION_SPI_IF_CTRL_REG, 0x1); - else - orion_spi_clrbits(orion_spi, ORION_SPI_IF_CTRL_REG, 0x1); -} - -static inline int orion_spi_wait_till_ready(struct orion_spi *orion_spi) -{ - int i; - - for (i = 0; i < ORION_SPI_WAIT_RDY_MAX_LOOP; i++) { - if (readl(spi_reg(orion_spi, ORION_SPI_INT_CAUSE_REG))) - return 1; - else - udelay(1); - } - - return -1; -} - -static inline int -orion_spi_write_read_8bit(struct spi_device *spi, - const u8 **tx_buf, u8 **rx_buf) -{ - void __iomem *tx_reg, *rx_reg, *int_reg; - struct orion_spi *orion_spi; - - orion_spi = spi_master_get_devdata(spi->master); - tx_reg = spi_reg(orion_spi, ORION_SPI_DATA_OUT_REG); - rx_reg = spi_reg(orion_spi, ORION_SPI_DATA_IN_REG); - int_reg = spi_reg(orion_spi, ORION_SPI_INT_CAUSE_REG); - - /* clear the interrupt cause register */ - writel(0x0, int_reg); - - if (tx_buf && *tx_buf) - writel(*(*tx_buf)++, tx_reg); - else - writel(0, tx_reg); - - if (orion_spi_wait_till_ready(orion_spi) < 0) { - dev_err(&spi->dev, "TXS timed out\n"); - return -1; - } - - if (rx_buf && *rx_buf) - *(*rx_buf)++ = readl(rx_reg); - - return 1; -} - -static inline int -orion_spi_write_read_16bit(struct spi_device *spi, - const u16 **tx_buf, u16 **rx_buf) -{ - void __iomem *tx_reg, *rx_reg, *int_reg; - struct orion_spi *orion_spi; - - orion_spi = spi_master_get_devdata(spi->master); - tx_reg = spi_reg(orion_spi, ORION_SPI_DATA_OUT_REG); - rx_reg = spi_reg(orion_spi, ORION_SPI_DATA_IN_REG); - int_reg = spi_reg(orion_spi, ORION_SPI_INT_CAUSE_REG); - - /* clear the interrupt cause register */ - writel(0x0, int_reg); - - if (tx_buf && *tx_buf) - writel(__cpu_to_le16(get_unaligned((*tx_buf)++)), tx_reg); - else - writel(0, tx_reg); - - if (orion_spi_wait_till_ready(orion_spi) < 0) { - dev_err(&spi->dev, "TXS timed out\n"); - return -1; - } - - if (rx_buf && *rx_buf) - put_unaligned(__le16_to_cpu(readl(rx_reg)), (*rx_buf)++); - - return 1; -} - -static unsigned int -orion_spi_write_read(struct spi_device *spi, struct spi_transfer *xfer) -{ - struct orion_spi *orion_spi; - unsigned int count; - int word_len; - - orion_spi = spi_master_get_devdata(spi->master); - word_len = spi->bits_per_word; - count = xfer->len; - - if (word_len == 8) { - const u8 *tx = xfer->tx_buf; - u8 *rx = xfer->rx_buf; - - do { - if (orion_spi_write_read_8bit(spi, &tx, &rx) < 0) - goto out; - count--; - } while (count); - } else if (word_len == 16) { - const u16 *tx = xfer->tx_buf; - u16 *rx = xfer->rx_buf; - - do { - if (orion_spi_write_read_16bit(spi, &tx, &rx) < 0) - goto out; - count -= 2; - } while (count); - } 
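/*
 * The polled loops above move one word per call: orion_spi_write_read_8bit()
 * and _16bit() clear the interrupt cause register, write DATA_OUT (or 0 when
 * there is no tx buffer), spin in orion_spi_wait_till_ready() until the word
 * has been clocked through, then pick up DATA_IN.  'count' therefore holds
 * the bytes still outstanding when control reaches the 'out' label below, so
 * the caller gets back the number of bytes actually transferred.
 */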
- -out: - return xfer->len - count; -} - - -static void orion_spi_work(struct work_struct *work) -{ - struct orion_spi *orion_spi = - container_of(work, struct orion_spi, work); - - spin_lock_irq(&orion_spi->lock); - while (!list_empty(&orion_spi->msg_queue)) { - struct spi_message *m; - struct spi_device *spi; - struct spi_transfer *t = NULL; - int par_override = 0; - int status = 0; - int cs_active = 0; - - m = container_of(orion_spi->msg_queue.next, struct spi_message, - queue); - - list_del_init(&m->queue); - spin_unlock_irq(&orion_spi->lock); - - spi = m->spi; - - /* Load defaults */ - status = orion_spi_setup_transfer(spi, NULL); - - if (status < 0) - goto msg_done; - - list_for_each_entry(t, &m->transfers, transfer_list) { - if (par_override || t->speed_hz || t->bits_per_word) { - par_override = 1; - status = orion_spi_setup_transfer(spi, t); - if (status < 0) - break; - if (!t->speed_hz && !t->bits_per_word) - par_override = 0; - } - - if (!cs_active) { - orion_spi_set_cs(orion_spi, 1); - cs_active = 1; - } - - if (t->len) - m->actual_length += - orion_spi_write_read(spi, t); - - if (t->delay_usecs) - udelay(t->delay_usecs); - - if (t->cs_change) { - orion_spi_set_cs(orion_spi, 0); - cs_active = 0; - } - } - -msg_done: - if (cs_active) - orion_spi_set_cs(orion_spi, 0); - - m->status = status; - m->complete(m->context); - - spin_lock_irq(&orion_spi->lock); - } - - spin_unlock_irq(&orion_spi->lock); -} - -static int __init orion_spi_reset(struct orion_spi *orion_spi) -{ - /* Verify that the CS is deasserted */ - orion_spi_set_cs(orion_spi, 0); - - return 0; -} - -static int orion_spi_setup(struct spi_device *spi) -{ - struct orion_spi *orion_spi; - - orion_spi = spi_master_get_devdata(spi->master); - - /* Fix ac timing if required. */ - if (orion_spi->spi_info->enable_clock_fix) - orion_spi_setbits(orion_spi, ORION_SPI_IF_CONFIG_REG, - (1 << 14)); - - if ((spi->max_speed_hz == 0) - || (spi->max_speed_hz > orion_spi->max_speed)) - spi->max_speed_hz = orion_spi->max_speed; - - if (spi->max_speed_hz < orion_spi->min_speed) { - dev_err(&spi->dev, "setup: requested speed too low %d Hz\n", - spi->max_speed_hz); - return -EINVAL; - } - - /* - * baudrate & width will be set orion_spi_setup_transfer - */ - return 0; -} - -static int orion_spi_transfer(struct spi_device *spi, struct spi_message *m) -{ - struct orion_spi *orion_spi; - struct spi_transfer *t = NULL; - unsigned long flags; - - m->actual_length = 0; - m->status = 0; - - /* reject invalid messages and transfers */ - if (list_empty(&m->transfers) || !m->complete) - return -EINVAL; - - orion_spi = spi_master_get_devdata(spi->master); - - list_for_each_entry(t, &m->transfers, transfer_list) { - unsigned int bits_per_word = spi->bits_per_word; - - if (t->tx_buf == NULL && t->rx_buf == NULL && t->len) { - dev_err(&spi->dev, - "message rejected : " - "invalid transfer data buffers\n"); - goto msg_rejected; - } - - if (t->bits_per_word) - bits_per_word = t->bits_per_word; - - if ((bits_per_word != 8) && (bits_per_word != 16)) { - dev_err(&spi->dev, - "message rejected : " - "invalid transfer bits_per_word (%d bits)\n", - bits_per_word); - goto msg_rejected; - } - /*make sure buffer length is even when working in 16 bit mode*/ - if ((t->bits_per_word == 16) && (t->len & 1)) { - dev_err(&spi->dev, - "message rejected : " - "odd data length (%d) while in 16 bit mode\n", - t->len); - goto msg_rejected; - } - - if (t->speed_hz && t->speed_hz < orion_spi->min_speed) { - dev_err(&spi->dev, - "message rejected : " - "device min speed (%d Hz) 
exceeds " - "required transfer speed (%d Hz)\n", - orion_spi->min_speed, t->speed_hz); - goto msg_rejected; - } - } - - - spin_lock_irqsave(&orion_spi->lock, flags); - list_add_tail(&m->queue, &orion_spi->msg_queue); - queue_work(orion_spi_wq, &orion_spi->work); - spin_unlock_irqrestore(&orion_spi->lock, flags); - - return 0; -msg_rejected: - /* Message rejected and not queued */ - m->status = -EINVAL; - if (m->complete) - m->complete(m->context); - return -EINVAL; -} - -static int __init orion_spi_probe(struct platform_device *pdev) -{ - struct spi_master *master; - struct orion_spi *spi; - struct resource *r; - struct orion_spi_info *spi_info; - int status = 0; - - spi_info = pdev->dev.platform_data; - - master = spi_alloc_master(&pdev->dev, sizeof *spi); - if (master == NULL) { - dev_dbg(&pdev->dev, "master allocation failed\n"); - return -ENOMEM; - } - - if (pdev->id != -1) - master->bus_num = pdev->id; - - /* we support only mode 0, and no options */ - master->mode_bits = 0; - - master->setup = orion_spi_setup; - master->transfer = orion_spi_transfer; - master->num_chipselect = ORION_NUM_CHIPSELECTS; - - dev_set_drvdata(&pdev->dev, master); - - spi = spi_master_get_devdata(master); - spi->master = master; - spi->spi_info = spi_info; - - spi->max_speed = DIV_ROUND_UP(spi_info->tclk, 4); - spi->min_speed = DIV_ROUND_UP(spi_info->tclk, 30); - - r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (r == NULL) { - status = -ENODEV; - goto out; - } - - if (!request_mem_region(r->start, (r->end - r->start) + 1, - dev_name(&pdev->dev))) { - status = -EBUSY; - goto out; - } - spi->base = ioremap(r->start, SZ_1K); - - INIT_WORK(&spi->work, orion_spi_work); - - spin_lock_init(&spi->lock); - INIT_LIST_HEAD(&spi->msg_queue); - - if (orion_spi_reset(spi) < 0) - goto out_rel_mem; - - status = spi_register_master(master); - if (status < 0) - goto out_rel_mem; - - return status; - -out_rel_mem: - release_mem_region(r->start, (r->end - r->start) + 1); - -out: - spi_master_put(master); - return status; -} - - -static int __exit orion_spi_remove(struct platform_device *pdev) -{ - struct spi_master *master; - struct orion_spi *spi; - struct resource *r; - - master = dev_get_drvdata(&pdev->dev); - spi = spi_master_get_devdata(master); - - cancel_work_sync(&spi->work); - - r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - release_mem_region(r->start, (r->end - r->start) + 1); - - spi_unregister_master(master); - - return 0; -} - -MODULE_ALIAS("platform:" DRIVER_NAME); - -static struct platform_driver orion_spi_driver = { - .driver = { - .name = DRIVER_NAME, - .owner = THIS_MODULE, - }, - .remove = __exit_p(orion_spi_remove), -}; - -static int __init orion_spi_init(void) -{ - orion_spi_wq = create_singlethread_workqueue( - orion_spi_driver.driver.name); - if (orion_spi_wq == NULL) - return -ENOMEM; - - return platform_driver_probe(&orion_spi_driver, orion_spi_probe); -} -module_init(orion_spi_init); - -static void __exit orion_spi_exit(void) -{ - flush_workqueue(orion_spi_wq); - platform_driver_unregister(&orion_spi_driver); - - destroy_workqueue(orion_spi_wq); -} -module_exit(orion_spi_exit); - -MODULE_DESCRIPTION("Orion SPI driver"); -MODULE_AUTHOR("Shadi Ammouri "); -MODULE_LICENSE("GPL"); diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c deleted file mode 100644 index dc25bee..0000000 --- a/drivers/spi/pxa2xx_spi.c +++ /dev/null @@ -1,1816 +0,0 @@ -/* - * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs - * - * This program is free software; you can redistribute it and/or 
modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - - -MODULE_AUTHOR("Stephen Street"); -MODULE_DESCRIPTION("PXA2xx SSP SPI Controller"); -MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:pxa2xx-spi"); - -#define MAX_BUSES 3 - -#define TIMOUT_DFLT 1000 - -#define DMA_INT_MASK (DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR) -#define RESET_DMA_CHANNEL (DCSR_NODESC | DMA_INT_MASK) -#define IS_DMA_ALIGNED(x) ((((u32)(x)) & 0x07) == 0) -#define MAX_DMA_LEN 8191 -#define DMA_ALIGNMENT 8 - -/* - * for testing SSCR1 changes that require SSP restart, basically - * everything except the service and interrupt enables, the pxa270 developer - * manual says only SSCR1_SCFR, SSCR1_SPH, SSCR1_SPO need to be in this - * list, but the PXA255 dev man says all bits without really meaning the - * service and interrupt enables - */ -#define SSCR1_CHANGE_MASK (SSCR1_TTELP | SSCR1_TTE | SSCR1_SCFR \ - | SSCR1_ECRA | SSCR1_ECRB | SSCR1_SCLKDIR \ - | SSCR1_SFRMDIR | SSCR1_RWOT | SSCR1_TRAIL \ - | SSCR1_IFS | SSCR1_STRF | SSCR1_EFWR \ - | SSCR1_RFT | SSCR1_TFT | SSCR1_MWDS \ - | SSCR1_SPH | SSCR1_SPO | SSCR1_LBM) - -#define DEFINE_SSP_REG(reg, off) \ -static inline u32 read_##reg(void const __iomem *p) \ -{ return __raw_readl(p + (off)); } \ -\ -static inline void write_##reg(u32 v, void __iomem *p) \ -{ __raw_writel(v, p + (off)); } - -DEFINE_SSP_REG(SSCR0, 0x00) -DEFINE_SSP_REG(SSCR1, 0x04) -DEFINE_SSP_REG(SSSR, 0x08) -DEFINE_SSP_REG(SSITR, 0x0c) -DEFINE_SSP_REG(SSDR, 0x10) -DEFINE_SSP_REG(SSTO, 0x28) -DEFINE_SSP_REG(SSPSP, 0x2c) - -#define START_STATE ((void*)0) -#define RUNNING_STATE ((void*)1) -#define DONE_STATE ((void*)2) -#define ERROR_STATE ((void*)-1) - -#define QUEUE_RUNNING 0 -#define QUEUE_STOPPED 1 - -struct driver_data { - /* Driver model hookup */ - struct platform_device *pdev; - - /* SSP Info */ - struct ssp_device *ssp; - - /* SPI framework hookup */ - enum pxa_ssp_type ssp_type; - struct spi_master *master; - - /* PXA hookup */ - struct pxa2xx_spi_master *master_info; - - /* DMA setup stuff */ - int rx_channel; - int tx_channel; - u32 *null_dma_buf; - - /* SSP register addresses */ - void __iomem *ioaddr; - u32 ssdr_physical; - - /* SSP masks*/ - u32 dma_cr1; - u32 int_cr1; - u32 clear_sr; - u32 mask_sr; - - /* Driver message queue */ - struct workqueue_struct *workqueue; - struct work_struct pump_messages; - spinlock_t lock; - struct list_head queue; - int busy; - int run; - - /* Message Transfer pump */ - struct tasklet_struct pump_transfers; - - /* Current message transfer state info */ - struct spi_message* cur_msg; - struct spi_transfer* cur_transfer; - struct chip_data *cur_chip; - size_t len; - void *tx; - void *tx_end; - void *rx; - void *rx_end; - int dma_mapped; - dma_addr_t rx_dma; - dma_addr_t tx_dma; - size_t rx_map_len; - size_t 
tx_map_len; - u8 n_bytes; - u32 dma_width; - int (*write)(struct driver_data *drv_data); - int (*read)(struct driver_data *drv_data); - irqreturn_t (*transfer_handler)(struct driver_data *drv_data); - void (*cs_control)(u32 command); -}; - -struct chip_data { - u32 cr0; - u32 cr1; - u32 psp; - u32 timeout; - u8 n_bytes; - u32 dma_width; - u32 dma_burst_size; - u32 threshold; - u32 dma_threshold; - u8 enable_dma; - u8 bits_per_word; - u32 speed_hz; - union { - int gpio_cs; - unsigned int frm; - }; - int gpio_cs_inverted; - int (*write)(struct driver_data *drv_data); - int (*read)(struct driver_data *drv_data); - void (*cs_control)(u32 command); -}; - -static void pump_messages(struct work_struct *work); - -static void cs_assert(struct driver_data *drv_data) -{ - struct chip_data *chip = drv_data->cur_chip; - - if (drv_data->ssp_type == CE4100_SSP) { - write_SSSR(drv_data->cur_chip->frm, drv_data->ioaddr); - return; - } - - if (chip->cs_control) { - chip->cs_control(PXA2XX_CS_ASSERT); - return; - } - - if (gpio_is_valid(chip->gpio_cs)) - gpio_set_value(chip->gpio_cs, chip->gpio_cs_inverted); -} - -static void cs_deassert(struct driver_data *drv_data) -{ - struct chip_data *chip = drv_data->cur_chip; - - if (drv_data->ssp_type == CE4100_SSP) - return; - - if (chip->cs_control) { - chip->cs_control(PXA2XX_CS_DEASSERT); - return; - } - - if (gpio_is_valid(chip->gpio_cs)) - gpio_set_value(chip->gpio_cs, !chip->gpio_cs_inverted); -} - -static void write_SSSR_CS(struct driver_data *drv_data, u32 val) -{ - void __iomem *reg = drv_data->ioaddr; - - if (drv_data->ssp_type == CE4100_SSP) - val |= read_SSSR(reg) & SSSR_ALT_FRM_MASK; - - write_SSSR(val, reg); -} - -static int pxa25x_ssp_comp(struct driver_data *drv_data) -{ - if (drv_data->ssp_type == PXA25x_SSP) - return 1; - if (drv_data->ssp_type == CE4100_SSP) - return 1; - return 0; -} - -static int flush(struct driver_data *drv_data) -{ - unsigned long limit = loops_per_jiffy << 1; - - void __iomem *reg = drv_data->ioaddr; - - do { - while (read_SSSR(reg) & SSSR_RNE) { - read_SSDR(reg); - } - } while ((read_SSSR(reg) & SSSR_BSY) && --limit); - write_SSSR_CS(drv_data, SSSR_ROR); - - return limit; -} - -static int null_writer(struct driver_data *drv_data) -{ - void __iomem *reg = drv_data->ioaddr; - u8 n_bytes = drv_data->n_bytes; - - if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK) - || (drv_data->tx == drv_data->tx_end)) - return 0; - - write_SSDR(0, reg); - drv_data->tx += n_bytes; - - return 1; -} - -static int null_reader(struct driver_data *drv_data) -{ - void __iomem *reg = drv_data->ioaddr; - u8 n_bytes = drv_data->n_bytes; - - while ((read_SSSR(reg) & SSSR_RNE) - && (drv_data->rx < drv_data->rx_end)) { - read_SSDR(reg); - drv_data->rx += n_bytes; - } - - return drv_data->rx == drv_data->rx_end; -} - -static int u8_writer(struct driver_data *drv_data) -{ - void __iomem *reg = drv_data->ioaddr; - - if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK) - || (drv_data->tx == drv_data->tx_end)) - return 0; - - write_SSDR(*(u8 *)(drv_data->tx), reg); - ++drv_data->tx; - - return 1; -} - -static int u8_reader(struct driver_data *drv_data) -{ - void __iomem *reg = drv_data->ioaddr; - - while ((read_SSSR(reg) & SSSR_RNE) - && (drv_data->rx < drv_data->rx_end)) { - *(u8 *)(drv_data->rx) = read_SSDR(reg); - ++drv_data->rx; - } - - return drv_data->rx == drv_data->rx_end; -} - -static int u16_writer(struct driver_data *drv_data) -{ - void __iomem *reg = drv_data->ioaddr; - - if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK) - || 
(drv_data->tx == drv_data->tx_end)) - return 0; - - write_SSDR(*(u16 *)(drv_data->tx), reg); - drv_data->tx += 2; - - return 1; -} - -static int u16_reader(struct driver_data *drv_data) -{ - void __iomem *reg = drv_data->ioaddr; - - while ((read_SSSR(reg) & SSSR_RNE) - && (drv_data->rx < drv_data->rx_end)) { - *(u16 *)(drv_data->rx) = read_SSDR(reg); - drv_data->rx += 2; - } - - return drv_data->rx == drv_data->rx_end; -} - -static int u32_writer(struct driver_data *drv_data) -{ - void __iomem *reg = drv_data->ioaddr; - - if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK) - || (drv_data->tx == drv_data->tx_end)) - return 0; - - write_SSDR(*(u32 *)(drv_data->tx), reg); - drv_data->tx += 4; - - return 1; -} - -static int u32_reader(struct driver_data *drv_data) -{ - void __iomem *reg = drv_data->ioaddr; - - while ((read_SSSR(reg) & SSSR_RNE) - && (drv_data->rx < drv_data->rx_end)) { - *(u32 *)(drv_data->rx) = read_SSDR(reg); - drv_data->rx += 4; - } - - return drv_data->rx == drv_data->rx_end; -} - -static void *next_transfer(struct driver_data *drv_data) -{ - struct spi_message *msg = drv_data->cur_msg; - struct spi_transfer *trans = drv_data->cur_transfer; - - /* Move to next transfer */ - if (trans->transfer_list.next != &msg->transfers) { - drv_data->cur_transfer = - list_entry(trans->transfer_list.next, - struct spi_transfer, - transfer_list); - return RUNNING_STATE; - } else - return DONE_STATE; -} - -static int map_dma_buffers(struct driver_data *drv_data) -{ - struct spi_message *msg = drv_data->cur_msg; - struct device *dev = &msg->spi->dev; - - if (!drv_data->cur_chip->enable_dma) - return 0; - - if (msg->is_dma_mapped) - return drv_data->rx_dma && drv_data->tx_dma; - - if (!IS_DMA_ALIGNED(drv_data->rx) || !IS_DMA_ALIGNED(drv_data->tx)) - return 0; - - /* Modify setup if rx buffer is null */ - if (drv_data->rx == NULL) { - *drv_data->null_dma_buf = 0; - drv_data->rx = drv_data->null_dma_buf; - drv_data->rx_map_len = 4; - } else - drv_data->rx_map_len = drv_data->len; - - - /* Modify setup if tx buffer is null */ - if (drv_data->tx == NULL) { - *drv_data->null_dma_buf = 0; - drv_data->tx = drv_data->null_dma_buf; - drv_data->tx_map_len = 4; - } else - drv_data->tx_map_len = drv_data->len; - - /* Stream map the tx buffer. Always do DMA_TO_DEVICE first - * so we flush the cache *before* invalidating it, in case - * the tx and rx buffers overlap. 
- */ - drv_data->tx_dma = dma_map_single(dev, drv_data->tx, - drv_data->tx_map_len, DMA_TO_DEVICE); - if (dma_mapping_error(dev, drv_data->tx_dma)) - return 0; - - /* Stream map the rx buffer */ - drv_data->rx_dma = dma_map_single(dev, drv_data->rx, - drv_data->rx_map_len, DMA_FROM_DEVICE); - if (dma_mapping_error(dev, drv_data->rx_dma)) { - dma_unmap_single(dev, drv_data->tx_dma, - drv_data->tx_map_len, DMA_TO_DEVICE); - return 0; - } - - return 1; -} - -static void unmap_dma_buffers(struct driver_data *drv_data) -{ - struct device *dev; - - if (!drv_data->dma_mapped) - return; - - if (!drv_data->cur_msg->is_dma_mapped) { - dev = &drv_data->cur_msg->spi->dev; - dma_unmap_single(dev, drv_data->rx_dma, - drv_data->rx_map_len, DMA_FROM_DEVICE); - dma_unmap_single(dev, drv_data->tx_dma, - drv_data->tx_map_len, DMA_TO_DEVICE); - } - - drv_data->dma_mapped = 0; -} - -/* caller already set message->status; dma and pio irqs are blocked */ -static void giveback(struct driver_data *drv_data) -{ - struct spi_transfer* last_transfer; - unsigned long flags; - struct spi_message *msg; - - spin_lock_irqsave(&drv_data->lock, flags); - msg = drv_data->cur_msg; - drv_data->cur_msg = NULL; - drv_data->cur_transfer = NULL; - queue_work(drv_data->workqueue, &drv_data->pump_messages); - spin_unlock_irqrestore(&drv_data->lock, flags); - - last_transfer = list_entry(msg->transfers.prev, - struct spi_transfer, - transfer_list); - - /* Delay if requested before any change in chip select */ - if (last_transfer->delay_usecs) - udelay(last_transfer->delay_usecs); - - /* Drop chip select UNLESS cs_change is true or we are returning - * a message with an error, or next message is for another chip - */ - if (!last_transfer->cs_change) - cs_deassert(drv_data); - else { - struct spi_message *next_msg; - - /* Holding of cs was hinted, but we need to make sure - * the next message is for the same chip. Don't waste - * time with the following tests unless this was hinted. - * - * We cannot postpone this until pump_messages, because - * after calling msg->complete (below) the driver that - * sent the current message could be unloaded, which - * could invalidate the cs_control() callback... 
- */ - - /* get a pointer to the next message, if any */ - spin_lock_irqsave(&drv_data->lock, flags); - if (list_empty(&drv_data->queue)) - next_msg = NULL; - else - next_msg = list_entry(drv_data->queue.next, - struct spi_message, queue); - spin_unlock_irqrestore(&drv_data->lock, flags); - - /* see if the next and current messages point - * to the same chip - */ - if (next_msg && next_msg->spi != msg->spi) - next_msg = NULL; - if (!next_msg || msg->state == ERROR_STATE) - cs_deassert(drv_data); - } - - msg->state = NULL; - if (msg->complete) - msg->complete(msg->context); - - drv_data->cur_chip = NULL; -} - -static int wait_ssp_rx_stall(void const __iomem *ioaddr) -{ - unsigned long limit = loops_per_jiffy << 1; - - while ((read_SSSR(ioaddr) & SSSR_BSY) && --limit) - cpu_relax(); - - return limit; -} - -static int wait_dma_channel_stop(int channel) -{ - unsigned long limit = loops_per_jiffy << 1; - - while (!(DCSR(channel) & DCSR_STOPSTATE) && --limit) - cpu_relax(); - - return limit; -} - -static void dma_error_stop(struct driver_data *drv_data, const char *msg) -{ - void __iomem *reg = drv_data->ioaddr; - - /* Stop and reset */ - DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; - DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; - write_SSSR_CS(drv_data, drv_data->clear_sr); - write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg); - if (!pxa25x_ssp_comp(drv_data)) - write_SSTO(0, reg); - flush(drv_data); - write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); - - unmap_dma_buffers(drv_data); - - dev_err(&drv_data->pdev->dev, "%s\n", msg); - - drv_data->cur_msg->state = ERROR_STATE; - tasklet_schedule(&drv_data->pump_transfers); -} - -static void dma_transfer_complete(struct driver_data *drv_data) -{ - void __iomem *reg = drv_data->ioaddr; - struct spi_message *msg = drv_data->cur_msg; - - /* Clear and disable interrupts on SSP and DMA channels*/ - write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg); - write_SSSR_CS(drv_data, drv_data->clear_sr); - DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; - DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; - - if (wait_dma_channel_stop(drv_data->rx_channel) == 0) - dev_err(&drv_data->pdev->dev, - "dma_handler: dma rx channel stop failed\n"); - - if (wait_ssp_rx_stall(drv_data->ioaddr) == 0) - dev_err(&drv_data->pdev->dev, - "dma_transfer: ssp rx stall failed\n"); - - unmap_dma_buffers(drv_data); - - /* update the buffer pointer for the amount completed in dma */ - drv_data->rx += drv_data->len - - (DCMD(drv_data->rx_channel) & DCMD_LENGTH); - - /* read trailing data from fifo, it does not matter how many - * bytes are in the fifo just read until buffer is full - * or fifo is empty, which ever occurs first */ - drv_data->read(drv_data); - - /* return count of what was actually read */ - msg->actual_length += drv_data->len - - (drv_data->rx_end - drv_data->rx); - - /* Transfer delays and chip select release are - * handled in pump_transfers or giveback - */ - - /* Move to next transfer */ - msg->state = next_transfer(drv_data); - - /* Schedule transfer tasklet */ - tasklet_schedule(&drv_data->pump_transfers); -} - -static void dma_handler(int channel, void *data) -{ - struct driver_data *drv_data = data; - u32 irq_status = DCSR(channel) & DMA_INT_MASK; - - if (irq_status & DCSR_BUSERR) { - - if (channel == drv_data->tx_channel) - dma_error_stop(drv_data, - "dma_handler: " - "bad bus address on tx channel"); - else - dma_error_stop(drv_data, - "dma_handler: " - "bad bus address on rx channel"); - return; - } - - /* PXA255x_SSP has no timeout interrupt, wait 
for tailing bytes */ - if ((channel == drv_data->tx_channel) - && (irq_status & DCSR_ENDINTR) - && (drv_data->ssp_type == PXA25x_SSP)) { - - /* Wait for rx to stall */ - if (wait_ssp_rx_stall(drv_data->ioaddr) == 0) - dev_err(&drv_data->pdev->dev, - "dma_handler: ssp rx stall failed\n"); - - /* finish this transfer, start the next */ - dma_transfer_complete(drv_data); - } -} - -static irqreturn_t dma_transfer(struct driver_data *drv_data) -{ - u32 irq_status; - void __iomem *reg = drv_data->ioaddr; - - irq_status = read_SSSR(reg) & drv_data->mask_sr; - if (irq_status & SSSR_ROR) { - dma_error_stop(drv_data, "dma_transfer: fifo overrun"); - return IRQ_HANDLED; - } - - /* Check for false positive timeout */ - if ((irq_status & SSSR_TINT) - && (DCSR(drv_data->tx_channel) & DCSR_RUN)) { - write_SSSR(SSSR_TINT, reg); - return IRQ_HANDLED; - } - - if (irq_status & SSSR_TINT || drv_data->rx == drv_data->rx_end) { - - /* Clear and disable timeout interrupt, do the rest in - * dma_transfer_complete */ - if (!pxa25x_ssp_comp(drv_data)) - write_SSTO(0, reg); - - /* finish this transfer, start the next */ - dma_transfer_complete(drv_data); - - return IRQ_HANDLED; - } - - /* Opps problem detected */ - return IRQ_NONE; -} - -static void reset_sccr1(struct driver_data *drv_data) -{ - void __iomem *reg = drv_data->ioaddr; - struct chip_data *chip = drv_data->cur_chip; - u32 sccr1_reg; - - sccr1_reg = read_SSCR1(reg) & ~drv_data->int_cr1; - sccr1_reg &= ~SSCR1_RFT; - sccr1_reg |= chip->threshold; - write_SSCR1(sccr1_reg, reg); -} - -static void int_error_stop(struct driver_data *drv_data, const char* msg) -{ - void __iomem *reg = drv_data->ioaddr; - - /* Stop and reset SSP */ - write_SSSR_CS(drv_data, drv_data->clear_sr); - reset_sccr1(drv_data); - if (!pxa25x_ssp_comp(drv_data)) - write_SSTO(0, reg); - flush(drv_data); - write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); - - dev_err(&drv_data->pdev->dev, "%s\n", msg); - - drv_data->cur_msg->state = ERROR_STATE; - tasklet_schedule(&drv_data->pump_transfers); -} - -static void int_transfer_complete(struct driver_data *drv_data) -{ - void __iomem *reg = drv_data->ioaddr; - - /* Stop SSP */ - write_SSSR_CS(drv_data, drv_data->clear_sr); - reset_sccr1(drv_data); - if (!pxa25x_ssp_comp(drv_data)) - write_SSTO(0, reg); - - /* Update total byte transferred return count actual bytes read */ - drv_data->cur_msg->actual_length += drv_data->len - - (drv_data->rx_end - drv_data->rx); - - /* Transfer delays and chip select release are - * handled in pump_transfers or giveback - */ - - /* Move to next transfer */ - drv_data->cur_msg->state = next_transfer(drv_data); - - /* Schedule transfer tasklet */ - tasklet_schedule(&drv_data->pump_transfers); -} - -static irqreturn_t interrupt_transfer(struct driver_data *drv_data) -{ - void __iomem *reg = drv_data->ioaddr; - - u32 irq_mask = (read_SSCR1(reg) & SSCR1_TIE) ? 
- drv_data->mask_sr : drv_data->mask_sr & ~SSSR_TFS; - - u32 irq_status = read_SSSR(reg) & irq_mask; - - if (irq_status & SSSR_ROR) { - int_error_stop(drv_data, "interrupt_transfer: fifo overrun"); - return IRQ_HANDLED; - } - - if (irq_status & SSSR_TINT) { - write_SSSR(SSSR_TINT, reg); - if (drv_data->read(drv_data)) { - int_transfer_complete(drv_data); - return IRQ_HANDLED; - } - } - - /* Drain rx fifo, Fill tx fifo and prevent overruns */ - do { - if (drv_data->read(drv_data)) { - int_transfer_complete(drv_data); - return IRQ_HANDLED; - } - } while (drv_data->write(drv_data)); - - if (drv_data->read(drv_data)) { - int_transfer_complete(drv_data); - return IRQ_HANDLED; - } - - if (drv_data->tx == drv_data->tx_end) { - u32 bytes_left; - u32 sccr1_reg; - - sccr1_reg = read_SSCR1(reg); - sccr1_reg &= ~SSCR1_TIE; - - /* - * PXA25x_SSP has no timeout, set up rx threshould for the - * remaining RX bytes. - */ - if (pxa25x_ssp_comp(drv_data)) { - - sccr1_reg &= ~SSCR1_RFT; - - bytes_left = drv_data->rx_end - drv_data->rx; - switch (drv_data->n_bytes) { - case 4: - bytes_left >>= 1; - case 2: - bytes_left >>= 1; - } - - if (bytes_left > RX_THRESH_DFLT) - bytes_left = RX_THRESH_DFLT; - - sccr1_reg |= SSCR1_RxTresh(bytes_left); - } - write_SSCR1(sccr1_reg, reg); - } - - /* We did something */ - return IRQ_HANDLED; -} - -static irqreturn_t ssp_int(int irq, void *dev_id) -{ - struct driver_data *drv_data = dev_id; - void __iomem *reg = drv_data->ioaddr; - u32 sccr1_reg = read_SSCR1(reg); - u32 mask = drv_data->mask_sr; - u32 status; - - status = read_SSSR(reg); - - /* Ignore possible writes if we don't need to write */ - if (!(sccr1_reg & SSCR1_TIE)) - mask &= ~SSSR_TFS; - - if (!(status & mask)) - return IRQ_NONE; - - if (!drv_data->cur_msg) { - - write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); - write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg); - if (!pxa25x_ssp_comp(drv_data)) - write_SSTO(0, reg); - write_SSSR_CS(drv_data, drv_data->clear_sr); - - dev_err(&drv_data->pdev->dev, "bad message state " - "in interrupt handler\n"); - - /* Never fail */ - return IRQ_HANDLED; - } - - return drv_data->transfer_handler(drv_data); -} - -static int set_dma_burst_and_threshold(struct chip_data *chip, - struct spi_device *spi, - u8 bits_per_word, u32 *burst_code, - u32 *threshold) -{ - struct pxa2xx_spi_chip *chip_info = - (struct pxa2xx_spi_chip *)spi->controller_data; - int bytes_per_word; - int burst_bytes; - int thresh_words; - int req_burst_size; - int retval = 0; - - /* Set the threshold (in registers) to equal the same amount of data - * as represented by burst size (in bytes). The computation below - * is (burst_size rounded up to nearest 8 byte, word or long word) - * divided by (bytes/register); the tx threshold is the inverse of - * the rx, so that there will always be enough data in the rx fifo - * to satisfy a burst, and there will always be enough space in the - * tx fifo to accept a burst (a tx burst will overwrite the fifo if - * there is not enough space), there must always remain enough empty - * space in the rx fifo for any data loaded to the tx fifo. - * Whenever burst_size (in bytes) equals bits/word, the fifo threshold - * will be 8, or half the fifo; - * The threshold can only be set to 2, 4 or 8, but not 16, because - * to burst 16 to the tx fifo, the fifo would have to be empty; - * however, the minimum fifo trigger level is 1, and the tx will - * request service when the fifo is at this level, with only 15 spaces. 
- */ - - /* find bytes/word */ - if (bits_per_word <= 8) - bytes_per_word = 1; - else if (bits_per_word <= 16) - bytes_per_word = 2; - else - bytes_per_word = 4; - - /* use struct pxa2xx_spi_chip->dma_burst_size if available */ - if (chip_info) - req_burst_size = chip_info->dma_burst_size; - else { - switch (chip->dma_burst_size) { - default: - /* if the default burst size is not set, - * do it now */ - chip->dma_burst_size = DCMD_BURST8; - case DCMD_BURST8: - req_burst_size = 8; - break; - case DCMD_BURST16: - req_burst_size = 16; - break; - case DCMD_BURST32: - req_burst_size = 32; - break; - } - } - if (req_burst_size <= 8) { - *burst_code = DCMD_BURST8; - burst_bytes = 8; - } else if (req_burst_size <= 16) { - if (bytes_per_word == 1) { - /* don't burst more than 1/2 the fifo */ - *burst_code = DCMD_BURST8; - burst_bytes = 8; - retval = 1; - } else { - *burst_code = DCMD_BURST16; - burst_bytes = 16; - } - } else { - if (bytes_per_word == 1) { - /* don't burst more than 1/2 the fifo */ - *burst_code = DCMD_BURST8; - burst_bytes = 8; - retval = 1; - } else if (bytes_per_word == 2) { - /* don't burst more than 1/2 the fifo */ - *burst_code = DCMD_BURST16; - burst_bytes = 16; - retval = 1; - } else { - *burst_code = DCMD_BURST32; - burst_bytes = 32; - } - } - - thresh_words = burst_bytes / bytes_per_word; - - /* thresh_words will be between 2 and 8 */ - *threshold = (SSCR1_RxTresh(thresh_words) & SSCR1_RFT) - | (SSCR1_TxTresh(16-thresh_words) & SSCR1_TFT); - - return retval; -} - -static unsigned int ssp_get_clk_div(struct ssp_device *ssp, int rate) -{ - unsigned long ssp_clk = clk_get_rate(ssp->clk); - - if (ssp->type == PXA25x_SSP || ssp->type == CE4100_SSP) - return ((ssp_clk / (2 * rate) - 1) & 0xff) << 8; - else - return ((ssp_clk / rate - 1) & 0xfff) << 8; -} - -static void pump_transfers(unsigned long data) -{ - struct driver_data *drv_data = (struct driver_data *)data; - struct spi_message *message = NULL; - struct spi_transfer *transfer = NULL; - struct spi_transfer *previous = NULL; - struct chip_data *chip = NULL; - struct ssp_device *ssp = drv_data->ssp; - void __iomem *reg = drv_data->ioaddr; - u32 clk_div = 0; - u8 bits = 0; - u32 speed = 0; - u32 cr0; - u32 cr1; - u32 dma_thresh = drv_data->cur_chip->dma_threshold; - u32 dma_burst = drv_data->cur_chip->dma_burst_size; - - /* Get current state information */ - message = drv_data->cur_msg; - transfer = drv_data->cur_transfer; - chip = drv_data->cur_chip; - - /* Handle for abort */ - if (message->state == ERROR_STATE) { - message->status = -EIO; - giveback(drv_data); - return; - } - - /* Handle end of message */ - if (message->state == DONE_STATE) { - message->status = 0; - giveback(drv_data); - return; - } - - /* Delay if requested at end of transfer before CS change */ - if (message->state == RUNNING_STATE) { - previous = list_entry(transfer->transfer_list.prev, - struct spi_transfer, - transfer_list); - if (previous->delay_usecs) - udelay(previous->delay_usecs); - - /* Drop chip select only if cs_change is requested */ - if (previous->cs_change) - cs_deassert(drv_data); - } - - /* Check for transfers that need multiple DMA segments */ - if (transfer->len > MAX_DMA_LEN && chip->enable_dma) { - - /* reject already-mapped transfers; PIO won't always work */ - if (message->is_dma_mapped - || transfer->rx_dma || transfer->tx_dma) { - dev_err(&drv_data->pdev->dev, - "pump_transfers: mapped transfer length " - "of %u is greater than %d\n", - transfer->len, MAX_DMA_LEN); - message->status = -EINVAL; - giveback(drv_data); - return; - 
} - - /* warn ... we force this to PIO mode */ - if (printk_ratelimit()) - dev_warn(&message->spi->dev, "pump_transfers: " - "DMA disabled for transfer length %ld " - "greater than %d\n", - (long)drv_data->len, MAX_DMA_LEN); - } - - /* Setup the transfer state based on the type of transfer */ - if (flush(drv_data) == 0) { - dev_err(&drv_data->pdev->dev, "pump_transfers: flush failed\n"); - message->status = -EIO; - giveback(drv_data); - return; - } - drv_data->n_bytes = chip->n_bytes; - drv_data->dma_width = chip->dma_width; - drv_data->tx = (void *)transfer->tx_buf; - drv_data->tx_end = drv_data->tx + transfer->len; - drv_data->rx = transfer->rx_buf; - drv_data->rx_end = drv_data->rx + transfer->len; - drv_data->rx_dma = transfer->rx_dma; - drv_data->tx_dma = transfer->tx_dma; - drv_data->len = transfer->len & DCMD_LENGTH; - drv_data->write = drv_data->tx ? chip->write : null_writer; - drv_data->read = drv_data->rx ? chip->read : null_reader; - - /* Change speed and bit per word on a per transfer */ - cr0 = chip->cr0; - if (transfer->speed_hz || transfer->bits_per_word) { - - bits = chip->bits_per_word; - speed = chip->speed_hz; - - if (transfer->speed_hz) - speed = transfer->speed_hz; - - if (transfer->bits_per_word) - bits = transfer->bits_per_word; - - clk_div = ssp_get_clk_div(ssp, speed); - - if (bits <= 8) { - drv_data->n_bytes = 1; - drv_data->dma_width = DCMD_WIDTH1; - drv_data->read = drv_data->read != null_reader ? - u8_reader : null_reader; - drv_data->write = drv_data->write != null_writer ? - u8_writer : null_writer; - } else if (bits <= 16) { - drv_data->n_bytes = 2; - drv_data->dma_width = DCMD_WIDTH2; - drv_data->read = drv_data->read != null_reader ? - u16_reader : null_reader; - drv_data->write = drv_data->write != null_writer ? - u16_writer : null_writer; - } else if (bits <= 32) { - drv_data->n_bytes = 4; - drv_data->dma_width = DCMD_WIDTH4; - drv_data->read = drv_data->read != null_reader ? - u32_reader : null_reader; - drv_data->write = drv_data->write != null_writer ? - u32_writer : null_writer; - } - /* if bits/word is changed in dma mode, then must check the - * thresholds and burst also */ - if (chip->enable_dma) { - if (set_dma_burst_and_threshold(chip, message->spi, - bits, &dma_burst, - &dma_thresh)) - if (printk_ratelimit()) - dev_warn(&message->spi->dev, - "pump_transfers: " - "DMA burst size reduced to " - "match bits_per_word\n"); - } - - cr0 = clk_div - | SSCR0_Motorola - | SSCR0_DataSize(bits > 16 ? bits - 16 : bits) - | SSCR0_SSE - | (bits > 16 ? SSCR0_EDSS : 0); - } - - message->state = RUNNING_STATE; - - /* Try to map dma buffer and do a dma transfer if successful, but - * only if the length is non-zero and less than MAX_DMA_LEN. - * - * Zero-length non-descriptor DMA is illegal on PXA2xx; force use - * of PIO instead. Care is needed above because the transfer may - * have have been passed with buffers that are already dma mapped. - * A zero-length transfer in PIO mode will not try to write/read - * to/from the buffers - * - * REVISIT large transfers are exactly where we most want to be - * using DMA. If this happens much, split those transfers into - * multiple DMA segments rather than forcing PIO. 
- */ - drv_data->dma_mapped = 0; - if (drv_data->len > 0 && drv_data->len <= MAX_DMA_LEN) - drv_data->dma_mapped = map_dma_buffers(drv_data); - if (drv_data->dma_mapped) { - - /* Ensure we have the correct interrupt handler */ - drv_data->transfer_handler = dma_transfer; - - /* Setup rx DMA Channel */ - DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; - DSADR(drv_data->rx_channel) = drv_data->ssdr_physical; - DTADR(drv_data->rx_channel) = drv_data->rx_dma; - if (drv_data->rx == drv_data->null_dma_buf) - /* No target address increment */ - DCMD(drv_data->rx_channel) = DCMD_FLOWSRC - | drv_data->dma_width - | dma_burst - | drv_data->len; - else - DCMD(drv_data->rx_channel) = DCMD_INCTRGADDR - | DCMD_FLOWSRC - | drv_data->dma_width - | dma_burst - | drv_data->len; - - /* Setup tx DMA Channel */ - DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; - DSADR(drv_data->tx_channel) = drv_data->tx_dma; - DTADR(drv_data->tx_channel) = drv_data->ssdr_physical; - if (drv_data->tx == drv_data->null_dma_buf) - /* No source address increment */ - DCMD(drv_data->tx_channel) = DCMD_FLOWTRG - | drv_data->dma_width - | dma_burst - | drv_data->len; - else - DCMD(drv_data->tx_channel) = DCMD_INCSRCADDR - | DCMD_FLOWTRG - | drv_data->dma_width - | dma_burst - | drv_data->len; - - /* Enable dma end irqs on SSP to detect end of transfer */ - if (drv_data->ssp_type == PXA25x_SSP) - DCMD(drv_data->tx_channel) |= DCMD_ENDIRQEN; - - /* Clear status and start DMA engine */ - cr1 = chip->cr1 | dma_thresh | drv_data->dma_cr1; - write_SSSR(drv_data->clear_sr, reg); - DCSR(drv_data->rx_channel) |= DCSR_RUN; - DCSR(drv_data->tx_channel) |= DCSR_RUN; - } else { - /* Ensure we have the correct interrupt handler */ - drv_data->transfer_handler = interrupt_transfer; - - /* Clear status */ - cr1 = chip->cr1 | chip->threshold | drv_data->int_cr1; - write_SSSR_CS(drv_data, drv_data->clear_sr); - } - - /* see if we need to reload the config registers */ - if ((read_SSCR0(reg) != cr0) - || (read_SSCR1(reg) & SSCR1_CHANGE_MASK) != - (cr1 & SSCR1_CHANGE_MASK)) { - - /* stop the SSP, and update the other bits */ - write_SSCR0(cr0 & ~SSCR0_SSE, reg); - if (!pxa25x_ssp_comp(drv_data)) - write_SSTO(chip->timeout, reg); - /* first set CR1 without interrupt and service enables */ - write_SSCR1(cr1 & SSCR1_CHANGE_MASK, reg); - /* restart the SSP */ - write_SSCR0(cr0, reg); - - } else { - if (!pxa25x_ssp_comp(drv_data)) - write_SSTO(chip->timeout, reg); - } - - cs_assert(drv_data); - - /* after chip select, release the data by enabling service - * requests and interrupts, without changing any mode bits */ - write_SSCR1(cr1, reg); -} - -static void pump_messages(struct work_struct *work) -{ - struct driver_data *drv_data = - container_of(work, struct driver_data, pump_messages); - unsigned long flags; - - /* Lock queue and check for queue work */ - spin_lock_irqsave(&drv_data->lock, flags); - if (list_empty(&drv_data->queue) || drv_data->run == QUEUE_STOPPED) { - drv_data->busy = 0; - spin_unlock_irqrestore(&drv_data->lock, flags); - return; - } - - /* Make sure we are not already running a message */ - if (drv_data->cur_msg) { - spin_unlock_irqrestore(&drv_data->lock, flags); - return; - } - - /* Extract head of queue */ - drv_data->cur_msg = list_entry(drv_data->queue.next, - struct spi_message, queue); - list_del_init(&drv_data->cur_msg->queue); - - /* Initial message state*/ - drv_data->cur_msg->state = START_STATE; - drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next, - struct spi_transfer, - transfer_list); - - /* prepare 
to setup the SSP, in pump_transfers, using the per - * chip configuration */ - drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi); - - /* Mark as busy and launch transfers */ - tasklet_schedule(&drv_data->pump_transfers); - - drv_data->busy = 1; - spin_unlock_irqrestore(&drv_data->lock, flags); -} - -static int transfer(struct spi_device *spi, struct spi_message *msg) -{ - struct driver_data *drv_data = spi_master_get_devdata(spi->master); - unsigned long flags; - - spin_lock_irqsave(&drv_data->lock, flags); - - if (drv_data->run == QUEUE_STOPPED) { - spin_unlock_irqrestore(&drv_data->lock, flags); - return -ESHUTDOWN; - } - - msg->actual_length = 0; - msg->status = -EINPROGRESS; - msg->state = START_STATE; - - list_add_tail(&msg->queue, &drv_data->queue); - - if (drv_data->run == QUEUE_RUNNING && !drv_data->busy) - queue_work(drv_data->workqueue, &drv_data->pump_messages); - - spin_unlock_irqrestore(&drv_data->lock, flags); - - return 0; -} - -static int setup_cs(struct spi_device *spi, struct chip_data *chip, - struct pxa2xx_spi_chip *chip_info) -{ - int err = 0; - - if (chip == NULL || chip_info == NULL) - return 0; - - /* NOTE: setup() can be called multiple times, possibly with - * different chip_info, release previously requested GPIO - */ - if (gpio_is_valid(chip->gpio_cs)) - gpio_free(chip->gpio_cs); - - /* If (*cs_control) is provided, ignore GPIO chip select */ - if (chip_info->cs_control) { - chip->cs_control = chip_info->cs_control; - return 0; - } - - if (gpio_is_valid(chip_info->gpio_cs)) { - err = gpio_request(chip_info->gpio_cs, "SPI_CS"); - if (err) { - dev_err(&spi->dev, "failed to request chip select " - "GPIO%d\n", chip_info->gpio_cs); - return err; - } - - chip->gpio_cs = chip_info->gpio_cs; - chip->gpio_cs_inverted = spi->mode & SPI_CS_HIGH; - - err = gpio_direction_output(chip->gpio_cs, - !chip->gpio_cs_inverted); - } - - return err; -} - -static int setup(struct spi_device *spi) -{ - struct pxa2xx_spi_chip *chip_info = NULL; - struct chip_data *chip; - struct driver_data *drv_data = spi_master_get_devdata(spi->master); - struct ssp_device *ssp = drv_data->ssp; - unsigned int clk_div; - uint tx_thres = TX_THRESH_DFLT; - uint rx_thres = RX_THRESH_DFLT; - - if (!pxa25x_ssp_comp(drv_data) - && (spi->bits_per_word < 4 || spi->bits_per_word > 32)) { - dev_err(&spi->dev, "failed setup: ssp_type=%d, bits/wrd=%d " - "b/w not 4-32 for type non-PXA25x_SSP\n", - drv_data->ssp_type, spi->bits_per_word); - return -EINVAL; - } else if (pxa25x_ssp_comp(drv_data) - && (spi->bits_per_word < 4 - || spi->bits_per_word > 16)) { - dev_err(&spi->dev, "failed setup: ssp_type=%d, bits/wrd=%d " - "b/w not 4-16 for type PXA25x_SSP\n", - drv_data->ssp_type, spi->bits_per_word); - return -EINVAL; - } - - /* Only alloc on first setup */ - chip = spi_get_ctldata(spi); - if (!chip) { - chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL); - if (!chip) { - dev_err(&spi->dev, - "failed setup: can't allocate chip data\n"); - return -ENOMEM; - } - - if (drv_data->ssp_type == CE4100_SSP) { - if (spi->chip_select > 4) { - dev_err(&spi->dev, "failed setup: " - "cs number must not be > 4.\n"); - kfree(chip); - return -EINVAL; - } - - chip->frm = spi->chip_select; - } else - chip->gpio_cs = -1; - chip->enable_dma = 0; - chip->timeout = TIMOUT_DFLT; - chip->dma_burst_size = drv_data->master_info->enable_dma ? - DCMD_BURST8 : 0; - } - - /* protocol drivers may change the chip settings, so... 
- * if chip_info exists, use it */ - chip_info = spi->controller_data; - - /* chip_info isn't always needed */ - chip->cr1 = 0; - if (chip_info) { - if (chip_info->timeout) - chip->timeout = chip_info->timeout; - if (chip_info->tx_threshold) - tx_thres = chip_info->tx_threshold; - if (chip_info->rx_threshold) - rx_thres = chip_info->rx_threshold; - chip->enable_dma = drv_data->master_info->enable_dma; - chip->dma_threshold = 0; - if (chip_info->enable_loopback) - chip->cr1 = SSCR1_LBM; - } - - chip->threshold = (SSCR1_RxTresh(rx_thres) & SSCR1_RFT) | - (SSCR1_TxTresh(tx_thres) & SSCR1_TFT); - - /* set dma burst and threshold outside of chip_info path so that if - * chip_info goes away after setting chip->enable_dma, the - * burst and threshold can still respond to changes in bits_per_word */ - if (chip->enable_dma) { - /* set up legal burst and threshold for dma */ - if (set_dma_burst_and_threshold(chip, spi, spi->bits_per_word, - &chip->dma_burst_size, - &chip->dma_threshold)) { - dev_warn(&spi->dev, "in setup: DMA burst size reduced " - "to match bits_per_word\n"); - } - } - - clk_div = ssp_get_clk_div(ssp, spi->max_speed_hz); - chip->speed_hz = spi->max_speed_hz; - - chip->cr0 = clk_div - | SSCR0_Motorola - | SSCR0_DataSize(spi->bits_per_word > 16 ? - spi->bits_per_word - 16 : spi->bits_per_word) - | SSCR0_SSE - | (spi->bits_per_word > 16 ? SSCR0_EDSS : 0); - chip->cr1 &= ~(SSCR1_SPO | SSCR1_SPH); - chip->cr1 |= (((spi->mode & SPI_CPHA) != 0) ? SSCR1_SPH : 0) - | (((spi->mode & SPI_CPOL) != 0) ? SSCR1_SPO : 0); - - /* NOTE: PXA25x_SSP _could_ use external clocking ... */ - if (!pxa25x_ssp_comp(drv_data)) - dev_dbg(&spi->dev, "%ld Hz actual, %s\n", - clk_get_rate(ssp->clk) - / (1 + ((chip->cr0 & SSCR0_SCR(0xfff)) >> 8)), - chip->enable_dma ? "DMA" : "PIO"); - else - dev_dbg(&spi->dev, "%ld Hz actual, %s\n", - clk_get_rate(ssp->clk) / 2 - / (1 + ((chip->cr0 & SSCR0_SCR(0x0ff)) >> 8)), - chip->enable_dma ? 
"DMA" : "PIO"); - - if (spi->bits_per_word <= 8) { - chip->n_bytes = 1; - chip->dma_width = DCMD_WIDTH1; - chip->read = u8_reader; - chip->write = u8_writer; - } else if (spi->bits_per_word <= 16) { - chip->n_bytes = 2; - chip->dma_width = DCMD_WIDTH2; - chip->read = u16_reader; - chip->write = u16_writer; - } else if (spi->bits_per_word <= 32) { - chip->cr0 |= SSCR0_EDSS; - chip->n_bytes = 4; - chip->dma_width = DCMD_WIDTH4; - chip->read = u32_reader; - chip->write = u32_writer; - } else { - dev_err(&spi->dev, "invalid wordsize\n"); - return -ENODEV; - } - chip->bits_per_word = spi->bits_per_word; - - spi_set_ctldata(spi, chip); - - if (drv_data->ssp_type == CE4100_SSP) - return 0; - - return setup_cs(spi, chip, chip_info); -} - -static void cleanup(struct spi_device *spi) -{ - struct chip_data *chip = spi_get_ctldata(spi); - struct driver_data *drv_data = spi_master_get_devdata(spi->master); - - if (!chip) - return; - - if (drv_data->ssp_type != CE4100_SSP && gpio_is_valid(chip->gpio_cs)) - gpio_free(chip->gpio_cs); - - kfree(chip); -} - -static int __devinit init_queue(struct driver_data *drv_data) -{ - INIT_LIST_HEAD(&drv_data->queue); - spin_lock_init(&drv_data->lock); - - drv_data->run = QUEUE_STOPPED; - drv_data->busy = 0; - - tasklet_init(&drv_data->pump_transfers, - pump_transfers, (unsigned long)drv_data); - - INIT_WORK(&drv_data->pump_messages, pump_messages); - drv_data->workqueue = create_singlethread_workqueue( - dev_name(drv_data->master->dev.parent)); - if (drv_data->workqueue == NULL) - return -EBUSY; - - return 0; -} - -static int start_queue(struct driver_data *drv_data) -{ - unsigned long flags; - - spin_lock_irqsave(&drv_data->lock, flags); - - if (drv_data->run == QUEUE_RUNNING || drv_data->busy) { - spin_unlock_irqrestore(&drv_data->lock, flags); - return -EBUSY; - } - - drv_data->run = QUEUE_RUNNING; - drv_data->cur_msg = NULL; - drv_data->cur_transfer = NULL; - drv_data->cur_chip = NULL; - spin_unlock_irqrestore(&drv_data->lock, flags); - - queue_work(drv_data->workqueue, &drv_data->pump_messages); - - return 0; -} - -static int stop_queue(struct driver_data *drv_data) -{ - unsigned long flags; - unsigned limit = 500; - int status = 0; - - spin_lock_irqsave(&drv_data->lock, flags); - - /* This is a bit lame, but is optimized for the common execution path. - * A wait_queue on the drv_data->busy could be used, but then the common - * execution path (pump_messages) would be required to call wake_up or - * friends on every SPI message. Do this instead */ - drv_data->run = QUEUE_STOPPED; - while ((!list_empty(&drv_data->queue) || drv_data->busy) && limit--) { - spin_unlock_irqrestore(&drv_data->lock, flags); - msleep(10); - spin_lock_irqsave(&drv_data->lock, flags); - } - - if (!list_empty(&drv_data->queue) || drv_data->busy) - status = -EBUSY; - - spin_unlock_irqrestore(&drv_data->lock, flags); - - return status; -} - -static int destroy_queue(struct driver_data *drv_data) -{ - int status; - - status = stop_queue(drv_data); - /* we are unloading the module or failing to load (only two calls - * to this routine), and neither call can handle a return value. - * However, destroy_workqueue calls flush_workqueue, and that will - * block until all work is done. If the reason that stop_queue - * timed out is that the work will never finish, then it does no - * good to call destroy_workqueue, so return anyway. 
*/ - if (status != 0) - return status; - - destroy_workqueue(drv_data->workqueue); - - return 0; -} - -static int __devinit pxa2xx_spi_probe(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - struct pxa2xx_spi_master *platform_info; - struct spi_master *master; - struct driver_data *drv_data; - struct ssp_device *ssp; - int status; - - platform_info = dev->platform_data; - - ssp = pxa_ssp_request(pdev->id, pdev->name); - if (ssp == NULL) { - dev_err(&pdev->dev, "failed to request SSP%d\n", pdev->id); - return -ENODEV; - } - - /* Allocate master with space for drv_data and null dma buffer */ - master = spi_alloc_master(dev, sizeof(struct driver_data) + 16); - if (!master) { - dev_err(&pdev->dev, "cannot alloc spi_master\n"); - pxa_ssp_free(ssp); - return -ENOMEM; - } - drv_data = spi_master_get_devdata(master); - drv_data->master = master; - drv_data->master_info = platform_info; - drv_data->pdev = pdev; - drv_data->ssp = ssp; - - master->dev.parent = &pdev->dev; - master->dev.of_node = pdev->dev.of_node; - /* the spi->mode bits understood by this driver: */ - master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; - - master->bus_num = pdev->id; - master->num_chipselect = platform_info->num_chipselect; - master->dma_alignment = DMA_ALIGNMENT; - master->cleanup = cleanup; - master->setup = setup; - master->transfer = transfer; - - drv_data->ssp_type = ssp->type; - drv_data->null_dma_buf = (u32 *)ALIGN((u32)(drv_data + - sizeof(struct driver_data)), 8); - - drv_data->ioaddr = ssp->mmio_base; - drv_data->ssdr_physical = ssp->phys_base + SSDR; - if (pxa25x_ssp_comp(drv_data)) { - drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE; - drv_data->dma_cr1 = 0; - drv_data->clear_sr = SSSR_ROR; - drv_data->mask_sr = SSSR_RFS | SSSR_TFS | SSSR_ROR; - } else { - drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE | SSCR1_TINTE; - drv_data->dma_cr1 = SSCR1_TSRE | SSCR1_RSRE | SSCR1_TINTE; - drv_data->clear_sr = SSSR_ROR | SSSR_TINT; - drv_data->mask_sr = SSSR_TINT | SSSR_RFS | SSSR_TFS | SSSR_ROR; - } - - status = request_irq(ssp->irq, ssp_int, IRQF_SHARED, dev_name(dev), - drv_data); - if (status < 0) { - dev_err(&pdev->dev, "cannot get IRQ %d\n", ssp->irq); - goto out_error_master_alloc; - } - - /* Setup DMA if requested */ - drv_data->tx_channel = -1; - drv_data->rx_channel = -1; - if (platform_info->enable_dma) { - - /* Get two DMA channels (rx and tx) */ - drv_data->rx_channel = pxa_request_dma("pxa2xx_spi_ssp_rx", - DMA_PRIO_HIGH, - dma_handler, - drv_data); - if (drv_data->rx_channel < 0) { - dev_err(dev, "problem (%d) requesting rx channel\n", - drv_data->rx_channel); - status = -ENODEV; - goto out_error_irq_alloc; - } - drv_data->tx_channel = pxa_request_dma("pxa2xx_spi_ssp_tx", - DMA_PRIO_MEDIUM, - dma_handler, - drv_data); - if (drv_data->tx_channel < 0) { - dev_err(dev, "problem (%d) requesting tx channel\n", - drv_data->tx_channel); - status = -ENODEV; - goto out_error_dma_alloc; - } - - DRCMR(ssp->drcmr_rx) = DRCMR_MAPVLD | drv_data->rx_channel; - DRCMR(ssp->drcmr_tx) = DRCMR_MAPVLD | drv_data->tx_channel; - } - - /* Enable SOC clock */ - clk_enable(ssp->clk); - - /* Load default SSP configuration */ - write_SSCR0(0, drv_data->ioaddr); - write_SSCR1(SSCR1_RxTresh(RX_THRESH_DFLT) | - SSCR1_TxTresh(TX_THRESH_DFLT), - drv_data->ioaddr); - write_SSCR0(SSCR0_SCR(2) - | SSCR0_Motorola - | SSCR0_DataSize(8), - drv_data->ioaddr); - if (!pxa25x_ssp_comp(drv_data)) - write_SSTO(0, drv_data->ioaddr); - write_SSPSP(0, drv_data->ioaddr); - - /* Initial and start queue */ - status = init_queue(drv_data); 
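	/*
	 * For reference, a rough reading of the default SSCR0 programmed a few
	 * lines above, using the same rate formula that setup() prints via
	 * dev_dbg: SSCR0_SCR(2) selects a divisor of (1 + 2), so a hypothetical
	 * 13 MHz SSP clock would start the port near 13 MHz / 3, about 4.33 MHz
	 * (halved again on PXA25x-class ports), with an 8-bit data size until
	 * per-device values from setup() take effect.
	 */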
- if (status != 0) { - dev_err(&pdev->dev, "problem initializing queue\n"); - goto out_error_clock_enabled; - } - status = start_queue(drv_data); - if (status != 0) { - dev_err(&pdev->dev, "problem starting queue\n"); - goto out_error_clock_enabled; - } - - /* Register with the SPI framework */ - platform_set_drvdata(pdev, drv_data); - status = spi_register_master(master); - if (status != 0) { - dev_err(&pdev->dev, "problem registering spi master\n"); - goto out_error_queue_alloc; - } - - return status; - -out_error_queue_alloc: - destroy_queue(drv_data); - -out_error_clock_enabled: - clk_disable(ssp->clk); - -out_error_dma_alloc: - if (drv_data->tx_channel != -1) - pxa_free_dma(drv_data->tx_channel); - if (drv_data->rx_channel != -1) - pxa_free_dma(drv_data->rx_channel); - -out_error_irq_alloc: - free_irq(ssp->irq, drv_data); - -out_error_master_alloc: - spi_master_put(master); - pxa_ssp_free(ssp); - return status; -} - -static int pxa2xx_spi_remove(struct platform_device *pdev) -{ - struct driver_data *drv_data = platform_get_drvdata(pdev); - struct ssp_device *ssp; - int status = 0; - - if (!drv_data) - return 0; - ssp = drv_data->ssp; - - /* Remove the queue */ - status = destroy_queue(drv_data); - if (status != 0) - /* the kernel does not check the return status of this - * this routine (mod->exit, within the kernel). Therefore - * nothing is gained by returning from here, the module is - * going away regardless, and we should not leave any more - * resources allocated than necessary. We cannot free the - * message memory in drv_data->queue, but we can release the - * resources below. I think the kernel should honor -EBUSY - * returns but... */ - dev_err(&pdev->dev, "pxa2xx_spi_remove: workqueue will not " - "complete, message memory not freed\n"); - - /* Disable the SSP at the peripheral and SOC level */ - write_SSCR0(0, drv_data->ioaddr); - clk_disable(ssp->clk); - - /* Release DMA */ - if (drv_data->master_info->enable_dma) { - DRCMR(ssp->drcmr_rx) = 0; - DRCMR(ssp->drcmr_tx) = 0; - pxa_free_dma(drv_data->tx_channel); - pxa_free_dma(drv_data->rx_channel); - } - - /* Release IRQ */ - free_irq(ssp->irq, drv_data); - - /* Release SSP */ - pxa_ssp_free(ssp); - - /* Disconnect from the SPI framework */ - spi_unregister_master(drv_data->master); - - /* Prevent double remove */ - platform_set_drvdata(pdev, NULL); - - return 0; -} - -static void pxa2xx_spi_shutdown(struct platform_device *pdev) -{ - int status = 0; - - if ((status = pxa2xx_spi_remove(pdev)) != 0) - dev_err(&pdev->dev, "shutdown failed with %d\n", status); -} - -#ifdef CONFIG_PM -static int pxa2xx_spi_suspend(struct device *dev) -{ - struct driver_data *drv_data = dev_get_drvdata(dev); - struct ssp_device *ssp = drv_data->ssp; - int status = 0; - - status = stop_queue(drv_data); - if (status != 0) - return status; - write_SSCR0(0, drv_data->ioaddr); - clk_disable(ssp->clk); - - return 0; -} - -static int pxa2xx_spi_resume(struct device *dev) -{ - struct driver_data *drv_data = dev_get_drvdata(dev); - struct ssp_device *ssp = drv_data->ssp; - int status = 0; - - if (drv_data->rx_channel != -1) - DRCMR(drv_data->ssp->drcmr_rx) = - DRCMR_MAPVLD | drv_data->rx_channel; - if (drv_data->tx_channel != -1) - DRCMR(drv_data->ssp->drcmr_tx) = - DRCMR_MAPVLD | drv_data->tx_channel; - - /* Enable the SSP clock */ - clk_enable(ssp->clk); - - /* Start the queue running */ - status = start_queue(drv_data); - if (status != 0) { - dev_err(dev, "problem starting queue (%d)\n", status); - return status; - } - - return 0; -} - -static const 
struct dev_pm_ops pxa2xx_spi_pm_ops = { - .suspend = pxa2xx_spi_suspend, - .resume = pxa2xx_spi_resume, -}; -#endif - -static struct platform_driver driver = { - .driver = { - .name = "pxa2xx-spi", - .owner = THIS_MODULE, -#ifdef CONFIG_PM - .pm = &pxa2xx_spi_pm_ops, -#endif - }, - .probe = pxa2xx_spi_probe, - .remove = pxa2xx_spi_remove, - .shutdown = pxa2xx_spi_shutdown, -}; - -static int __init pxa2xx_spi_init(void) -{ - return platform_driver_register(&driver); -} -subsys_initcall(pxa2xx_spi_init); - -static void __exit pxa2xx_spi_exit(void) -{ - platform_driver_unregister(&driver); -} -module_exit(pxa2xx_spi_exit); diff --git a/drivers/spi/pxa2xx_spi_pci.c b/drivers/spi/pxa2xx_spi_pci.c deleted file mode 100644 index 378e504..0000000 --- a/drivers/spi/pxa2xx_spi_pci.c +++ /dev/null @@ -1,180 +0,0 @@ -/* - * CE4100's SPI device is more or less the same one as found on PXA - * - */ -#include -#include -#include -#include - -struct ce4100_info { - struct ssp_device ssp; - struct platform_device *spi_pdev; -}; - -static DEFINE_MUTEX(ssp_lock); -static LIST_HEAD(ssp_list); - -struct ssp_device *pxa_ssp_request(int port, const char *label) -{ - struct ssp_device *ssp = NULL; - - mutex_lock(&ssp_lock); - - list_for_each_entry(ssp, &ssp_list, node) { - if (ssp->port_id == port && ssp->use_count == 0) { - ssp->use_count++; - ssp->label = label; - break; - } - } - - mutex_unlock(&ssp_lock); - - if (&ssp->node == &ssp_list) - return NULL; - - return ssp; -} -EXPORT_SYMBOL_GPL(pxa_ssp_request); - -void pxa_ssp_free(struct ssp_device *ssp) -{ - mutex_lock(&ssp_lock); - if (ssp->use_count) { - ssp->use_count--; - ssp->label = NULL; - } else - dev_err(&ssp->pdev->dev, "device already free\n"); - mutex_unlock(&ssp_lock); -} -EXPORT_SYMBOL_GPL(pxa_ssp_free); - -static int __devinit ce4100_spi_probe(struct pci_dev *dev, - const struct pci_device_id *ent) -{ - int ret; - resource_size_t phys_beg; - resource_size_t phys_len; - struct ce4100_info *spi_info; - struct platform_device *pdev; - struct pxa2xx_spi_master spi_pdata; - struct ssp_device *ssp; - - ret = pci_enable_device(dev); - if (ret) - return ret; - - phys_beg = pci_resource_start(dev, 0); - phys_len = pci_resource_len(dev, 0); - - if (!request_mem_region(phys_beg, phys_len, - "CE4100 SPI")) { - dev_err(&dev->dev, "Can't request register space.\n"); - ret = -EBUSY; - return ret; - } - - pdev = platform_device_alloc("pxa2xx-spi", dev->devfn); - spi_info = kzalloc(sizeof(*spi_info), GFP_KERNEL); - if (!pdev || !spi_info ) { - ret = -ENOMEM; - goto err_nomem; - } - memset(&spi_pdata, 0, sizeof(spi_pdata)); - spi_pdata.num_chipselect = dev->devfn; - - ret = platform_device_add_data(pdev, &spi_pdata, sizeof(spi_pdata)); - if (ret) - goto err_nomem; - - pdev->dev.parent = &dev->dev; - pdev->dev.of_node = dev->dev.of_node; - ssp = &spi_info->ssp; - ssp->phys_base = pci_resource_start(dev, 0); - ssp->mmio_base = ioremap(phys_beg, phys_len); - if (!ssp->mmio_base) { - dev_err(&pdev->dev, "failed to ioremap() registers\n"); - ret = -EIO; - goto err_nomem; - } - ssp->irq = dev->irq; - ssp->port_id = pdev->id; - ssp->type = PXA25x_SSP; - - mutex_lock(&ssp_lock); - list_add(&ssp->node, &ssp_list); - mutex_unlock(&ssp_lock); - - pci_set_drvdata(dev, spi_info); - - ret = platform_device_add(pdev); - if (ret) - goto err_dev_add; - - return ret; - -err_dev_add: - pci_set_drvdata(dev, NULL); - mutex_lock(&ssp_lock); - list_del(&ssp->node); - mutex_unlock(&ssp_lock); - iounmap(ssp->mmio_base); - -err_nomem: - release_mem_region(phys_beg, phys_len); - 
platform_device_put(pdev); - kfree(spi_info); - return ret; -} - -static void __devexit ce4100_spi_remove(struct pci_dev *dev) -{ - struct ce4100_info *spi_info; - struct ssp_device *ssp; - - spi_info = pci_get_drvdata(dev); - ssp = &spi_info->ssp; - platform_device_unregister(spi_info->spi_pdev); - - iounmap(ssp->mmio_base); - release_mem_region(pci_resource_start(dev, 0), - pci_resource_len(dev, 0)); - - mutex_lock(&ssp_lock); - list_del(&ssp->node); - mutex_unlock(&ssp_lock); - - pci_set_drvdata(dev, NULL); - pci_disable_device(dev); - kfree(spi_info); -} - -static struct pci_device_id ce4100_spi_devices[] __devinitdata = { - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2e6a) }, - { }, -}; -MODULE_DEVICE_TABLE(pci, ce4100_spi_devices); - -static struct pci_driver ce4100_spi_driver = { - .name = "ce4100_spi", - .id_table = ce4100_spi_devices, - .probe = ce4100_spi_probe, - .remove = __devexit_p(ce4100_spi_remove), -}; - -static int __init ce4100_spi_init(void) -{ - return pci_register_driver(&ce4100_spi_driver); -} -module_init(ce4100_spi_init); - -static void __exit ce4100_spi_exit(void) -{ - pci_unregister_driver(&ce4100_spi_driver); -} -module_exit(ce4100_spi_exit); - -MODULE_DESCRIPTION("CE4100 PCI-SPI glue code for PXA's driver"); -MODULE_LICENSE("GPL v2"); -MODULE_AUTHOR("Sebastian Andrzej Siewior "); diff --git a/drivers/spi/spi-altera.c b/drivers/spi/spi-altera.c new file mode 100644 index 0000000..4813a63 --- /dev/null +++ b/drivers/spi/spi-altera.c @@ -0,0 +1,339 @@ +/* + * Altera SPI driver + * + * Copyright (C) 2008 Thomas Chou + * + * Based on spi_s3c24xx.c, which is: + * Copyright (c) 2006 Ben Dooks + * Copyright (c) 2006 Simtec Electronics + * Ben Dooks + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRV_NAME "spi_altera" + +#define ALTERA_SPI_RXDATA 0 +#define ALTERA_SPI_TXDATA 4 +#define ALTERA_SPI_STATUS 8 +#define ALTERA_SPI_CONTROL 12 +#define ALTERA_SPI_SLAVE_SEL 20 + +#define ALTERA_SPI_STATUS_ROE_MSK 0x8 +#define ALTERA_SPI_STATUS_TOE_MSK 0x10 +#define ALTERA_SPI_STATUS_TMT_MSK 0x20 +#define ALTERA_SPI_STATUS_TRDY_MSK 0x40 +#define ALTERA_SPI_STATUS_RRDY_MSK 0x80 +#define ALTERA_SPI_STATUS_E_MSK 0x100 + +#define ALTERA_SPI_CONTROL_IROE_MSK 0x8 +#define ALTERA_SPI_CONTROL_ITOE_MSK 0x10 +#define ALTERA_SPI_CONTROL_ITRDY_MSK 0x40 +#define ALTERA_SPI_CONTROL_IRRDY_MSK 0x80 +#define ALTERA_SPI_CONTROL_IE_MSK 0x100 +#define ALTERA_SPI_CONTROL_SSO_MSK 0x400 + +struct altera_spi { + /* bitbang has to be first */ + struct spi_bitbang bitbang; + struct completion done; + + void __iomem *base; + int irq; + int len; + int count; + int bytes_per_word; + unsigned long imr; + + /* data buffers */ + const unsigned char *tx; + unsigned char *rx; +}; + +static inline struct altera_spi *altera_spi_to_hw(struct spi_device *sdev) +{ + return spi_master_get_devdata(sdev->master); +} + +static void altera_spi_chipsel(struct spi_device *spi, int value) +{ + struct altera_spi *hw = altera_spi_to_hw(spi); + + if (spi->mode & SPI_CS_HIGH) { + switch (value) { + case BITBANG_CS_INACTIVE: + writel(1 << spi->chip_select, + hw->base + ALTERA_SPI_SLAVE_SEL); + hw->imr |= ALTERA_SPI_CONTROL_SSO_MSK; + writel(hw->imr, hw->base + ALTERA_SPI_CONTROL); + break; + + case BITBANG_CS_ACTIVE: + hw->imr &= ~ALTERA_SPI_CONTROL_SSO_MSK; + writel(hw->imr, hw->base + ALTERA_SPI_CONTROL); + writel(0, hw->base + ALTERA_SPI_SLAVE_SEL); + break; + } + } else { + switch (value) { + case BITBANG_CS_INACTIVE: + hw->imr &= ~ALTERA_SPI_CONTROL_SSO_MSK; + writel(hw->imr, hw->base + ALTERA_SPI_CONTROL); + break; + + case BITBANG_CS_ACTIVE: + writel(1 << spi->chip_select, + hw->base + ALTERA_SPI_SLAVE_SEL); + hw->imr |= ALTERA_SPI_CONTROL_SSO_MSK; + writel(hw->imr, hw->base + ALTERA_SPI_CONTROL); + break; + } + } +} + +static int altera_spi_setupxfer(struct spi_device *spi, struct spi_transfer *t) +{ + return 0; +} + +static int altera_spi_setup(struct spi_device *spi) +{ + return 0; +} + +static inline unsigned int hw_txbyte(struct altera_spi *hw, int count) +{ + if (hw->tx) { + switch (hw->bytes_per_word) { + case 1: + return hw->tx[count]; + case 2: + return (hw->tx[count * 2] + | (hw->tx[count * 2 + 1] << 8)); + } + } + return 0; +} + +static int altera_spi_txrx(struct spi_device *spi, struct spi_transfer *t) +{ + struct altera_spi *hw = altera_spi_to_hw(spi); + + hw->tx = t->tx_buf; + hw->rx = t->rx_buf; + hw->count = 0; + hw->bytes_per_word = (t->bits_per_word ? 
: spi->bits_per_word) / 8; + hw->len = t->len / hw->bytes_per_word; + + if (hw->irq >= 0) { + /* enable receive interrupt */ + hw->imr |= ALTERA_SPI_CONTROL_IRRDY_MSK; + writel(hw->imr, hw->base + ALTERA_SPI_CONTROL); + + /* send the first byte */ + writel(hw_txbyte(hw, 0), hw->base + ALTERA_SPI_TXDATA); + + wait_for_completion(&hw->done); + /* disable receive interrupt */ + hw->imr &= ~ALTERA_SPI_CONTROL_IRRDY_MSK; + writel(hw->imr, hw->base + ALTERA_SPI_CONTROL); + } else { + /* send the first byte */ + writel(hw_txbyte(hw, 0), hw->base + ALTERA_SPI_TXDATA); + + while (1) { + unsigned int rxd; + + while (!(readl(hw->base + ALTERA_SPI_STATUS) & + ALTERA_SPI_STATUS_RRDY_MSK)) + cpu_relax(); + + rxd = readl(hw->base + ALTERA_SPI_RXDATA); + if (hw->rx) { + switch (hw->bytes_per_word) { + case 1: + hw->rx[hw->count] = rxd; + break; + case 2: + hw->rx[hw->count * 2] = rxd; + hw->rx[hw->count * 2 + 1] = rxd >> 8; + break; + } + } + + hw->count++; + + if (hw->count < hw->len) + writel(hw_txbyte(hw, hw->count), + hw->base + ALTERA_SPI_TXDATA); + else + break; + } + + } + + return hw->count * hw->bytes_per_word; +} + +static irqreturn_t altera_spi_irq(int irq, void *dev) +{ + struct altera_spi *hw = dev; + unsigned int rxd; + + rxd = readl(hw->base + ALTERA_SPI_RXDATA); + if (hw->rx) { + switch (hw->bytes_per_word) { + case 1: + hw->rx[hw->count] = rxd; + break; + case 2: + hw->rx[hw->count * 2] = rxd; + hw->rx[hw->count * 2 + 1] = rxd >> 8; + break; + } + } + + hw->count++; + + if (hw->count < hw->len) + writel(hw_txbyte(hw, hw->count), hw->base + ALTERA_SPI_TXDATA); + else + complete(&hw->done); + + return IRQ_HANDLED; +} + +static int __devinit altera_spi_probe(struct platform_device *pdev) +{ + struct altera_spi_platform_data *platp = pdev->dev.platform_data; + struct altera_spi *hw; + struct spi_master *master; + struct resource *res; + int err = -ENODEV; + + master = spi_alloc_master(&pdev->dev, sizeof(struct altera_spi)); + if (!master) + return err; + + /* setup the master state. 
*/ + master->bus_num = pdev->id; + master->num_chipselect = 16; + master->mode_bits = SPI_CS_HIGH; + master->setup = altera_spi_setup; + + hw = spi_master_get_devdata(master); + platform_set_drvdata(pdev, hw); + + /* setup the state for the bitbang driver */ + hw->bitbang.master = spi_master_get(master); + if (!hw->bitbang.master) + return err; + hw->bitbang.setup_transfer = altera_spi_setupxfer; + hw->bitbang.chipselect = altera_spi_chipsel; + hw->bitbang.txrx_bufs = altera_spi_txrx; + + /* find and map our resources */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + goto exit_busy; + if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res), + pdev->name)) + goto exit_busy; + hw->base = devm_ioremap_nocache(&pdev->dev, res->start, + resource_size(res)); + if (!hw->base) + goto exit_busy; + /* program defaults into the registers */ + hw->imr = 0; /* disable spi interrupts */ + writel(hw->imr, hw->base + ALTERA_SPI_CONTROL); + writel(0, hw->base + ALTERA_SPI_STATUS); /* clear status reg */ + if (readl(hw->base + ALTERA_SPI_STATUS) & ALTERA_SPI_STATUS_RRDY_MSK) + readl(hw->base + ALTERA_SPI_RXDATA); /* flush rxdata */ + /* irq is optional */ + hw->irq = platform_get_irq(pdev, 0); + if (hw->irq >= 0) { + init_completion(&hw->done); + err = devm_request_irq(&pdev->dev, hw->irq, altera_spi_irq, 0, + pdev->name, hw); + if (err) + goto exit; + } + /* find platform data */ + if (!platp) + hw->bitbang.master->dev.of_node = pdev->dev.of_node; + + /* register our spi controller */ + err = spi_bitbang_start(&hw->bitbang); + if (err) + goto exit; + dev_info(&pdev->dev, "base %p, irq %d\n", hw->base, hw->irq); + + return 0; + +exit_busy: + err = -EBUSY; +exit: + platform_set_drvdata(pdev, NULL); + spi_master_put(master); + return err; +} + +static int __devexit altera_spi_remove(struct platform_device *dev) +{ + struct altera_spi *hw = platform_get_drvdata(dev); + struct spi_master *master = hw->bitbang.master; + + spi_bitbang_stop(&hw->bitbang); + platform_set_drvdata(dev, NULL); + spi_master_put(master); + return 0; +} + +#ifdef CONFIG_OF +static const struct of_device_id altera_spi_match[] = { + { .compatible = "ALTR,spi-1.0", }, + {}, +}; +MODULE_DEVICE_TABLE(of, altera_spi_match); +#else /* CONFIG_OF */ +#define altera_spi_match NULL +#endif /* CONFIG_OF */ + +static struct platform_driver altera_spi_driver = { + .probe = altera_spi_probe, + .remove = __devexit_p(altera_spi_remove), + .driver = { + .name = DRV_NAME, + .owner = THIS_MODULE, + .pm = NULL, + .of_match_table = altera_spi_match, + }, +}; + +static int __init altera_spi_init(void) +{ + return platform_driver_register(&altera_spi_driver); +} +module_init(altera_spi_init); + +static void __exit altera_spi_exit(void) +{ + platform_driver_unregister(&altera_spi_driver); +} +module_exit(altera_spi_exit); + +MODULE_DESCRIPTION("Altera SPI driver"); +MODULE_AUTHOR("Thomas Chou "); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:" DRV_NAME); diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c new file mode 100644 index 0000000..fcff810 --- /dev/null +++ b/drivers/spi/spi-ath79.c @@ -0,0 +1,292 @@ +/* + * SPI controller driver for the Atheros AR71XX/AR724X/AR913X SoCs + * + * Copyright (C) 2009-2011 Gabor Juhos + * + * This driver has been based on the spi-gpio.c: + * Copyright (C) 2006,2008 David Brownell + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#define DRV_NAME "ath79-spi" + +struct ath79_spi { + struct spi_bitbang bitbang; + u32 ioc_base; + u32 reg_ctrl; + void __iomem *base; +}; + +static inline u32 ath79_spi_rr(struct ath79_spi *sp, unsigned reg) +{ + return ioread32(sp->base + reg); +} + +static inline void ath79_spi_wr(struct ath79_spi *sp, unsigned reg, u32 val) +{ + iowrite32(val, sp->base + reg); +} + +static inline struct ath79_spi *ath79_spidev_to_sp(struct spi_device *spi) +{ + return spi_master_get_devdata(spi->master); +} + +static void ath79_spi_chipselect(struct spi_device *spi, int is_active) +{ + struct ath79_spi *sp = ath79_spidev_to_sp(spi); + int cs_high = (spi->mode & SPI_CS_HIGH) ? is_active : !is_active; + + if (is_active) { + /* set initial clock polarity */ + if (spi->mode & SPI_CPOL) + sp->ioc_base |= AR71XX_SPI_IOC_CLK; + else + sp->ioc_base &= ~AR71XX_SPI_IOC_CLK; + + ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, sp->ioc_base); + } + + if (spi->chip_select) { + struct ath79_spi_controller_data *cdata = spi->controller_data; + + /* SPI is normally active-low */ + gpio_set_value(cdata->gpio, cs_high); + } else { + if (cs_high) + sp->ioc_base |= AR71XX_SPI_IOC_CS0; + else + sp->ioc_base &= ~AR71XX_SPI_IOC_CS0; + + ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, sp->ioc_base); + } + +} + +static int ath79_spi_setup_cs(struct spi_device *spi) +{ + struct ath79_spi *sp = ath79_spidev_to_sp(spi); + struct ath79_spi_controller_data *cdata; + + cdata = spi->controller_data; + if (spi->chip_select && !cdata) + return -EINVAL; + + /* enable GPIO mode */ + ath79_spi_wr(sp, AR71XX_SPI_REG_FS, AR71XX_SPI_FS_GPIO); + + /* save CTRL register */ + sp->reg_ctrl = ath79_spi_rr(sp, AR71XX_SPI_REG_CTRL); + sp->ioc_base = ath79_spi_rr(sp, AR71XX_SPI_REG_IOC); + + /* TODO: setup speed? 
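	 * (The CTRL register is loaded with a fixed value just below, so a
	 * device's spi->max_speed_hz is not yet taken into account here.)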
*/ + ath79_spi_wr(sp, AR71XX_SPI_REG_CTRL, 0x43); + + if (spi->chip_select) { + int status = 0; + + status = gpio_request(cdata->gpio, dev_name(&spi->dev)); + if (status) + return status; + + status = gpio_direction_output(cdata->gpio, + spi->mode & SPI_CS_HIGH); + if (status) { + gpio_free(cdata->gpio); + return status; + } + } else { + if (spi->mode & SPI_CS_HIGH) + sp->ioc_base |= AR71XX_SPI_IOC_CS0; + else + sp->ioc_base &= ~AR71XX_SPI_IOC_CS0; + ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, sp->ioc_base); + } + + return 0; +} + +static void ath79_spi_cleanup_cs(struct spi_device *spi) +{ + struct ath79_spi *sp = ath79_spidev_to_sp(spi); + + if (spi->chip_select) { + struct ath79_spi_controller_data *cdata = spi->controller_data; + gpio_free(cdata->gpio); + } + + /* restore CTRL register */ + ath79_spi_wr(sp, AR71XX_SPI_REG_CTRL, sp->reg_ctrl); + /* disable GPIO mode */ + ath79_spi_wr(sp, AR71XX_SPI_REG_FS, 0); +} + +static int ath79_spi_setup(struct spi_device *spi) +{ + int status = 0; + + if (spi->bits_per_word > 32) + return -EINVAL; + + if (!spi->controller_state) { + status = ath79_spi_setup_cs(spi); + if (status) + return status; + } + + status = spi_bitbang_setup(spi); + if (status && !spi->controller_state) + ath79_spi_cleanup_cs(spi); + + return status; +} + +static void ath79_spi_cleanup(struct spi_device *spi) +{ + ath79_spi_cleanup_cs(spi); + spi_bitbang_cleanup(spi); +} + +static u32 ath79_spi_txrx_mode0(struct spi_device *spi, unsigned nsecs, + u32 word, u8 bits) +{ + struct ath79_spi *sp = ath79_spidev_to_sp(spi); + u32 ioc = sp->ioc_base; + + /* clock starts at inactive polarity */ + for (word <<= (32 - bits); likely(bits); bits--) { + u32 out; + + if (word & (1 << 31)) + out = ioc | AR71XX_SPI_IOC_DO; + else + out = ioc & ~AR71XX_SPI_IOC_DO; + + /* setup MSB (to slave) on trailing edge */ + ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, out); + ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, out | AR71XX_SPI_IOC_CLK); + + word <<= 1; + } + + return ath79_spi_rr(sp, AR71XX_SPI_REG_RDS); +} + +static __devinit int ath79_spi_probe(struct platform_device *pdev) +{ + struct spi_master *master; + struct ath79_spi *sp; + struct ath79_spi_platform_data *pdata; + struct resource *r; + int ret; + + master = spi_alloc_master(&pdev->dev, sizeof(*sp)); + if (master == NULL) { + dev_err(&pdev->dev, "failed to allocate spi master\n"); + return -ENOMEM; + } + + sp = spi_master_get_devdata(master); + platform_set_drvdata(pdev, sp); + + pdata = pdev->dev.platform_data; + + master->setup = ath79_spi_setup; + master->cleanup = ath79_spi_cleanup; + if (pdata) { + master->bus_num = pdata->bus_num; + master->num_chipselect = pdata->num_chipselect; + } else { + master->bus_num = -1; + master->num_chipselect = 1; + } + + sp->bitbang.master = spi_master_get(master); + sp->bitbang.chipselect = ath79_spi_chipselect; + sp->bitbang.txrx_word[SPI_MODE_0] = ath79_spi_txrx_mode0; + sp->bitbang.setup_transfer = spi_bitbang_setup_transfer; + sp->bitbang.flags = SPI_CS_HIGH; + + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (r == NULL) { + ret = -ENOENT; + goto err_put_master; + } + + sp->base = ioremap(r->start, r->end - r->start + 1); + if (!sp->base) { + ret = -ENXIO; + goto err_put_master; + } + + ret = spi_bitbang_start(&sp->bitbang); + if (ret) + goto err_unmap; + + return 0; + +err_unmap: + iounmap(sp->base); +err_put_master: + platform_set_drvdata(pdev, NULL); + spi_master_put(sp->bitbang.master); + + return ret; +} + +static __devexit int ath79_spi_remove(struct platform_device *pdev) +{ + struct ath79_spi *sp = 
platform_get_drvdata(pdev); + + spi_bitbang_stop(&sp->bitbang); + iounmap(sp->base); + platform_set_drvdata(pdev, NULL); + spi_master_put(sp->bitbang.master); + + return 0; +} + +static struct platform_driver ath79_spi_driver = { + .probe = ath79_spi_probe, + .remove = __devexit_p(ath79_spi_remove), + .driver = { + .name = DRV_NAME, + .owner = THIS_MODULE, + }, +}; + +static __init int ath79_spi_init(void) +{ + return platform_driver_register(&ath79_spi_driver); +} +module_init(ath79_spi_init); + +static __exit void ath79_spi_exit(void) +{ + platform_driver_unregister(&ath79_spi_driver); +} +module_exit(ath79_spi_exit); + +MODULE_DESCRIPTION("SPI controller driver for Atheros AR71XX/AR724X/AR913X"); +MODULE_AUTHOR("Gabor Juhos "); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:" DRV_NAME); diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c new file mode 100644 index 0000000..82dee9a --- /dev/null +++ b/drivers/spi/spi-atmel.c @@ -0,0 +1,1093 @@ +/* + * Driver for Atmel AT32 and AT91 SPI Controllers + * + * Copyright (C) 2006 Atmel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +/* SPI register offsets */ +#define SPI_CR 0x0000 +#define SPI_MR 0x0004 +#define SPI_RDR 0x0008 +#define SPI_TDR 0x000c +#define SPI_SR 0x0010 +#define SPI_IER 0x0014 +#define SPI_IDR 0x0018 +#define SPI_IMR 0x001c +#define SPI_CSR0 0x0030 +#define SPI_CSR1 0x0034 +#define SPI_CSR2 0x0038 +#define SPI_CSR3 0x003c +#define SPI_RPR 0x0100 +#define SPI_RCR 0x0104 +#define SPI_TPR 0x0108 +#define SPI_TCR 0x010c +#define SPI_RNPR 0x0110 +#define SPI_RNCR 0x0114 +#define SPI_TNPR 0x0118 +#define SPI_TNCR 0x011c +#define SPI_PTCR 0x0120 +#define SPI_PTSR 0x0124 + +/* Bitfields in CR */ +#define SPI_SPIEN_OFFSET 0 +#define SPI_SPIEN_SIZE 1 +#define SPI_SPIDIS_OFFSET 1 +#define SPI_SPIDIS_SIZE 1 +#define SPI_SWRST_OFFSET 7 +#define SPI_SWRST_SIZE 1 +#define SPI_LASTXFER_OFFSET 24 +#define SPI_LASTXFER_SIZE 1 + +/* Bitfields in MR */ +#define SPI_MSTR_OFFSET 0 +#define SPI_MSTR_SIZE 1 +#define SPI_PS_OFFSET 1 +#define SPI_PS_SIZE 1 +#define SPI_PCSDEC_OFFSET 2 +#define SPI_PCSDEC_SIZE 1 +#define SPI_FDIV_OFFSET 3 +#define SPI_FDIV_SIZE 1 +#define SPI_MODFDIS_OFFSET 4 +#define SPI_MODFDIS_SIZE 1 +#define SPI_LLB_OFFSET 7 +#define SPI_LLB_SIZE 1 +#define SPI_PCS_OFFSET 16 +#define SPI_PCS_SIZE 4 +#define SPI_DLYBCS_OFFSET 24 +#define SPI_DLYBCS_SIZE 8 + +/* Bitfields in RDR */ +#define SPI_RD_OFFSET 0 +#define SPI_RD_SIZE 16 + +/* Bitfields in TDR */ +#define SPI_TD_OFFSET 0 +#define SPI_TD_SIZE 16 + +/* Bitfields in SR */ +#define SPI_RDRF_OFFSET 0 +#define SPI_RDRF_SIZE 1 +#define SPI_TDRE_OFFSET 1 +#define SPI_TDRE_SIZE 1 +#define SPI_MODF_OFFSET 2 +#define SPI_MODF_SIZE 1 +#define SPI_OVRES_OFFSET 3 +#define SPI_OVRES_SIZE 1 +#define SPI_ENDRX_OFFSET 4 +#define SPI_ENDRX_SIZE 1 +#define SPI_ENDTX_OFFSET 5 +#define SPI_ENDTX_SIZE 1 +#define SPI_RXBUFF_OFFSET 6 +#define SPI_RXBUFF_SIZE 1 +#define SPI_TXBUFE_OFFSET 7 +#define SPI_TXBUFE_SIZE 1 +#define SPI_NSSR_OFFSET 8 +#define SPI_NSSR_SIZE 1 +#define SPI_TXEMPTY_OFFSET 9 +#define SPI_TXEMPTY_SIZE 1 +#define SPI_SPIENS_OFFSET 16 +#define SPI_SPIENS_SIZE 1 + +/* Bitfields in CSR0 */ +#define SPI_CPOL_OFFSET 0 +#define SPI_CPOL_SIZE 1 +#define 
SPI_NCPHA_OFFSET 1 +#define SPI_NCPHA_SIZE 1 +#define SPI_CSAAT_OFFSET 3 +#define SPI_CSAAT_SIZE 1 +#define SPI_BITS_OFFSET 4 +#define SPI_BITS_SIZE 4 +#define SPI_SCBR_OFFSET 8 +#define SPI_SCBR_SIZE 8 +#define SPI_DLYBS_OFFSET 16 +#define SPI_DLYBS_SIZE 8 +#define SPI_DLYBCT_OFFSET 24 +#define SPI_DLYBCT_SIZE 8 + +/* Bitfields in RCR */ +#define SPI_RXCTR_OFFSET 0 +#define SPI_RXCTR_SIZE 16 + +/* Bitfields in TCR */ +#define SPI_TXCTR_OFFSET 0 +#define SPI_TXCTR_SIZE 16 + +/* Bitfields in RNCR */ +#define SPI_RXNCR_OFFSET 0 +#define SPI_RXNCR_SIZE 16 + +/* Bitfields in TNCR */ +#define SPI_TXNCR_OFFSET 0 +#define SPI_TXNCR_SIZE 16 + +/* Bitfields in PTCR */ +#define SPI_RXTEN_OFFSET 0 +#define SPI_RXTEN_SIZE 1 +#define SPI_RXTDIS_OFFSET 1 +#define SPI_RXTDIS_SIZE 1 +#define SPI_TXTEN_OFFSET 8 +#define SPI_TXTEN_SIZE 1 +#define SPI_TXTDIS_OFFSET 9 +#define SPI_TXTDIS_SIZE 1 + +/* Constants for BITS */ +#define SPI_BITS_8_BPT 0 +#define SPI_BITS_9_BPT 1 +#define SPI_BITS_10_BPT 2 +#define SPI_BITS_11_BPT 3 +#define SPI_BITS_12_BPT 4 +#define SPI_BITS_13_BPT 5 +#define SPI_BITS_14_BPT 6 +#define SPI_BITS_15_BPT 7 +#define SPI_BITS_16_BPT 8 + +/* Bit manipulation macros */ +#define SPI_BIT(name) \ + (1 << SPI_##name##_OFFSET) +#define SPI_BF(name,value) \ + (((value) & ((1 << SPI_##name##_SIZE) - 1)) << SPI_##name##_OFFSET) +#define SPI_BFEXT(name,value) \ + (((value) >> SPI_##name##_OFFSET) & ((1 << SPI_##name##_SIZE) - 1)) +#define SPI_BFINS(name,value,old) \ + ( ((old) & ~(((1 << SPI_##name##_SIZE) - 1) << SPI_##name##_OFFSET)) \ + | SPI_BF(name,value)) + +/* Register access macros */ +#define spi_readl(port,reg) \ + __raw_readl((port)->regs + SPI_##reg) +#define spi_writel(port,reg,value) \ + __raw_writel((value), (port)->regs + SPI_##reg) + + +/* + * The core SPI transfer engine just talks to a register bank to set up + * DMA transfers; transfer queue progress is driven by IRQs. The clock + * framework provides the base clock, subdivided for each spi_device. + */ +struct atmel_spi { + spinlock_t lock; + + void __iomem *regs; + int irq; + struct clk *clk; + struct platform_device *pdev; + struct spi_device *stay; + + u8 stopping; + struct list_head queue; + struct spi_transfer *current_transfer; + unsigned long current_remaining_bytes; + struct spi_transfer *next_transfer; + unsigned long next_remaining_bytes; + + void *buffer; + dma_addr_t buffer_dma; +}; + +/* Controller-specific per-slave state */ +struct atmel_spi_device { + unsigned int npcs_pin; + u32 csr; +}; + +#define BUFFER_SIZE PAGE_SIZE +#define INVALID_DMA_ADDRESS 0xffffffff + +/* + * Version 2 of the SPI controller has + * - CR.LASTXFER + * - SPI_MR.DIV32 may become FDIV or must-be-zero (here: always zero) + * - SPI_SR.TXEMPTY, SPI_SR.NSSR (and corresponding irqs) + * - SPI_CSRx.CSAAT + * - SPI_CSRx.SBCR allows faster clocking + * + * We can determine the controller version by reading the VERSION + * register, but I haven't checked that it exists on all chips, and + * this is cheaper anyway. + */ +static bool atmel_spi_is_v2(void) +{ + return !cpu_is_at91rm9200(); +} + +/* + * Earlier SPI controllers (e.g. on at91rm9200) have a design bug whereby + * they assume that spi slave device state will not change on deselect, so + * that automagic deselection is OK. ("NPCSx rises if no data is to be + * transmitted") Not so! Workaround uses nCSx pins as GPIOs; or newer + * controllers have CSAAT and friends. 
+ * + * Since the CSAAT functionality is a bit weird on newer controllers as + * well, we use GPIO to control nCSx pins on all controllers, updating + * MR.PCS to avoid confusing the controller. Using GPIOs also lets us + * support active-high chipselects despite the controller's belief that + * only active-low devices/systems exists. + * + * However, at91rm9200 has a second erratum whereby nCS0 doesn't work + * right when driven with GPIO. ("Mode Fault does not allow more than one + * Master on Chip Select 0.") No workaround exists for that ... so for + * nCS0 on that chip, we (a) don't use the GPIO, (b) can't support CS_HIGH, + * and (c) will trigger that first erratum in some cases. + * + * TODO: Test if the atmel_spi_is_v2() branch below works on + * AT91RM9200 if we use some other register than CSR0. However, don't + * do this unconditionally since AP7000 has an errata where the BITS + * field in CSR0 overrides all other CSRs. + */ + +static void cs_activate(struct atmel_spi *as, struct spi_device *spi) +{ + struct atmel_spi_device *asd = spi->controller_state; + unsigned active = spi->mode & SPI_CS_HIGH; + u32 mr; + + if (atmel_spi_is_v2()) { + /* + * Always use CSR0. This ensures that the clock + * switches to the correct idle polarity before we + * toggle the CS. + */ + spi_writel(as, CSR0, asd->csr); + spi_writel(as, MR, SPI_BF(PCS, 0x0e) | SPI_BIT(MODFDIS) + | SPI_BIT(MSTR)); + mr = spi_readl(as, MR); + gpio_set_value(asd->npcs_pin, active); + } else { + u32 cpol = (spi->mode & SPI_CPOL) ? SPI_BIT(CPOL) : 0; + int i; + u32 csr; + + /* Make sure clock polarity is correct */ + for (i = 0; i < spi->master->num_chipselect; i++) { + csr = spi_readl(as, CSR0 + 4 * i); + if ((csr ^ cpol) & SPI_BIT(CPOL)) + spi_writel(as, CSR0 + 4 * i, + csr ^ SPI_BIT(CPOL)); + } + + mr = spi_readl(as, MR); + mr = SPI_BFINS(PCS, ~(1 << spi->chip_select), mr); + if (spi->chip_select != 0) + gpio_set_value(asd->npcs_pin, active); + spi_writel(as, MR, mr); + } + + dev_dbg(&spi->dev, "activate %u%s, mr %08x\n", + asd->npcs_pin, active ? " (high)" : "", + mr); +} + +static void cs_deactivate(struct atmel_spi *as, struct spi_device *spi) +{ + struct atmel_spi_device *asd = spi->controller_state; + unsigned active = spi->mode & SPI_CS_HIGH; + u32 mr; + + /* only deactivate *this* device; sometimes transfers to + * another device may be active when this routine is called. + */ + mr = spi_readl(as, MR); + if (~SPI_BFEXT(PCS, mr) & (1 << spi->chip_select)) { + mr = SPI_BFINS(PCS, 0xf, mr); + spi_writel(as, MR, mr); + } + + dev_dbg(&spi->dev, "DEactivate %u%s, mr %08x\n", + asd->npcs_pin, active ? 
" (low)" : "", + mr); + + if (atmel_spi_is_v2() || spi->chip_select != 0) + gpio_set_value(asd->npcs_pin, !active); +} + +static inline int atmel_spi_xfer_is_last(struct spi_message *msg, + struct spi_transfer *xfer) +{ + return msg->transfers.prev == &xfer->transfer_list; +} + +static inline int atmel_spi_xfer_can_be_chained(struct spi_transfer *xfer) +{ + return xfer->delay_usecs == 0 && !xfer->cs_change; +} + +static void atmel_spi_next_xfer_data(struct spi_master *master, + struct spi_transfer *xfer, + dma_addr_t *tx_dma, + dma_addr_t *rx_dma, + u32 *plen) +{ + struct atmel_spi *as = spi_master_get_devdata(master); + u32 len = *plen; + + /* use scratch buffer only when rx or tx data is unspecified */ + if (xfer->rx_buf) + *rx_dma = xfer->rx_dma + xfer->len - *plen; + else { + *rx_dma = as->buffer_dma; + if (len > BUFFER_SIZE) + len = BUFFER_SIZE; + } + if (xfer->tx_buf) + *tx_dma = xfer->tx_dma + xfer->len - *plen; + else { + *tx_dma = as->buffer_dma; + if (len > BUFFER_SIZE) + len = BUFFER_SIZE; + memset(as->buffer, 0, len); + dma_sync_single_for_device(&as->pdev->dev, + as->buffer_dma, len, DMA_TO_DEVICE); + } + + *plen = len; +} + +/* + * Submit next transfer for DMA. + * lock is held, spi irq is blocked + */ +static void atmel_spi_next_xfer(struct spi_master *master, + struct spi_message *msg) +{ + struct atmel_spi *as = spi_master_get_devdata(master); + struct spi_transfer *xfer; + u32 len, remaining; + u32 ieval; + dma_addr_t tx_dma, rx_dma; + + if (!as->current_transfer) + xfer = list_entry(msg->transfers.next, + struct spi_transfer, transfer_list); + else if (!as->next_transfer) + xfer = list_entry(as->current_transfer->transfer_list.next, + struct spi_transfer, transfer_list); + else + xfer = NULL; + + if (xfer) { + spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS)); + + len = xfer->len; + atmel_spi_next_xfer_data(master, xfer, &tx_dma, &rx_dma, &len); + remaining = xfer->len - len; + + spi_writel(as, RPR, rx_dma); + spi_writel(as, TPR, tx_dma); + + if (msg->spi->bits_per_word > 8) + len >>= 1; + spi_writel(as, RCR, len); + spi_writel(as, TCR, len); + + dev_dbg(&msg->spi->dev, + " start xfer %p: len %u tx %p/%08x rx %p/%08x\n", + xfer, xfer->len, xfer->tx_buf, xfer->tx_dma, + xfer->rx_buf, xfer->rx_dma); + } else { + xfer = as->next_transfer; + remaining = as->next_remaining_bytes; + } + + as->current_transfer = xfer; + as->current_remaining_bytes = remaining; + + if (remaining > 0) + len = remaining; + else if (!atmel_spi_xfer_is_last(msg, xfer) + && atmel_spi_xfer_can_be_chained(xfer)) { + xfer = list_entry(xfer->transfer_list.next, + struct spi_transfer, transfer_list); + len = xfer->len; + } else + xfer = NULL; + + as->next_transfer = xfer; + + if (xfer) { + u32 total; + + total = len; + atmel_spi_next_xfer_data(master, xfer, &tx_dma, &rx_dma, &len); + as->next_remaining_bytes = total - len; + + spi_writel(as, RNPR, rx_dma); + spi_writel(as, TNPR, tx_dma); + + if (msg->spi->bits_per_word > 8) + len >>= 1; + spi_writel(as, RNCR, len); + spi_writel(as, TNCR, len); + + dev_dbg(&msg->spi->dev, + " next xfer %p: len %u tx %p/%08x rx %p/%08x\n", + xfer, xfer->len, xfer->tx_buf, xfer->tx_dma, + xfer->rx_buf, xfer->rx_dma); + ieval = SPI_BIT(ENDRX) | SPI_BIT(OVRES); + } else { + spi_writel(as, RNCR, 0); + spi_writel(as, TNCR, 0); + ieval = SPI_BIT(RXBUFF) | SPI_BIT(ENDRX) | SPI_BIT(OVRES); + } + + /* REVISIT: We're waiting for ENDRX before we start the next + * transfer because we need to handle some difficult timing + * issues otherwise. 
If we wait for ENDTX in one transfer and + * then starts waiting for ENDRX in the next, it's difficult + * to tell the difference between the ENDRX interrupt we're + * actually waiting for and the ENDRX interrupt of the + * previous transfer. + * + * It should be doable, though. Just not now... + */ + spi_writel(as, IER, ieval); + spi_writel(as, PTCR, SPI_BIT(TXTEN) | SPI_BIT(RXTEN)); +} + +static void atmel_spi_next_message(struct spi_master *master) +{ + struct atmel_spi *as = spi_master_get_devdata(master); + struct spi_message *msg; + struct spi_device *spi; + + BUG_ON(as->current_transfer); + + msg = list_entry(as->queue.next, struct spi_message, queue); + spi = msg->spi; + + dev_dbg(master->dev.parent, "start message %p for %s\n", + msg, dev_name(&spi->dev)); + + /* select chip if it's not still active */ + if (as->stay) { + if (as->stay != spi) { + cs_deactivate(as, as->stay); + cs_activate(as, spi); + } + as->stay = NULL; + } else + cs_activate(as, spi); + + atmel_spi_next_xfer(master, msg); +} + +/* + * For DMA, tx_buf/tx_dma have the same relationship as rx_buf/rx_dma: + * - The buffer is either valid for CPU access, else NULL + * - If the buffer is valid, so is its DMA address + * + * This driver manages the dma address unless message->is_dma_mapped. + */ +static int +atmel_spi_dma_map_xfer(struct atmel_spi *as, struct spi_transfer *xfer) +{ + struct device *dev = &as->pdev->dev; + + xfer->tx_dma = xfer->rx_dma = INVALID_DMA_ADDRESS; + if (xfer->tx_buf) { + /* tx_buf is a const void* where we need a void * for the dma + * mapping */ + void *nonconst_tx = (void *)xfer->tx_buf; + + xfer->tx_dma = dma_map_single(dev, + nonconst_tx, xfer->len, + DMA_TO_DEVICE); + if (dma_mapping_error(dev, xfer->tx_dma)) + return -ENOMEM; + } + if (xfer->rx_buf) { + xfer->rx_dma = dma_map_single(dev, + xfer->rx_buf, xfer->len, + DMA_FROM_DEVICE); + if (dma_mapping_error(dev, xfer->rx_dma)) { + if (xfer->tx_buf) + dma_unmap_single(dev, + xfer->tx_dma, xfer->len, + DMA_TO_DEVICE); + return -ENOMEM; + } + } + return 0; +} + +static void atmel_spi_dma_unmap_xfer(struct spi_master *master, + struct spi_transfer *xfer) +{ + if (xfer->tx_dma != INVALID_DMA_ADDRESS) + dma_unmap_single(master->dev.parent, xfer->tx_dma, + xfer->len, DMA_TO_DEVICE); + if (xfer->rx_dma != INVALID_DMA_ADDRESS) + dma_unmap_single(master->dev.parent, xfer->rx_dma, + xfer->len, DMA_FROM_DEVICE); +} + +static void +atmel_spi_msg_done(struct spi_master *master, struct atmel_spi *as, + struct spi_message *msg, int status, int stay) +{ + if (!stay || status < 0) + cs_deactivate(as, msg->spi); + else + as->stay = msg->spi; + + list_del(&msg->queue); + msg->status = status; + + dev_dbg(master->dev.parent, + "xfer complete: %u bytes transferred\n", + msg->actual_length); + + spin_unlock(&as->lock); + msg->complete(msg->context); + spin_lock(&as->lock); + + as->current_transfer = NULL; + as->next_transfer = NULL; + + /* continue if needed */ + if (list_empty(&as->queue) || as->stopping) + spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS)); + else + atmel_spi_next_message(master); +} + +static irqreturn_t +atmel_spi_interrupt(int irq, void *dev_id) +{ + struct spi_master *master = dev_id; + struct atmel_spi *as = spi_master_get_devdata(master); + struct spi_message *msg; + struct spi_transfer *xfer; + u32 status, pending, imr; + int ret = IRQ_NONE; + + spin_lock(&as->lock); + + xfer = as->current_transfer; + msg = list_entry(as->queue.next, struct spi_message, queue); + + imr = spi_readl(as, IMR); + status = spi_readl(as, SR); + 
pending = status & imr; + + if (pending & SPI_BIT(OVRES)) { + int timeout; + + ret = IRQ_HANDLED; + + spi_writel(as, IDR, (SPI_BIT(RXBUFF) | SPI_BIT(ENDRX) + | SPI_BIT(OVRES))); + + /* + * When we get an overrun, we disregard the current + * transfer. Data will not be copied back from any + * bounce buffer and msg->actual_len will not be + * updated with the last xfer. + * + * We will also not process any remaning transfers in + * the message. + * + * First, stop the transfer and unmap the DMA buffers. + */ + spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS)); + if (!msg->is_dma_mapped) + atmel_spi_dma_unmap_xfer(master, xfer); + + /* REVISIT: udelay in irq is unfriendly */ + if (xfer->delay_usecs) + udelay(xfer->delay_usecs); + + dev_warn(master->dev.parent, "overrun (%u/%u remaining)\n", + spi_readl(as, TCR), spi_readl(as, RCR)); + + /* + * Clean up DMA registers and make sure the data + * registers are empty. + */ + spi_writel(as, RNCR, 0); + spi_writel(as, TNCR, 0); + spi_writel(as, RCR, 0); + spi_writel(as, TCR, 0); + for (timeout = 1000; timeout; timeout--) + if (spi_readl(as, SR) & SPI_BIT(TXEMPTY)) + break; + if (!timeout) + dev_warn(master->dev.parent, + "timeout waiting for TXEMPTY"); + while (spi_readl(as, SR) & SPI_BIT(RDRF)) + spi_readl(as, RDR); + + /* Clear any overrun happening while cleaning up */ + spi_readl(as, SR); + + atmel_spi_msg_done(master, as, msg, -EIO, 0); + } else if (pending & (SPI_BIT(RXBUFF) | SPI_BIT(ENDRX))) { + ret = IRQ_HANDLED; + + spi_writel(as, IDR, pending); + + if (as->current_remaining_bytes == 0) { + msg->actual_length += xfer->len; + + if (!msg->is_dma_mapped) + atmel_spi_dma_unmap_xfer(master, xfer); + + /* REVISIT: udelay in irq is unfriendly */ + if (xfer->delay_usecs) + udelay(xfer->delay_usecs); + + if (atmel_spi_xfer_is_last(msg, xfer)) { + /* report completed message */ + atmel_spi_msg_done(master, as, msg, 0, + xfer->cs_change); + } else { + if (xfer->cs_change) { + cs_deactivate(as, msg->spi); + udelay(1); + cs_activate(as, msg->spi); + } + + /* + * Not done yet. Submit the next transfer. + * + * FIXME handle protocol options for xfer + */ + atmel_spi_next_xfer(master, msg); + } + } else { + /* + * Keep going, we still have data to send in + * the current transfer. + */ + atmel_spi_next_xfer(master, msg); + } + } + + spin_unlock(&as->lock); + + return ret; +} + +static int atmel_spi_setup(struct spi_device *spi) +{ + struct atmel_spi *as; + struct atmel_spi_device *asd; + u32 scbr, csr; + unsigned int bits = spi->bits_per_word; + unsigned long bus_hz; + unsigned int npcs_pin; + int ret; + + as = spi_master_get_devdata(spi->master); + + if (as->stopping) + return -ESHUTDOWN; + + if (spi->chip_select > spi->master->num_chipselect) { + dev_dbg(&spi->dev, + "setup: invalid chipselect %u (%u defined)\n", + spi->chip_select, spi->master->num_chipselect); + return -EINVAL; + } + + if (bits < 8 || bits > 16) { + dev_dbg(&spi->dev, + "setup: invalid bits_per_word %u (8 to 16)\n", + bits); + return -EINVAL; + } + + /* see notes above re chipselect */ + if (!atmel_spi_is_v2() + && spi->chip_select == 0 + && (spi->mode & SPI_CS_HIGH)) { + dev_dbg(&spi->dev, "setup: can't be active-high\n"); + return -EINVAL; + } + + /* v1 chips start out at half the peripheral bus speed. */ + bus_hz = clk_get_rate(as->clk); + if (!atmel_spi_is_v2()) + bus_hz /= 2; + + if (spi->max_speed_hz) { + /* + * Calculate the lowest divider that satisfies the + * constraint, assuming div32/fdiv/mbz == 0. 
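		 *
		 * A worked example with a hypothetical 60 MHz bus_hz: a 10 MHz
		 * device gives scbr = DIV_ROUND_UP(60 MHz, 10 MHz) = 6, an actual
		 * rate of 60 MHz / 6 = 10 MHz; a 7 MHz limit rounds up to scbr = 9,
		 * i.e. about 6.67 MHz. The result must still fit the 8-bit SCBR
		 * field, which is what the range check below enforces.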
+ */ + scbr = DIV_ROUND_UP(bus_hz, spi->max_speed_hz); + + /* + * If the resulting divider doesn't fit into the + * register bitfield, we can't satisfy the constraint. + */ + if (scbr >= (1 << SPI_SCBR_SIZE)) { + dev_dbg(&spi->dev, + "setup: %d Hz too slow, scbr %u; min %ld Hz\n", + spi->max_speed_hz, scbr, bus_hz/255); + return -EINVAL; + } + } else + /* speed zero means "as slow as possible" */ + scbr = 0xff; + + csr = SPI_BF(SCBR, scbr) | SPI_BF(BITS, bits - 8); + if (spi->mode & SPI_CPOL) + csr |= SPI_BIT(CPOL); + if (!(spi->mode & SPI_CPHA)) + csr |= SPI_BIT(NCPHA); + + /* DLYBS is mostly irrelevant since we manage chipselect using GPIOs. + * + * DLYBCT would add delays between words, slowing down transfers. + * It could potentially be useful to cope with DMA bottlenecks, but + * in those cases it's probably best to just use a lower bitrate. + */ + csr |= SPI_BF(DLYBS, 0); + csr |= SPI_BF(DLYBCT, 0); + + /* chipselect must have been muxed as GPIO (e.g. in board setup) */ + npcs_pin = (unsigned int)spi->controller_data; + asd = spi->controller_state; + if (!asd) { + asd = kzalloc(sizeof(struct atmel_spi_device), GFP_KERNEL); + if (!asd) + return -ENOMEM; + + ret = gpio_request(npcs_pin, dev_name(&spi->dev)); + if (ret) { + kfree(asd); + return ret; + } + + asd->npcs_pin = npcs_pin; + spi->controller_state = asd; + gpio_direction_output(npcs_pin, !(spi->mode & SPI_CS_HIGH)); + } else { + unsigned long flags; + + spin_lock_irqsave(&as->lock, flags); + if (as->stay == spi) + as->stay = NULL; + cs_deactivate(as, spi); + spin_unlock_irqrestore(&as->lock, flags); + } + + asd->csr = csr; + + dev_dbg(&spi->dev, + "setup: %lu Hz bpw %u mode 0x%x -> csr%d %08x\n", + bus_hz / scbr, bits, spi->mode, spi->chip_select, csr); + + if (!atmel_spi_is_v2()) + spi_writel(as, CSR0 + 4 * spi->chip_select, csr); + + return 0; +} + +static int atmel_spi_transfer(struct spi_device *spi, struct spi_message *msg) +{ + struct atmel_spi *as; + struct spi_transfer *xfer; + unsigned long flags; + struct device *controller = spi->master->dev.parent; + u8 bits; + struct atmel_spi_device *asd; + + as = spi_master_get_devdata(spi->master); + + dev_dbg(controller, "new message %p submitted for %s\n", + msg, dev_name(&spi->dev)); + + if (unlikely(list_empty(&msg->transfers))) + return -EINVAL; + + if (as->stopping) + return -ESHUTDOWN; + + list_for_each_entry(xfer, &msg->transfers, transfer_list) { + if (!(xfer->tx_buf || xfer->rx_buf) && xfer->len) { + dev_dbg(&spi->dev, "missing rx or tx buf\n"); + return -EINVAL; + } + + if (xfer->bits_per_word) { + asd = spi->controller_state; + bits = (asd->csr >> 4) & 0xf; + if (bits != xfer->bits_per_word - 8) { + dev_dbg(&spi->dev, "you can't yet change " + "bits_per_word in transfers\n"); + return -ENOPROTOOPT; + } + } + + /* FIXME implement these protocol options!! */ + if (xfer->speed_hz) { + dev_dbg(&spi->dev, "no protocol options yet\n"); + return -ENOPROTOOPT; + } + + /* + * DMA map early, for performance (empties dcache ASAP) and + * better fault reporting. This is a DMA-only driver. + * + * NOTE that if dma_unmap_single() ever starts to do work on + * platforms supported by this driver, we would need to clean + * up mappings for previously-mapped transfers. 
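	 * (As written, a mapping failure below simply returns -ENOMEM; any
	 * transfers already mapped for this message are not unmapped.)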
+ */ + if (!msg->is_dma_mapped) { + if (atmel_spi_dma_map_xfer(as, xfer) < 0) + return -ENOMEM; + } + } + +#ifdef VERBOSE + list_for_each_entry(xfer, &msg->transfers, transfer_list) { + dev_dbg(controller, + " xfer %p: len %u tx %p/%08x rx %p/%08x\n", + xfer, xfer->len, + xfer->tx_buf, xfer->tx_dma, + xfer->rx_buf, xfer->rx_dma); + } +#endif + + msg->status = -EINPROGRESS; + msg->actual_length = 0; + + spin_lock_irqsave(&as->lock, flags); + list_add_tail(&msg->queue, &as->queue); + if (!as->current_transfer) + atmel_spi_next_message(spi->master); + spin_unlock_irqrestore(&as->lock, flags); + + return 0; +} + +static void atmel_spi_cleanup(struct spi_device *spi) +{ + struct atmel_spi *as = spi_master_get_devdata(spi->master); + struct atmel_spi_device *asd = spi->controller_state; + unsigned gpio = (unsigned) spi->controller_data; + unsigned long flags; + + if (!asd) + return; + + spin_lock_irqsave(&as->lock, flags); + if (as->stay == spi) { + as->stay = NULL; + cs_deactivate(as, spi); + } + spin_unlock_irqrestore(&as->lock, flags); + + spi->controller_state = NULL; + gpio_free(gpio); + kfree(asd); +} + +/*-------------------------------------------------------------------------*/ + +static int __init atmel_spi_probe(struct platform_device *pdev) +{ + struct resource *regs; + int irq; + struct clk *clk; + int ret; + struct spi_master *master; + struct atmel_spi *as; + + regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!regs) + return -ENXIO; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + clk = clk_get(&pdev->dev, "spi_clk"); + if (IS_ERR(clk)) + return PTR_ERR(clk); + + /* setup spi core then atmel-specific driver state */ + ret = -ENOMEM; + master = spi_alloc_master(&pdev->dev, sizeof *as); + if (!master) + goto out_free; + + /* the spi->mode bits understood by this driver: */ + master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; + + master->bus_num = pdev->id; + master->num_chipselect = 4; + master->setup = atmel_spi_setup; + master->transfer = atmel_spi_transfer; + master->cleanup = atmel_spi_cleanup; + platform_set_drvdata(pdev, master); + + as = spi_master_get_devdata(master); + + /* + * Scratch buffer is used for throwaway rx and tx data. + * It's coherent to minimize dcache pollution. + */ + as->buffer = dma_alloc_coherent(&pdev->dev, BUFFER_SIZE, + &as->buffer_dma, GFP_KERNEL); + if (!as->buffer) + goto out_free; + + spin_lock_init(&as->lock); + INIT_LIST_HEAD(&as->queue); + as->pdev = pdev; + as->regs = ioremap(regs->start, resource_size(regs)); + if (!as->regs) + goto out_free_buffer; + as->irq = irq; + as->clk = clk; + + ret = request_irq(irq, atmel_spi_interrupt, 0, + dev_name(&pdev->dev), master); + if (ret) + goto out_unmap_regs; + + /* Initialize the hardware */ + clk_enable(clk); + spi_writel(as, CR, SPI_BIT(SWRST)); + spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */ + spi_writel(as, MR, SPI_BIT(MSTR) | SPI_BIT(MODFDIS)); + spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS)); + spi_writel(as, CR, SPI_BIT(SPIEN)); + + /* go! 
*/ + dev_info(&pdev->dev, "Atmel SPI Controller at 0x%08lx (irq %d)\n", + (unsigned long)regs->start, irq); + + ret = spi_register_master(master); + if (ret) + goto out_reset_hw; + + return 0; + +out_reset_hw: + spi_writel(as, CR, SPI_BIT(SWRST)); + spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */ + clk_disable(clk); + free_irq(irq, master); +out_unmap_regs: + iounmap(as->regs); +out_free_buffer: + dma_free_coherent(&pdev->dev, BUFFER_SIZE, as->buffer, + as->buffer_dma); +out_free: + clk_put(clk); + spi_master_put(master); + return ret; +} + +static int __exit atmel_spi_remove(struct platform_device *pdev) +{ + struct spi_master *master = platform_get_drvdata(pdev); + struct atmel_spi *as = spi_master_get_devdata(master); + struct spi_message *msg; + + /* reset the hardware and block queue progress */ + spin_lock_irq(&as->lock); + as->stopping = 1; + spi_writel(as, CR, SPI_BIT(SWRST)); + spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */ + spi_readl(as, SR); + spin_unlock_irq(&as->lock); + + /* Terminate remaining queued transfers */ + list_for_each_entry(msg, &as->queue, queue) { + /* REVISIT unmapping the dma is a NOP on ARM and AVR32 + * but we shouldn't depend on that... + */ + msg->status = -ESHUTDOWN; + msg->complete(msg->context); + } + + dma_free_coherent(&pdev->dev, BUFFER_SIZE, as->buffer, + as->buffer_dma); + + clk_disable(as->clk); + clk_put(as->clk); + free_irq(as->irq, master); + iounmap(as->regs); + + spi_unregister_master(master); + + return 0; +} + +#ifdef CONFIG_PM + +static int atmel_spi_suspend(struct platform_device *pdev, pm_message_t mesg) +{ + struct spi_master *master = platform_get_drvdata(pdev); + struct atmel_spi *as = spi_master_get_devdata(master); + + clk_disable(as->clk); + return 0; +} + +static int atmel_spi_resume(struct platform_device *pdev) +{ + struct spi_master *master = platform_get_drvdata(pdev); + struct atmel_spi *as = spi_master_get_devdata(master); + + clk_enable(as->clk); + return 0; +} + +#else +#define atmel_spi_suspend NULL +#define atmel_spi_resume NULL +#endif + + +static struct platform_driver atmel_spi_driver = { + .driver = { + .name = "atmel_spi", + .owner = THIS_MODULE, + }, + .suspend = atmel_spi_suspend, + .resume = atmel_spi_resume, + .remove = __exit_p(atmel_spi_remove), +}; + +static int __init atmel_spi_init(void) +{ + return platform_driver_probe(&atmel_spi_driver, atmel_spi_probe); +} +module_init(atmel_spi_init); + +static void __exit atmel_spi_exit(void) +{ + platform_driver_unregister(&atmel_spi_driver); +} +module_exit(atmel_spi_exit); + +MODULE_DESCRIPTION("Atmel AT32/AT91 SPI Controller driver"); +MODULE_AUTHOR("Haavard Skinnemoen (Atmel)"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:atmel_spi"); diff --git a/drivers/spi/spi-au1550.c b/drivers/spi/spi-au1550.c new file mode 100644 index 0000000..bddee5f5 --- /dev/null +++ b/drivers/spi/spi-au1550.c @@ -0,0 +1,1032 @@ +/* + * au1550 psc spi controller driver + * may work also with au1200, au1210, au1250 + * will not work on au1000, au1100 and au1500 (no full spi controller there) + * + * Copyright (c) 2006 ATRON electronic GmbH + * Author: Jan Nikitenko + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +static unsigned usedma = 1; +module_param(usedma, uint, 0644); + +/* +#define AU1550_SPI_DEBUG_LOOPBACK +*/ + + +#define AU1550_SPI_DBDMA_DESCRIPTORS 1 +#define AU1550_SPI_DMA_RXTMP_MINSIZE 2048U + +struct au1550_spi { + struct spi_bitbang bitbang; + + volatile psc_spi_t __iomem *regs; + int irq; + unsigned freq_max; + unsigned freq_min; + + unsigned len; + unsigned tx_count; + unsigned rx_count; + const u8 *tx; + u8 *rx; + + void (*rx_word)(struct au1550_spi *hw); + void (*tx_word)(struct au1550_spi *hw); + int (*txrx_bufs)(struct spi_device *spi, struct spi_transfer *t); + irqreturn_t (*irq_callback)(struct au1550_spi *hw); + + struct completion master_done; + + unsigned usedma; + u32 dma_tx_id; + u32 dma_rx_id; + u32 dma_tx_ch; + u32 dma_rx_ch; + + u8 *dma_rx_tmpbuf; + unsigned dma_rx_tmpbuf_size; + u32 dma_rx_tmpbuf_addr; + + struct spi_master *master; + struct device *dev; + struct au1550_spi_info *pdata; + struct resource *ioarea; +}; + + +/* we use an 8-bit memory device for dma transfers to/from spi fifo */ +static dbdev_tab_t au1550_spi_mem_dbdev = +{ + .dev_id = DBDMA_MEM_CHAN, + .dev_flags = DEV_FLAGS_ANYUSE|DEV_FLAGS_SYNC, + .dev_tsize = 0, + .dev_devwidth = 8, + .dev_physaddr = 0x00000000, + .dev_intlevel = 0, + .dev_intpolarity = 0 +}; + +static int ddma_memid; /* id to above mem dma device */ + +static void au1550_spi_bits_handlers_set(struct au1550_spi *hw, int bpw); + + +/* + * compute BRG and DIV bits to setup spi clock based on main input clock rate + * that was specified in platform data structure + * according to au1550 datasheet: + * psc_tempclk = psc_mainclk / (2 << DIV) + * spiclk = psc_tempclk / (2 * (BRG + 1)) + * BRG valid range is 4..63 + * DIV valid range is 0..3 + */ +static u32 au1550_spi_baudcfg(struct au1550_spi *hw, unsigned speed_hz) +{ + u32 mainclk_hz = hw->pdata->mainclk_hz; + u32 div, brg; + + for (div = 0; div < 4; div++) { + brg = mainclk_hz / speed_hz / (4 << div); + /* now we have BRG+1 in brg, so count with that */ + if (brg < (4 + 1)) { + brg = (4 + 1); /* speed_hz too big */ + break; /* set lowest brg (div is == 0) */ + } + if (brg <= (63 + 1)) + break; /* we have valid brg and div */ + } + if (div == 4) { + div = 3; /* speed_hz too small */ + brg = (63 + 1); /* set highest brg and div */ + } + brg--; + return PSC_SPICFG_SET_BAUD(brg) | PSC_SPICFG_SET_DIV(div); +} + +static inline void au1550_spi_mask_ack_all(struct au1550_spi *hw) +{ + hw->regs->psc_spimsk = + PSC_SPIMSK_MM | PSC_SPIMSK_RR | PSC_SPIMSK_RO + | PSC_SPIMSK_RU | PSC_SPIMSK_TR | PSC_SPIMSK_TO + | PSC_SPIMSK_TU | PSC_SPIMSK_SD | PSC_SPIMSK_MD; + au_sync(); + + hw->regs->psc_spievent = + PSC_SPIEVNT_MM | PSC_SPIEVNT_RR | PSC_SPIEVNT_RO + | PSC_SPIEVNT_RU | PSC_SPIEVNT_TR | PSC_SPIEVNT_TO + | PSC_SPIEVNT_TU | PSC_SPIEVNT_SD | PSC_SPIEVNT_MD; + au_sync(); +} + +static void au1550_spi_reset_fifos(struct au1550_spi *hw) +{ + u32 pcr; + + hw->regs->psc_spipcr = 
PSC_SPIPCR_RC | PSC_SPIPCR_TC; + au_sync(); + do { + pcr = hw->regs->psc_spipcr; + au_sync(); + } while (pcr != 0); +} + +/* + * dma transfers are used for the most common spi word size of 8-bits + * we cannot easily change already set up dma channels' width, so if we wanted + * dma support for more than 8-bit words (up to 24 bits), we would need to + * setup dma channels from scratch on each spi transfer, based on bits_per_word + * instead we have pre set up 8 bit dma channels supporting spi 4 to 8 bits + * transfers, and 9 to 24 bits spi transfers will be done in pio irq based mode + * callbacks to handle dma or pio are set up in au1550_spi_bits_handlers_set() + */ +static void au1550_spi_chipsel(struct spi_device *spi, int value) +{ + struct au1550_spi *hw = spi_master_get_devdata(spi->master); + unsigned cspol = spi->mode & SPI_CS_HIGH ? 1 : 0; + u32 cfg, stat; + + switch (value) { + case BITBANG_CS_INACTIVE: + if (hw->pdata->deactivate_cs) + hw->pdata->deactivate_cs(hw->pdata, spi->chip_select, + cspol); + break; + + case BITBANG_CS_ACTIVE: + au1550_spi_bits_handlers_set(hw, spi->bits_per_word); + + cfg = hw->regs->psc_spicfg; + au_sync(); + hw->regs->psc_spicfg = cfg & ~PSC_SPICFG_DE_ENABLE; + au_sync(); + + if (spi->mode & SPI_CPOL) + cfg |= PSC_SPICFG_BI; + else + cfg &= ~PSC_SPICFG_BI; + if (spi->mode & SPI_CPHA) + cfg &= ~PSC_SPICFG_CDE; + else + cfg |= PSC_SPICFG_CDE; + + if (spi->mode & SPI_LSB_FIRST) + cfg |= PSC_SPICFG_MLF; + else + cfg &= ~PSC_SPICFG_MLF; + + if (hw->usedma && spi->bits_per_word <= 8) + cfg &= ~PSC_SPICFG_DD_DISABLE; + else + cfg |= PSC_SPICFG_DD_DISABLE; + cfg = PSC_SPICFG_CLR_LEN(cfg); + cfg |= PSC_SPICFG_SET_LEN(spi->bits_per_word); + + cfg = PSC_SPICFG_CLR_BAUD(cfg); + cfg &= ~PSC_SPICFG_SET_DIV(3); + cfg |= au1550_spi_baudcfg(hw, spi->max_speed_hz); + + hw->regs->psc_spicfg = cfg | PSC_SPICFG_DE_ENABLE; + au_sync(); + do { + stat = hw->regs->psc_spistat; + au_sync(); + } while ((stat & PSC_SPISTAT_DR) == 0); + + if (hw->pdata->activate_cs) + hw->pdata->activate_cs(hw->pdata, spi->chip_select, + cspol); + break; + } +} + +static int au1550_spi_setupxfer(struct spi_device *spi, struct spi_transfer *t) +{ + struct au1550_spi *hw = spi_master_get_devdata(spi->master); + unsigned bpw, hz; + u32 cfg, stat; + + bpw = spi->bits_per_word; + hz = spi->max_speed_hz; + if (t) { + if (t->bits_per_word) + bpw = t->bits_per_word; + if (t->speed_hz) + hz = t->speed_hz; + } + + if (bpw < 4 || bpw > 24) { + dev_err(&spi->dev, "setupxfer: invalid bits_per_word=%d\n", + bpw); + return -EINVAL; + } + if (hz > spi->max_speed_hz || hz > hw->freq_max || hz < hw->freq_min) { + dev_err(&spi->dev, "setupxfer: clock rate=%d out of range\n", + hz); + return -EINVAL; + } + + au1550_spi_bits_handlers_set(hw, spi->bits_per_word); + + cfg = hw->regs->psc_spicfg; + au_sync(); + hw->regs->psc_spicfg = cfg & ~PSC_SPICFG_DE_ENABLE; + au_sync(); + + if (hw->usedma && bpw <= 8) + cfg &= ~PSC_SPICFG_DD_DISABLE; + else + cfg |= PSC_SPICFG_DD_DISABLE; + cfg = PSC_SPICFG_CLR_LEN(cfg); + cfg |= PSC_SPICFG_SET_LEN(bpw); + + cfg = PSC_SPICFG_CLR_BAUD(cfg); + cfg &= ~PSC_SPICFG_SET_DIV(3); + cfg |= au1550_spi_baudcfg(hw, hz); + + hw->regs->psc_spicfg = cfg; + au_sync(); + + if (cfg & PSC_SPICFG_DE_ENABLE) { + do { + stat = hw->regs->psc_spistat; + au_sync(); + } while ((stat & PSC_SPISTAT_DR) == 0); + } + + au1550_spi_reset_fifos(hw); + au1550_spi_mask_ack_all(hw); + return 0; +} + +static int au1550_spi_setup(struct spi_device *spi) +{ + struct au1550_spi *hw = spi_master_get_devdata(spi->master); 
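+	/*
+	 * Worked example for the BRG/DIV formula used by au1550_spi_baudcfg()
+	 * (illustrative only; assumes a 48 MHz psc mainclk, which is a
+	 * board-specific value supplied via platform data):
+	 *   fastest: DIV=0, BRG=4  -> 48 MHz / (2 << 0) / (2 * (4 + 1))  = 2.4 MHz
+	 *   slowest: DIV=3, BRG=63 -> 48 MHz / (2 << 3) / (2 * (63 + 1)) ~= 23.4 kHz
+	 * The same formula yields the hw->freq_max/hw->freq_min limits that
+	 * spi->max_speed_hz is checked against below.
+	 */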
+ + if (spi->bits_per_word < 4 || spi->bits_per_word > 24) { + dev_err(&spi->dev, "setup: invalid bits_per_word=%d\n", + spi->bits_per_word); + return -EINVAL; + } + + if (spi->max_speed_hz == 0) + spi->max_speed_hz = hw->freq_max; + if (spi->max_speed_hz > hw->freq_max + || spi->max_speed_hz < hw->freq_min) + return -EINVAL; + /* + * NOTE: cannot change speed and other hw settings immediately, + * otherwise sharing of spi bus is not possible, + * so do not call setupxfer(spi, NULL) here + */ + return 0; +} + +/* + * for dma spi transfers, we have to setup rx channel, otherwise there is + * no reliable way how to recognize that spi transfer is done + * dma complete callbacks are called before real spi transfer is finished + * and if only tx dma channel is set up (and rx fifo overflow event masked) + * spi master done event irq is not generated unless rx fifo is empty (emptied) + * so we need rx tmp buffer to use for rx dma if user does not provide one + */ +static int au1550_spi_dma_rxtmp_alloc(struct au1550_spi *hw, unsigned size) +{ + hw->dma_rx_tmpbuf = kmalloc(size, GFP_KERNEL); + if (!hw->dma_rx_tmpbuf) + return -ENOMEM; + hw->dma_rx_tmpbuf_size = size; + hw->dma_rx_tmpbuf_addr = dma_map_single(hw->dev, hw->dma_rx_tmpbuf, + size, DMA_FROM_DEVICE); + if (dma_mapping_error(hw->dev, hw->dma_rx_tmpbuf_addr)) { + kfree(hw->dma_rx_tmpbuf); + hw->dma_rx_tmpbuf = 0; + hw->dma_rx_tmpbuf_size = 0; + return -EFAULT; + } + return 0; +} + +static void au1550_spi_dma_rxtmp_free(struct au1550_spi *hw) +{ + dma_unmap_single(hw->dev, hw->dma_rx_tmpbuf_addr, + hw->dma_rx_tmpbuf_size, DMA_FROM_DEVICE); + kfree(hw->dma_rx_tmpbuf); + hw->dma_rx_tmpbuf = 0; + hw->dma_rx_tmpbuf_size = 0; +} + +static int au1550_spi_dma_txrxb(struct spi_device *spi, struct spi_transfer *t) +{ + struct au1550_spi *hw = spi_master_get_devdata(spi->master); + dma_addr_t dma_tx_addr; + dma_addr_t dma_rx_addr; + u32 res; + + hw->len = t->len; + hw->tx_count = 0; + hw->rx_count = 0; + + hw->tx = t->tx_buf; + hw->rx = t->rx_buf; + dma_tx_addr = t->tx_dma; + dma_rx_addr = t->rx_dma; + + /* + * check if buffers are already dma mapped, map them otherwise: + * - first map the TX buffer, so cache data gets written to memory + * - then map the RX buffer, so that cache entries (with + * soon-to-be-stale data) get removed + * use rx buffer in place of tx if tx buffer was not provided + * use temp rx buffer (preallocated or realloc to fit) for rx dma + */ + if (t->tx_buf) { + if (t->tx_dma == 0) { /* if DMA_ADDR_INVALID, map it */ + dma_tx_addr = dma_map_single(hw->dev, + (void *)t->tx_buf, + t->len, DMA_TO_DEVICE); + if (dma_mapping_error(hw->dev, dma_tx_addr)) + dev_err(hw->dev, "tx dma map error\n"); + } + } + + if (t->rx_buf) { + if (t->rx_dma == 0) { /* if DMA_ADDR_INVALID, map it */ + dma_rx_addr = dma_map_single(hw->dev, + (void *)t->rx_buf, + t->len, DMA_FROM_DEVICE); + if (dma_mapping_error(hw->dev, dma_rx_addr)) + dev_err(hw->dev, "rx dma map error\n"); + } + } else { + if (t->len > hw->dma_rx_tmpbuf_size) { + int ret; + + au1550_spi_dma_rxtmp_free(hw); + ret = au1550_spi_dma_rxtmp_alloc(hw, max(t->len, + AU1550_SPI_DMA_RXTMP_MINSIZE)); + if (ret < 0) + return ret; + } + hw->rx = hw->dma_rx_tmpbuf; + dma_rx_addr = hw->dma_rx_tmpbuf_addr; + dma_sync_single_for_device(hw->dev, dma_rx_addr, + t->len, DMA_FROM_DEVICE); + } + + if (!t->tx_buf) { + dma_sync_single_for_device(hw->dev, dma_rx_addr, + t->len, DMA_BIDIRECTIONAL); + hw->tx = hw->rx; + } + + /* put buffers on the ring */ + res = au1xxx_dbdma_put_dest(hw->dma_rx_ch, 
virt_to_phys(hw->rx), + t->len, DDMA_FLAGS_IE); + if (!res) + dev_err(hw->dev, "rx dma put dest error\n"); + + res = au1xxx_dbdma_put_source(hw->dma_tx_ch, virt_to_phys(hw->tx), + t->len, DDMA_FLAGS_IE); + if (!res) + dev_err(hw->dev, "tx dma put source error\n"); + + au1xxx_dbdma_start(hw->dma_rx_ch); + au1xxx_dbdma_start(hw->dma_tx_ch); + + /* by default enable nearly all events interrupt */ + hw->regs->psc_spimsk = PSC_SPIMSK_SD; + au_sync(); + + /* start the transfer */ + hw->regs->psc_spipcr = PSC_SPIPCR_MS; + au_sync(); + + wait_for_completion(&hw->master_done); + + au1xxx_dbdma_stop(hw->dma_tx_ch); + au1xxx_dbdma_stop(hw->dma_rx_ch); + + if (!t->rx_buf) { + /* using the temporal preallocated and premapped buffer */ + dma_sync_single_for_cpu(hw->dev, dma_rx_addr, t->len, + DMA_FROM_DEVICE); + } + /* unmap buffers if mapped above */ + if (t->rx_buf && t->rx_dma == 0 ) + dma_unmap_single(hw->dev, dma_rx_addr, t->len, + DMA_FROM_DEVICE); + if (t->tx_buf && t->tx_dma == 0 ) + dma_unmap_single(hw->dev, dma_tx_addr, t->len, + DMA_TO_DEVICE); + + return hw->rx_count < hw->tx_count ? hw->rx_count : hw->tx_count; +} + +static irqreturn_t au1550_spi_dma_irq_callback(struct au1550_spi *hw) +{ + u32 stat, evnt; + + stat = hw->regs->psc_spistat; + evnt = hw->regs->psc_spievent; + au_sync(); + if ((stat & PSC_SPISTAT_DI) == 0) { + dev_err(hw->dev, "Unexpected IRQ!\n"); + return IRQ_NONE; + } + + if ((evnt & (PSC_SPIEVNT_MM | PSC_SPIEVNT_RO + | PSC_SPIEVNT_RU | PSC_SPIEVNT_TO + | PSC_SPIEVNT_TU | PSC_SPIEVNT_SD)) + != 0) { + /* + * due to an spi error we consider transfer as done, + * so mask all events until before next transfer start + * and stop the possibly running dma immediatelly + */ + au1550_spi_mask_ack_all(hw); + au1xxx_dbdma_stop(hw->dma_rx_ch); + au1xxx_dbdma_stop(hw->dma_tx_ch); + + /* get number of transferred bytes */ + hw->rx_count = hw->len - au1xxx_get_dma_residue(hw->dma_rx_ch); + hw->tx_count = hw->len - au1xxx_get_dma_residue(hw->dma_tx_ch); + + au1xxx_dbdma_reset(hw->dma_rx_ch); + au1xxx_dbdma_reset(hw->dma_tx_ch); + au1550_spi_reset_fifos(hw); + + if (evnt == PSC_SPIEVNT_RO) + dev_err(hw->dev, + "dma transfer: receive FIFO overflow!\n"); + else + dev_err(hw->dev, + "dma transfer: unexpected SPI error " + "(event=0x%x stat=0x%x)!\n", evnt, stat); + + complete(&hw->master_done); + return IRQ_HANDLED; + } + + if ((evnt & PSC_SPIEVNT_MD) != 0) { + /* transfer completed successfully */ + au1550_spi_mask_ack_all(hw); + hw->rx_count = hw->len; + hw->tx_count = hw->len; + complete(&hw->master_done); + } + return IRQ_HANDLED; +} + + +/* routines to handle different word sizes in pio mode */ +#define AU1550_SPI_RX_WORD(size, mask) \ +static void au1550_spi_rx_word_##size(struct au1550_spi *hw) \ +{ \ + u32 fifoword = hw->regs->psc_spitxrx & (u32)(mask); \ + au_sync(); \ + if (hw->rx) { \ + *(u##size *)hw->rx = (u##size)fifoword; \ + hw->rx += (size) / 8; \ + } \ + hw->rx_count += (size) / 8; \ +} + +#define AU1550_SPI_TX_WORD(size, mask) \ +static void au1550_spi_tx_word_##size(struct au1550_spi *hw) \ +{ \ + u32 fifoword = 0; \ + if (hw->tx) { \ + fifoword = *(u##size *)hw->tx & (u32)(mask); \ + hw->tx += (size) / 8; \ + } \ + hw->tx_count += (size) / 8; \ + if (hw->tx_count >= hw->len) \ + fifoword |= PSC_SPITXRX_LC; \ + hw->regs->psc_spitxrx = fifoword; \ + au_sync(); \ +} + +AU1550_SPI_RX_WORD(8,0xff) +AU1550_SPI_RX_WORD(16,0xffff) +AU1550_SPI_RX_WORD(32,0xffffff) +AU1550_SPI_TX_WORD(8,0xff) +AU1550_SPI_TX_WORD(16,0xffff) +AU1550_SPI_TX_WORD(32,0xffffff) + +static int 
au1550_spi_pio_txrxb(struct spi_device *spi, struct spi_transfer *t) +{ + u32 stat, mask; + struct au1550_spi *hw = spi_master_get_devdata(spi->master); + + hw->tx = t->tx_buf; + hw->rx = t->rx_buf; + hw->len = t->len; + hw->tx_count = 0; + hw->rx_count = 0; + + /* by default enable nearly all events after filling tx fifo */ + mask = PSC_SPIMSK_SD; + + /* fill the transmit FIFO */ + while (hw->tx_count < hw->len) { + + hw->tx_word(hw); + + if (hw->tx_count >= hw->len) { + /* mask tx fifo request interrupt as we are done */ + mask |= PSC_SPIMSK_TR; + } + + stat = hw->regs->psc_spistat; + au_sync(); + if (stat & PSC_SPISTAT_TF) + break; + } + + /* enable event interrupts */ + hw->regs->psc_spimsk = mask; + au_sync(); + + /* start the transfer */ + hw->regs->psc_spipcr = PSC_SPIPCR_MS; + au_sync(); + + wait_for_completion(&hw->master_done); + + return hw->rx_count < hw->tx_count ? hw->rx_count : hw->tx_count; +} + +static irqreturn_t au1550_spi_pio_irq_callback(struct au1550_spi *hw) +{ + int busy; + u32 stat, evnt; + + stat = hw->regs->psc_spistat; + evnt = hw->regs->psc_spievent; + au_sync(); + if ((stat & PSC_SPISTAT_DI) == 0) { + dev_err(hw->dev, "Unexpected IRQ!\n"); + return IRQ_NONE; + } + + if ((evnt & (PSC_SPIEVNT_MM | PSC_SPIEVNT_RO + | PSC_SPIEVNT_RU | PSC_SPIEVNT_TO + | PSC_SPIEVNT_SD)) + != 0) { + /* + * due to an error we consider transfer as done, + * so mask all events until before next transfer start + */ + au1550_spi_mask_ack_all(hw); + au1550_spi_reset_fifos(hw); + dev_err(hw->dev, + "pio transfer: unexpected SPI error " + "(event=0x%x stat=0x%x)!\n", evnt, stat); + complete(&hw->master_done); + return IRQ_HANDLED; + } + + /* + * while there is something to read from rx fifo + * or there is a space to write to tx fifo: + */ + do { + busy = 0; + stat = hw->regs->psc_spistat; + au_sync(); + + /* + * Take care to not let the Rx FIFO overflow. + * + * We only write a byte if we have read one at least. Initially, + * the write fifo is full, so we should read from the read fifo + * first. + * In case we miss a word from the read fifo, we should get a + * RO event and should back out. + */ + if (!(stat & PSC_SPISTAT_RE) && hw->rx_count < hw->len) { + hw->rx_word(hw); + busy = 1; + + if (!(stat & PSC_SPISTAT_TF) && hw->tx_count < hw->len) + hw->tx_word(hw); + } + } while (busy); + + hw->regs->psc_spievent = PSC_SPIEVNT_RR | PSC_SPIEVNT_TR; + au_sync(); + + /* + * Restart the SPI transmission in case of a transmit underflow. + * This seems to work despite the notes in the Au1550 data book + * of Figure 8-4 with flowchart for SPI master operation: + * + * """Note 1: An XFR Error Interrupt occurs, unless masked, + * for any of the following events: Tx FIFO Underflow, + * Rx FIFO Overflow, or Multiple-master Error + * Note 2: In case of a Tx Underflow Error, all zeroes are + * transmitted.""" + * + * By simply restarting the spi transfer on Tx Underflow Error, + * we assume that spi transfer was paused instead of zeroes + * transmittion mentioned in the Note 2 of Au1550 data book. 
+ */ + if (evnt & PSC_SPIEVNT_TU) { + hw->regs->psc_spievent = PSC_SPIEVNT_TU | PSC_SPIEVNT_MD; + au_sync(); + hw->regs->psc_spipcr = PSC_SPIPCR_MS; + au_sync(); + } + + if (hw->rx_count >= hw->len) { + /* transfer completed successfully */ + au1550_spi_mask_ack_all(hw); + complete(&hw->master_done); + } + return IRQ_HANDLED; +} + +static int au1550_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t) +{ + struct au1550_spi *hw = spi_master_get_devdata(spi->master); + return hw->txrx_bufs(spi, t); +} + +static irqreturn_t au1550_spi_irq(int irq, void *dev) +{ + struct au1550_spi *hw = dev; + return hw->irq_callback(hw); +} + +static void au1550_spi_bits_handlers_set(struct au1550_spi *hw, int bpw) +{ + if (bpw <= 8) { + if (hw->usedma) { + hw->txrx_bufs = &au1550_spi_dma_txrxb; + hw->irq_callback = &au1550_spi_dma_irq_callback; + } else { + hw->rx_word = &au1550_spi_rx_word_8; + hw->tx_word = &au1550_spi_tx_word_8; + hw->txrx_bufs = &au1550_spi_pio_txrxb; + hw->irq_callback = &au1550_spi_pio_irq_callback; + } + } else if (bpw <= 16) { + hw->rx_word = &au1550_spi_rx_word_16; + hw->tx_word = &au1550_spi_tx_word_16; + hw->txrx_bufs = &au1550_spi_pio_txrxb; + hw->irq_callback = &au1550_spi_pio_irq_callback; + } else { + hw->rx_word = &au1550_spi_rx_word_32; + hw->tx_word = &au1550_spi_tx_word_32; + hw->txrx_bufs = &au1550_spi_pio_txrxb; + hw->irq_callback = &au1550_spi_pio_irq_callback; + } +} + +static void __init au1550_spi_setup_psc_as_spi(struct au1550_spi *hw) +{ + u32 stat, cfg; + + /* set up the PSC for SPI mode */ + hw->regs->psc_ctrl = PSC_CTRL_DISABLE; + au_sync(); + hw->regs->psc_sel = PSC_SEL_PS_SPIMODE; + au_sync(); + + hw->regs->psc_spicfg = 0; + au_sync(); + + hw->regs->psc_ctrl = PSC_CTRL_ENABLE; + au_sync(); + + do { + stat = hw->regs->psc_spistat; + au_sync(); + } while ((stat & PSC_SPISTAT_SR) == 0); + + + cfg = hw->usedma ? 
0 : PSC_SPICFG_DD_DISABLE; + cfg |= PSC_SPICFG_SET_LEN(8); + cfg |= PSC_SPICFG_RT_FIFO8 | PSC_SPICFG_TT_FIFO8; + /* use minimal allowed brg and div values as initial setting: */ + cfg |= PSC_SPICFG_SET_BAUD(4) | PSC_SPICFG_SET_DIV(0); + +#ifdef AU1550_SPI_DEBUG_LOOPBACK + cfg |= PSC_SPICFG_LB; +#endif + + hw->regs->psc_spicfg = cfg; + au_sync(); + + au1550_spi_mask_ack_all(hw); + + hw->regs->psc_spicfg |= PSC_SPICFG_DE_ENABLE; + au_sync(); + + do { + stat = hw->regs->psc_spistat; + au_sync(); + } while ((stat & PSC_SPISTAT_DR) == 0); + + au1550_spi_reset_fifos(hw); +} + + +static int __init au1550_spi_probe(struct platform_device *pdev) +{ + struct au1550_spi *hw; + struct spi_master *master; + struct resource *r; + int err = 0; + + master = spi_alloc_master(&pdev->dev, sizeof(struct au1550_spi)); + if (master == NULL) { + dev_err(&pdev->dev, "No memory for spi_master\n"); + err = -ENOMEM; + goto err_nomem; + } + + /* the spi->mode bits understood by this driver: */ + master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST; + + hw = spi_master_get_devdata(master); + + hw->master = spi_master_get(master); + hw->pdata = pdev->dev.platform_data; + hw->dev = &pdev->dev; + + if (hw->pdata == NULL) { + dev_err(&pdev->dev, "No platform data supplied\n"); + err = -ENOENT; + goto err_no_pdata; + } + + r = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!r) { + dev_err(&pdev->dev, "no IRQ\n"); + err = -ENODEV; + goto err_no_iores; + } + hw->irq = r->start; + + hw->usedma = 0; + r = platform_get_resource(pdev, IORESOURCE_DMA, 0); + if (r) { + hw->dma_tx_id = r->start; + r = platform_get_resource(pdev, IORESOURCE_DMA, 1); + if (r) { + hw->dma_rx_id = r->start; + if (usedma && ddma_memid) { + if (pdev->dev.dma_mask == NULL) + dev_warn(&pdev->dev, "no dma mask\n"); + else + hw->usedma = 1; + } + } + } + + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!r) { + dev_err(&pdev->dev, "no mmio resource\n"); + err = -ENODEV; + goto err_no_iores; + } + + hw->ioarea = request_mem_region(r->start, sizeof(psc_spi_t), + pdev->name); + if (!hw->ioarea) { + dev_err(&pdev->dev, "Cannot reserve iomem region\n"); + err = -ENXIO; + goto err_no_iores; + } + + hw->regs = (psc_spi_t __iomem *)ioremap(r->start, sizeof(psc_spi_t)); + if (!hw->regs) { + dev_err(&pdev->dev, "cannot ioremap\n"); + err = -ENXIO; + goto err_ioremap; + } + + platform_set_drvdata(pdev, hw); + + init_completion(&hw->master_done); + + hw->bitbang.master = hw->master; + hw->bitbang.setup_transfer = au1550_spi_setupxfer; + hw->bitbang.chipselect = au1550_spi_chipsel; + hw->bitbang.master->setup = au1550_spi_setup; + hw->bitbang.txrx_bufs = au1550_spi_txrx_bufs; + + if (hw->usedma) { + hw->dma_tx_ch = au1xxx_dbdma_chan_alloc(ddma_memid, + hw->dma_tx_id, NULL, (void *)hw); + if (hw->dma_tx_ch == 0) { + dev_err(&pdev->dev, + "Cannot allocate tx dma channel\n"); + err = -ENXIO; + goto err_no_txdma; + } + au1xxx_dbdma_set_devwidth(hw->dma_tx_ch, 8); + if (au1xxx_dbdma_ring_alloc(hw->dma_tx_ch, + AU1550_SPI_DBDMA_DESCRIPTORS) == 0) { + dev_err(&pdev->dev, + "Cannot allocate tx dma descriptors\n"); + err = -ENXIO; + goto err_no_txdma_descr; + } + + + hw->dma_rx_ch = au1xxx_dbdma_chan_alloc(hw->dma_rx_id, + ddma_memid, NULL, (void *)hw); + if (hw->dma_rx_ch == 0) { + dev_err(&pdev->dev, + "Cannot allocate rx dma channel\n"); + err = -ENXIO; + goto err_no_rxdma; + } + au1xxx_dbdma_set_devwidth(hw->dma_rx_ch, 8); + if (au1xxx_dbdma_ring_alloc(hw->dma_rx_ch, + AU1550_SPI_DBDMA_DESCRIPTORS) == 0) { + dev_err(&pdev->dev, + "Cannot 
allocate rx dma descriptors\n"); + err = -ENXIO; + goto err_no_rxdma_descr; + } + + err = au1550_spi_dma_rxtmp_alloc(hw, + AU1550_SPI_DMA_RXTMP_MINSIZE); + if (err < 0) { + dev_err(&pdev->dev, + "Cannot allocate initial rx dma tmp buffer\n"); + goto err_dma_rxtmp_alloc; + } + } + + au1550_spi_bits_handlers_set(hw, 8); + + err = request_irq(hw->irq, au1550_spi_irq, 0, pdev->name, hw); + if (err) { + dev_err(&pdev->dev, "Cannot claim IRQ\n"); + goto err_no_irq; + } + + master->bus_num = pdev->id; + master->num_chipselect = hw->pdata->num_chipselect; + + /* + * precompute valid range for spi freq - from au1550 datasheet: + * psc_tempclk = psc_mainclk / (2 << DIV) + * spiclk = psc_tempclk / (2 * (BRG + 1)) + * BRG valid range is 4..63 + * DIV valid range is 0..3 + * round the min and max frequencies to values that would still + * produce valid brg and div + */ + { + int min_div = (2 << 0) * (2 * (4 + 1)); + int max_div = (2 << 3) * (2 * (63 + 1)); + hw->freq_max = hw->pdata->mainclk_hz / min_div; + hw->freq_min = hw->pdata->mainclk_hz / (max_div + 1) + 1; + } + + au1550_spi_setup_psc_as_spi(hw); + + err = spi_bitbang_start(&hw->bitbang); + if (err) { + dev_err(&pdev->dev, "Failed to register SPI master\n"); + goto err_register; + } + + dev_info(&pdev->dev, + "spi master registered: bus_num=%d num_chipselect=%d\n", + master->bus_num, master->num_chipselect); + + return 0; + +err_register: + free_irq(hw->irq, hw); + +err_no_irq: + au1550_spi_dma_rxtmp_free(hw); + +err_dma_rxtmp_alloc: +err_no_rxdma_descr: + if (hw->usedma) + au1xxx_dbdma_chan_free(hw->dma_rx_ch); + +err_no_rxdma: +err_no_txdma_descr: + if (hw->usedma) + au1xxx_dbdma_chan_free(hw->dma_tx_ch); + +err_no_txdma: + iounmap((void __iomem *)hw->regs); + +err_ioremap: + release_resource(hw->ioarea); + kfree(hw->ioarea); + +err_no_iores: +err_no_pdata: + spi_master_put(hw->master); + +err_nomem: + return err; +} + +static int __exit au1550_spi_remove(struct platform_device *pdev) +{ + struct au1550_spi *hw = platform_get_drvdata(pdev); + + dev_info(&pdev->dev, "spi master remove: bus_num=%d\n", + hw->master->bus_num); + + spi_bitbang_stop(&hw->bitbang); + free_irq(hw->irq, hw); + iounmap((void __iomem *)hw->regs); + release_resource(hw->ioarea); + kfree(hw->ioarea); + + if (hw->usedma) { + au1550_spi_dma_rxtmp_free(hw); + au1xxx_dbdma_chan_free(hw->dma_rx_ch); + au1xxx_dbdma_chan_free(hw->dma_tx_ch); + } + + platform_set_drvdata(pdev, NULL); + + spi_master_put(hw->master); + return 0; +} + +/* work with hotplug and coldplug */ +MODULE_ALIAS("platform:au1550-spi"); + +static struct platform_driver au1550_spi_drv = { + .remove = __exit_p(au1550_spi_remove), + .driver = { + .name = "au1550-spi", + .owner = THIS_MODULE, + }, +}; + +static int __init au1550_spi_init(void) +{ + /* + * create memory device with 8 bits dev_devwidth + * needed for proper byte ordering to spi fifo + */ + if (usedma) { + ddma_memid = au1xxx_ddma_add_device(&au1550_spi_mem_dbdev); + if (!ddma_memid) + printk(KERN_ERR "au1550-spi: cannot add memory" + "dbdma device\n"); + } + return platform_driver_probe(&au1550_spi_drv, au1550_spi_probe); +} +module_init(au1550_spi_init); + +static void __exit au1550_spi_exit(void) +{ + if (usedma && ddma_memid) + au1xxx_ddma_del_device(ddma_memid); + platform_driver_unregister(&au1550_spi_drv); +} +module_exit(au1550_spi_exit); + +MODULE_DESCRIPTION("Au1550 PSC SPI Driver"); +MODULE_AUTHOR("Jan Nikitenko "); +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi-bfin-sport.c b/drivers/spi/spi-bfin-sport.c new file mode 100644 index 
0000000..e557ff6 --- /dev/null +++ b/drivers/spi/spi-bfin-sport.c @@ -0,0 +1,952 @@ +/* + * SPI bus via the Blackfin SPORT peripheral + * + * Enter bugs at http://blackfin.uclinux.org/ + * + * Copyright 2009-2011 Analog Devices Inc. + * + * Licensed under the GPL-2 or later. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#define DRV_NAME "bfin-sport-spi" +#define DRV_DESC "SPI bus via the Blackfin SPORT" + +MODULE_AUTHOR("Cliff Cai"); +MODULE_DESCRIPTION(DRV_DESC); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:bfin-sport-spi"); + +enum bfin_sport_spi_state { + START_STATE, + RUNNING_STATE, + DONE_STATE, + ERROR_STATE, +}; + +struct bfin_sport_spi_master_data; + +struct bfin_sport_transfer_ops { + void (*write) (struct bfin_sport_spi_master_data *); + void (*read) (struct bfin_sport_spi_master_data *); + void (*duplex) (struct bfin_sport_spi_master_data *); +}; + +struct bfin_sport_spi_master_data { + /* Driver model hookup */ + struct device *dev; + + /* SPI framework hookup */ + struct spi_master *master; + + /* Regs base of SPI controller */ + struct sport_register __iomem *regs; + int err_irq; + + /* Pin request list */ + u16 *pin_req; + + /* Driver message queue */ + struct workqueue_struct *workqueue; + struct work_struct pump_messages; + spinlock_t lock; + struct list_head queue; + int busy; + bool run; + + /* Message Transfer pump */ + struct tasklet_struct pump_transfers; + + /* Current message transfer state info */ + enum bfin_sport_spi_state state; + struct spi_message *cur_msg; + struct spi_transfer *cur_transfer; + struct bfin_sport_spi_slave_data *cur_chip; + union { + void *tx; + u8 *tx8; + u16 *tx16; + }; + void *tx_end; + union { + void *rx; + u8 *rx8; + u16 *rx16; + }; + void *rx_end; + + int cs_change; + struct bfin_sport_transfer_ops *ops; +}; + +struct bfin_sport_spi_slave_data { + u16 ctl_reg; + u16 baud; + u16 cs_chg_udelay; /* Some devices require > 255usec delay */ + u32 cs_gpio; + u16 idle_tx_val; + struct bfin_sport_transfer_ops *ops; +}; + +static void +bfin_sport_spi_enable(struct bfin_sport_spi_master_data *drv_data) +{ + bfin_write_or(&drv_data->regs->tcr1, TSPEN); + bfin_write_or(&drv_data->regs->rcr1, TSPEN); + SSYNC(); +} + +static void +bfin_sport_spi_disable(struct bfin_sport_spi_master_data *drv_data) +{ + bfin_write_and(&drv_data->regs->tcr1, ~TSPEN); + bfin_write_and(&drv_data->regs->rcr1, ~TSPEN); + SSYNC(); +} + +/* Caculate the SPI_BAUD register value based on input HZ */ +static u16 +bfin_sport_hz_to_spi_baud(u32 speed_hz) +{ + u_long clk, sclk = get_sclk(); + int div = (sclk / (2 * speed_hz)) - 1; + + if (div < 0) + div = 0; + + clk = sclk / (2 * (div + 1)); + + if (clk > speed_hz) + div++; + + return div; +} + +/* Chip select operation functions for cs_change flag */ +static void +bfin_sport_spi_cs_active(struct bfin_sport_spi_slave_data *chip) +{ + gpio_direction_output(chip->cs_gpio, 0); +} + +static void +bfin_sport_spi_cs_deactive(struct bfin_sport_spi_slave_data *chip) +{ + gpio_direction_output(chip->cs_gpio, 1); + /* Move delay here for consistency */ + if (chip->cs_chg_udelay) + udelay(chip->cs_chg_udelay); +} + +static void +bfin_sport_spi_stat_poll_complete(struct bfin_sport_spi_master_data *drv_data) +{ + unsigned long timeout = jiffies + HZ; + while (!(bfin_read(&drv_data->regs->stat) & RXNE)) { + if (!time_before(jiffies, timeout)) + break; + } +} + +static void 
+bfin_sport_spi_u8_writer(struct bfin_sport_spi_master_data *drv_data) +{ + u16 dummy; + + while (drv_data->tx < drv_data->tx_end) { + bfin_write(&drv_data->regs->tx16, *drv_data->tx8++); + bfin_sport_spi_stat_poll_complete(drv_data); + dummy = bfin_read(&drv_data->regs->rx16); + } +} + +static void +bfin_sport_spi_u8_reader(struct bfin_sport_spi_master_data *drv_data) +{ + u16 tx_val = drv_data->cur_chip->idle_tx_val; + + while (drv_data->rx < drv_data->rx_end) { + bfin_write(&drv_data->regs->tx16, tx_val); + bfin_sport_spi_stat_poll_complete(drv_data); + *drv_data->rx8++ = bfin_read(&drv_data->regs->rx16); + } +} + +static void +bfin_sport_spi_u8_duplex(struct bfin_sport_spi_master_data *drv_data) +{ + while (drv_data->rx < drv_data->rx_end) { + bfin_write(&drv_data->regs->tx16, *drv_data->tx8++); + bfin_sport_spi_stat_poll_complete(drv_data); + *drv_data->rx8++ = bfin_read(&drv_data->regs->rx16); + } +} + +static struct bfin_sport_transfer_ops bfin_sport_transfer_ops_u8 = { + .write = bfin_sport_spi_u8_writer, + .read = bfin_sport_spi_u8_reader, + .duplex = bfin_sport_spi_u8_duplex, +}; + +static void +bfin_sport_spi_u16_writer(struct bfin_sport_spi_master_data *drv_data) +{ + u16 dummy; + + while (drv_data->tx < drv_data->tx_end) { + bfin_write(&drv_data->regs->tx16, *drv_data->tx16++); + bfin_sport_spi_stat_poll_complete(drv_data); + dummy = bfin_read(&drv_data->regs->rx16); + } +} + +static void +bfin_sport_spi_u16_reader(struct bfin_sport_spi_master_data *drv_data) +{ + u16 tx_val = drv_data->cur_chip->idle_tx_val; + + while (drv_data->rx < drv_data->rx_end) { + bfin_write(&drv_data->regs->tx16, tx_val); + bfin_sport_spi_stat_poll_complete(drv_data); + *drv_data->rx16++ = bfin_read(&drv_data->regs->rx16); + } +} + +static void +bfin_sport_spi_u16_duplex(struct bfin_sport_spi_master_data *drv_data) +{ + while (drv_data->rx < drv_data->rx_end) { + bfin_write(&drv_data->regs->tx16, *drv_data->tx16++); + bfin_sport_spi_stat_poll_complete(drv_data); + *drv_data->rx16++ = bfin_read(&drv_data->regs->rx16); + } +} + +static struct bfin_sport_transfer_ops bfin_sport_transfer_ops_u16 = { + .write = bfin_sport_spi_u16_writer, + .read = bfin_sport_spi_u16_reader, + .duplex = bfin_sport_spi_u16_duplex, +}; + +/* stop controller and re-config current chip */ +static void +bfin_sport_spi_restore_state(struct bfin_sport_spi_master_data *drv_data) +{ + struct bfin_sport_spi_slave_data *chip = drv_data->cur_chip; + unsigned int bits = (drv_data->ops == &bfin_sport_transfer_ops_u8 ? 
7 : 15); + + bfin_sport_spi_disable(drv_data); + dev_dbg(drv_data->dev, "restoring spi ctl state\n"); + + bfin_write(&drv_data->regs->tcr1, chip->ctl_reg); + bfin_write(&drv_data->regs->tcr2, bits); + bfin_write(&drv_data->regs->tclkdiv, chip->baud); + bfin_write(&drv_data->regs->tfsdiv, bits); + SSYNC(); + + bfin_write(&drv_data->regs->rcr1, chip->ctl_reg & ~(ITCLK | ITFS)); + bfin_write(&drv_data->regs->rcr2, bits); + SSYNC(); + + bfin_sport_spi_cs_active(chip); +} + +/* test if there is more transfer to be done */ +static enum bfin_sport_spi_state +bfin_sport_spi_next_transfer(struct bfin_sport_spi_master_data *drv_data) +{ + struct spi_message *msg = drv_data->cur_msg; + struct spi_transfer *trans = drv_data->cur_transfer; + + /* Move to next transfer */ + if (trans->transfer_list.next != &msg->transfers) { + drv_data->cur_transfer = + list_entry(trans->transfer_list.next, + struct spi_transfer, transfer_list); + return RUNNING_STATE; + } + + return DONE_STATE; +} + +/* + * caller already set message->status; + * dma and pio irqs are blocked give finished message back + */ +static void +bfin_sport_spi_giveback(struct bfin_sport_spi_master_data *drv_data) +{ + struct bfin_sport_spi_slave_data *chip = drv_data->cur_chip; + unsigned long flags; + struct spi_message *msg; + + spin_lock_irqsave(&drv_data->lock, flags); + msg = drv_data->cur_msg; + drv_data->state = START_STATE; + drv_data->cur_msg = NULL; + drv_data->cur_transfer = NULL; + drv_data->cur_chip = NULL; + queue_work(drv_data->workqueue, &drv_data->pump_messages); + spin_unlock_irqrestore(&drv_data->lock, flags); + + if (!drv_data->cs_change) + bfin_sport_spi_cs_deactive(chip); + + if (msg->complete) + msg->complete(msg->context); +} + +static irqreturn_t +sport_err_handler(int irq, void *dev_id) +{ + struct bfin_sport_spi_master_data *drv_data = dev_id; + u16 status; + + dev_dbg(drv_data->dev, "%s enter\n", __func__); + status = bfin_read(&drv_data->regs->stat) & (TOVF | TUVF | ROVF | RUVF); + + if (status) { + bfin_write(&drv_data->regs->stat, status); + SSYNC(); + + bfin_sport_spi_disable(drv_data); + dev_err(drv_data->dev, "status error:%s%s%s%s\n", + status & TOVF ? " TOVF" : "", + status & TUVF ? " TUVF" : "", + status & ROVF ? " ROVF" : "", + status & RUVF ? 
" RUVF" : ""); + } + + return IRQ_HANDLED; +} + +static void +bfin_sport_spi_pump_transfers(unsigned long data) +{ + struct bfin_sport_spi_master_data *drv_data = (void *)data; + struct spi_message *message = NULL; + struct spi_transfer *transfer = NULL; + struct spi_transfer *previous = NULL; + struct bfin_sport_spi_slave_data *chip = NULL; + unsigned int bits_per_word; + u32 tranf_success = 1; + u32 transfer_speed; + u8 full_duplex = 0; + + /* Get current state information */ + message = drv_data->cur_msg; + transfer = drv_data->cur_transfer; + chip = drv_data->cur_chip; + + if (transfer->speed_hz) + transfer_speed = bfin_sport_hz_to_spi_baud(transfer->speed_hz); + else + transfer_speed = chip->baud; + bfin_write(&drv_data->regs->tclkdiv, transfer_speed); + SSYNC(); + + /* + * if msg is error or done, report it back using complete() callback + */ + + /* Handle for abort */ + if (drv_data->state == ERROR_STATE) { + dev_dbg(drv_data->dev, "transfer: we've hit an error\n"); + message->status = -EIO; + bfin_sport_spi_giveback(drv_data); + return; + } + + /* Handle end of message */ + if (drv_data->state == DONE_STATE) { + dev_dbg(drv_data->dev, "transfer: all done!\n"); + message->status = 0; + bfin_sport_spi_giveback(drv_data); + return; + } + + /* Delay if requested at end of transfer */ + if (drv_data->state == RUNNING_STATE) { + dev_dbg(drv_data->dev, "transfer: still running ...\n"); + previous = list_entry(transfer->transfer_list.prev, + struct spi_transfer, transfer_list); + if (previous->delay_usecs) + udelay(previous->delay_usecs); + } + + if (transfer->len == 0) { + /* Move to next transfer of this msg */ + drv_data->state = bfin_sport_spi_next_transfer(drv_data); + /* Schedule next transfer tasklet */ + tasklet_schedule(&drv_data->pump_transfers); + } + + if (transfer->tx_buf != NULL) { + drv_data->tx = (void *)transfer->tx_buf; + drv_data->tx_end = drv_data->tx + transfer->len; + dev_dbg(drv_data->dev, "tx_buf is %p, tx_end is %p\n", + transfer->tx_buf, drv_data->tx_end); + } else + drv_data->tx = NULL; + + if (transfer->rx_buf != NULL) { + full_duplex = transfer->tx_buf != NULL; + drv_data->rx = transfer->rx_buf; + drv_data->rx_end = drv_data->rx + transfer->len; + dev_dbg(drv_data->dev, "rx_buf is %p, rx_end is %p\n", + transfer->rx_buf, drv_data->rx_end); + } else + drv_data->rx = NULL; + + drv_data->cs_change = transfer->cs_change; + + /* Bits per word setup */ + bits_per_word = transfer->bits_per_word ? 
: message->spi->bits_per_word; + if (bits_per_word == 8) + drv_data->ops = &bfin_sport_transfer_ops_u8; + else + drv_data->ops = &bfin_sport_transfer_ops_u16; + + drv_data->state = RUNNING_STATE; + + if (drv_data->cs_change) + bfin_sport_spi_cs_active(chip); + + dev_dbg(drv_data->dev, + "now pumping a transfer: width is %d, len is %d\n", + bits_per_word, transfer->len); + + /* PIO mode write then read */ + dev_dbg(drv_data->dev, "doing IO transfer\n"); + + bfin_sport_spi_enable(drv_data); + if (full_duplex) { + /* full duplex mode */ + BUG_ON((drv_data->tx_end - drv_data->tx) != + (drv_data->rx_end - drv_data->rx)); + drv_data->ops->duplex(drv_data); + + if (drv_data->tx != drv_data->tx_end) + tranf_success = 0; + } else if (drv_data->tx != NULL) { + /* write only half duplex */ + + drv_data->ops->write(drv_data); + + if (drv_data->tx != drv_data->tx_end) + tranf_success = 0; + } else if (drv_data->rx != NULL) { + /* read only half duplex */ + + drv_data->ops->read(drv_data); + if (drv_data->rx != drv_data->rx_end) + tranf_success = 0; + } + bfin_sport_spi_disable(drv_data); + + if (!tranf_success) { + dev_dbg(drv_data->dev, "IO write error!\n"); + drv_data->state = ERROR_STATE; + } else { + /* Update total byte transfered */ + message->actual_length += transfer->len; + /* Move to next transfer of this msg */ + drv_data->state = bfin_sport_spi_next_transfer(drv_data); + if (drv_data->cs_change) + bfin_sport_spi_cs_deactive(chip); + } + + /* Schedule next transfer tasklet */ + tasklet_schedule(&drv_data->pump_transfers); +} + +/* pop a msg from queue and kick off real transfer */ +static void +bfin_sport_spi_pump_messages(struct work_struct *work) +{ + struct bfin_sport_spi_master_data *drv_data; + unsigned long flags; + struct spi_message *next_msg; + + drv_data = container_of(work, struct bfin_sport_spi_master_data, pump_messages); + + /* Lock queue and check for queue work */ + spin_lock_irqsave(&drv_data->lock, flags); + if (list_empty(&drv_data->queue) || !drv_data->run) { + /* pumper kicked off but no work to do */ + drv_data->busy = 0; + spin_unlock_irqrestore(&drv_data->lock, flags); + return; + } + + /* Make sure we are not already running a message */ + if (drv_data->cur_msg) { + spin_unlock_irqrestore(&drv_data->lock, flags); + return; + } + + /* Extract head of queue */ + next_msg = list_entry(drv_data->queue.next, + struct spi_message, queue); + + drv_data->cur_msg = next_msg; + + /* Setup the SSP using the per chip configuration */ + drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi); + + list_del_init(&drv_data->cur_msg->queue); + + /* Initialize message state */ + drv_data->cur_msg->state = START_STATE; + drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next, + struct spi_transfer, transfer_list); + bfin_sport_spi_restore_state(drv_data); + dev_dbg(drv_data->dev, "got a message to pump, " + "state is set to: baud %d, cs_gpio %i, ctl 0x%x\n", + drv_data->cur_chip->baud, drv_data->cur_chip->cs_gpio, + drv_data->cur_chip->ctl_reg); + + dev_dbg(drv_data->dev, + "the first transfer len is %d\n", + drv_data->cur_transfer->len); + + /* Mark as busy and launch transfers */ + tasklet_schedule(&drv_data->pump_transfers); + + drv_data->busy = 1; + spin_unlock_irqrestore(&drv_data->lock, flags); +} + +/* + * got a msg to transfer, queue it in drv_data->queue. 
+ * And kick off message pumper + */ +static int +bfin_sport_spi_transfer(struct spi_device *spi, struct spi_message *msg) +{ + struct bfin_sport_spi_master_data *drv_data = spi_master_get_devdata(spi->master); + unsigned long flags; + + spin_lock_irqsave(&drv_data->lock, flags); + + if (!drv_data->run) { + spin_unlock_irqrestore(&drv_data->lock, flags); + return -ESHUTDOWN; + } + + msg->actual_length = 0; + msg->status = -EINPROGRESS; + msg->state = START_STATE; + + dev_dbg(&spi->dev, "adding an msg in transfer()\n"); + list_add_tail(&msg->queue, &drv_data->queue); + + if (drv_data->run && !drv_data->busy) + queue_work(drv_data->workqueue, &drv_data->pump_messages); + + spin_unlock_irqrestore(&drv_data->lock, flags); + + return 0; +} + +/* Called every time common spi devices change state */ +static int +bfin_sport_spi_setup(struct spi_device *spi) +{ + struct bfin_sport_spi_slave_data *chip, *first = NULL; + int ret; + + /* Only alloc (or use chip_info) on first setup */ + chip = spi_get_ctldata(spi); + if (chip == NULL) { + struct bfin5xx_spi_chip *chip_info; + + chip = first = kzalloc(sizeof(*chip), GFP_KERNEL); + if (!chip) + return -ENOMEM; + + /* platform chip_info isn't required */ + chip_info = spi->controller_data; + if (chip_info) { + /* + * DITFS and TDTYPE are only thing we don't set, but + * they probably shouldn't be changed by people. + */ + if (chip_info->ctl_reg || chip_info->enable_dma) { + ret = -EINVAL; + dev_err(&spi->dev, "don't set ctl_reg/enable_dma fields"); + goto error; + } + chip->cs_chg_udelay = chip_info->cs_chg_udelay; + chip->idle_tx_val = chip_info->idle_tx_val; + spi->bits_per_word = chip_info->bits_per_word; + } + } + + if (spi->bits_per_word != 8 && spi->bits_per_word != 16) { + ret = -EINVAL; + goto error; + } + + /* translate common spi framework into our register + * following configure contents are same for tx and rx. + */ + + if (spi->mode & SPI_CPHA) + chip->ctl_reg &= ~TCKFE; + else + chip->ctl_reg |= TCKFE; + + if (spi->mode & SPI_LSB_FIRST) + chip->ctl_reg |= TLSBIT; + else + chip->ctl_reg &= ~TLSBIT; + + /* Sport in master mode */ + chip->ctl_reg |= ITCLK | ITFS | TFSR | LATFS | LTFS; + + chip->baud = bfin_sport_hz_to_spi_baud(spi->max_speed_hz); + + chip->cs_gpio = spi->chip_select; + ret = gpio_request(chip->cs_gpio, spi->modalias); + if (ret) + goto error; + + dev_dbg(&spi->dev, "setup spi chip %s, width is %d\n", + spi->modalias, spi->bits_per_word); + dev_dbg(&spi->dev, "ctl_reg is 0x%x, GPIO is %i\n", + chip->ctl_reg, spi->chip_select); + + spi_set_ctldata(spi, chip); + + bfin_sport_spi_cs_deactive(chip); + + return ret; + + error: + kfree(first); + return ret; +} + +/* + * callback for spi framework. 
+ * clean driver specific data + */ +static void +bfin_sport_spi_cleanup(struct spi_device *spi) +{ + struct bfin_sport_spi_slave_data *chip = spi_get_ctldata(spi); + + if (!chip) + return; + + gpio_free(chip->cs_gpio); + + kfree(chip); +} + +static int +bfin_sport_spi_init_queue(struct bfin_sport_spi_master_data *drv_data) +{ + INIT_LIST_HEAD(&drv_data->queue); + spin_lock_init(&drv_data->lock); + + drv_data->run = false; + drv_data->busy = 0; + + /* init transfer tasklet */ + tasklet_init(&drv_data->pump_transfers, + bfin_sport_spi_pump_transfers, (unsigned long)drv_data); + + /* init messages workqueue */ + INIT_WORK(&drv_data->pump_messages, bfin_sport_spi_pump_messages); + drv_data->workqueue = + create_singlethread_workqueue(dev_name(drv_data->master->dev.parent)); + if (drv_data->workqueue == NULL) + return -EBUSY; + + return 0; +} + +static int +bfin_sport_spi_start_queue(struct bfin_sport_spi_master_data *drv_data) +{ + unsigned long flags; + + spin_lock_irqsave(&drv_data->lock, flags); + + if (drv_data->run || drv_data->busy) { + spin_unlock_irqrestore(&drv_data->lock, flags); + return -EBUSY; + } + + drv_data->run = true; + drv_data->cur_msg = NULL; + drv_data->cur_transfer = NULL; + drv_data->cur_chip = NULL; + spin_unlock_irqrestore(&drv_data->lock, flags); + + queue_work(drv_data->workqueue, &drv_data->pump_messages); + + return 0; +} + +static inline int +bfin_sport_spi_stop_queue(struct bfin_sport_spi_master_data *drv_data) +{ + unsigned long flags; + unsigned limit = 500; + int status = 0; + + spin_lock_irqsave(&drv_data->lock, flags); + + /* + * This is a bit lame, but is optimized for the common execution path. + * A wait_queue on the drv_data->busy could be used, but then the common + * execution path (pump_messages) would be required to call wake_up or + * friends on every SPI message. 
Do this instead + */ + drv_data->run = false; + while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) { + spin_unlock_irqrestore(&drv_data->lock, flags); + msleep(10); + spin_lock_irqsave(&drv_data->lock, flags); + } + + if (!list_empty(&drv_data->queue) || drv_data->busy) + status = -EBUSY; + + spin_unlock_irqrestore(&drv_data->lock, flags); + + return status; +} + +static inline int +bfin_sport_spi_destroy_queue(struct bfin_sport_spi_master_data *drv_data) +{ + int status; + + status = bfin_sport_spi_stop_queue(drv_data); + if (status) + return status; + + destroy_workqueue(drv_data->workqueue); + + return 0; +} + +static int __devinit +bfin_sport_spi_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct bfin5xx_spi_master *platform_info; + struct spi_master *master; + struct resource *res, *ires; + struct bfin_sport_spi_master_data *drv_data; + int status; + + platform_info = dev->platform_data; + + /* Allocate master with space for drv_data */ + master = spi_alloc_master(dev, sizeof(*master) + 16); + if (!master) { + dev_err(dev, "cannot alloc spi_master\n"); + return -ENOMEM; + } + + drv_data = spi_master_get_devdata(master); + drv_data->master = master; + drv_data->dev = dev; + drv_data->pin_req = platform_info->pin_req; + + master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST; + master->bus_num = pdev->id; + master->num_chipselect = platform_info->num_chipselect; + master->cleanup = bfin_sport_spi_cleanup; + master->setup = bfin_sport_spi_setup; + master->transfer = bfin_sport_spi_transfer; + + /* Find and map our resources */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (res == NULL) { + dev_err(dev, "cannot get IORESOURCE_MEM\n"); + status = -ENOENT; + goto out_error_get_res; + } + + drv_data->regs = ioremap(res->start, resource_size(res)); + if (drv_data->regs == NULL) { + dev_err(dev, "cannot map registers\n"); + status = -ENXIO; + goto out_error_ioremap; + } + + ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!ires) { + dev_err(dev, "cannot get IORESOURCE_IRQ\n"); + status = -ENODEV; + goto out_error_get_ires; + } + drv_data->err_irq = ires->start; + + /* Initial and start queue */ + status = bfin_sport_spi_init_queue(drv_data); + if (status) { + dev_err(dev, "problem initializing queue\n"); + goto out_error_queue_alloc; + } + + status = bfin_sport_spi_start_queue(drv_data); + if (status) { + dev_err(dev, "problem starting queue\n"); + goto out_error_queue_alloc; + } + + status = request_irq(drv_data->err_irq, sport_err_handler, + 0, "sport_spi_err", drv_data); + if (status) { + dev_err(dev, "unable to request sport err irq\n"); + goto out_error_irq; + } + + status = peripheral_request_list(drv_data->pin_req, DRV_NAME); + if (status) { + dev_err(dev, "requesting peripherals failed\n"); + goto out_error_peripheral; + } + + /* Register with the SPI framework */ + platform_set_drvdata(pdev, drv_data); + status = spi_register_master(master); + if (status) { + dev_err(dev, "problem registering spi master\n"); + goto out_error_master; + } + + dev_info(dev, "%s, regs_base@%p\n", DRV_DESC, drv_data->regs); + return 0; + + out_error_master: + peripheral_free_list(drv_data->pin_req); + out_error_peripheral: + free_irq(drv_data->err_irq, drv_data); + out_error_irq: + out_error_queue_alloc: + bfin_sport_spi_destroy_queue(drv_data); + out_error_get_ires: + iounmap(drv_data->regs); + out_error_ioremap: + out_error_get_res: + spi_master_put(master); + + return status; +} + +/* stop hardware and remove the driver */ 
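+/*
+ * Minimal usage sketch (illustrative only; buffer and device names are
+ * made up): a protocol driver reaches the queue above through the
+ * generic API, since spi_sync() ends up calling master->transfer(),
+ * i.e. bfin_sport_spi_transfer(), which queues the message and kicks
+ * the pump_messages work:
+ *
+ *	struct spi_transfer t = {
+ *		.tx_buf        = cmd_buf,
+ *		.len           = sizeof(cmd_buf),
+ *		.bits_per_word = 16,
+ *	};
+ *	struct spi_message m;
+ *
+ *	spi_message_init(&m);
+ *	spi_message_add_tail(&t, &m);
+ *	err = spi_sync(spi_dev, &m);
+ */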
+static int __devexit +bfin_sport_spi_remove(struct platform_device *pdev) +{ + struct bfin_sport_spi_master_data *drv_data = platform_get_drvdata(pdev); + int status = 0; + + if (!drv_data) + return 0; + + /* Remove the queue */ + status = bfin_sport_spi_destroy_queue(drv_data); + if (status) + return status; + + /* Disable the SSP at the peripheral and SOC level */ + bfin_sport_spi_disable(drv_data); + + /* Disconnect from the SPI framework */ + spi_unregister_master(drv_data->master); + + peripheral_free_list(drv_data->pin_req); + + /* Prevent double remove */ + platform_set_drvdata(pdev, NULL); + + return 0; +} + +#ifdef CONFIG_PM +static int +bfin_sport_spi_suspend(struct platform_device *pdev, pm_message_t state) +{ + struct bfin_sport_spi_master_data *drv_data = platform_get_drvdata(pdev); + int status; + + status = bfin_sport_spi_stop_queue(drv_data); + if (status) + return status; + + /* stop hardware */ + bfin_sport_spi_disable(drv_data); + + return status; +} + +static int +bfin_sport_spi_resume(struct platform_device *pdev) +{ + struct bfin_sport_spi_master_data *drv_data = platform_get_drvdata(pdev); + int status; + + /* Enable the SPI interface */ + bfin_sport_spi_enable(drv_data); + + /* Start the queue running */ + status = bfin_sport_spi_start_queue(drv_data); + if (status) + dev_err(drv_data->dev, "problem resuming queue\n"); + + return status; +} +#else +# define bfin_sport_spi_suspend NULL +# define bfin_sport_spi_resume NULL +#endif + +static struct platform_driver bfin_sport_spi_driver = { + .driver = { + .name = DRV_NAME, + .owner = THIS_MODULE, + }, + .probe = bfin_sport_spi_probe, + .remove = __devexit_p(bfin_sport_spi_remove), + .suspend = bfin_sport_spi_suspend, + .resume = bfin_sport_spi_resume, +}; + +static int __init bfin_sport_spi_init(void) +{ + return platform_driver_register(&bfin_sport_spi_driver); +} +module_init(bfin_sport_spi_init); + +static void __exit bfin_sport_spi_exit(void) +{ + platform_driver_unregister(&bfin_sport_spi_driver); +} +module_exit(bfin_sport_spi_exit); diff --git a/drivers/spi/spi-bfin5xx.c b/drivers/spi/spi-bfin5xx.c new file mode 100644 index 0000000..f706dba --- /dev/null +++ b/drivers/spi/spi-bfin5xx.c @@ -0,0 +1,1530 @@ +/* + * Blackfin On-Chip SPI Driver + * + * Copyright 2004-2010 Analog Devices Inc. + * + * Enter bugs at http://blackfin.uclinux.org/ + * + * Licensed under the GPL-2 or later. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#define DRV_NAME "bfin-spi" +#define DRV_AUTHOR "Bryan Wu, Luke Yang" +#define DRV_DESC "Blackfin on-chip SPI Controller Driver" +#define DRV_VERSION "1.0" + +MODULE_AUTHOR(DRV_AUTHOR); +MODULE_DESCRIPTION(DRV_DESC); +MODULE_LICENSE("GPL"); + +#define START_STATE ((void *)0) +#define RUNNING_STATE ((void *)1) +#define DONE_STATE ((void *)2) +#define ERROR_STATE ((void *)-1) + +struct bfin_spi_master_data; + +struct bfin_spi_transfer_ops { + void (*write) (struct bfin_spi_master_data *); + void (*read) (struct bfin_spi_master_data *); + void (*duplex) (struct bfin_spi_master_data *); +}; + +struct bfin_spi_master_data { + /* Driver model hookup */ + struct platform_device *pdev; + + /* SPI framework hookup */ + struct spi_master *master; + + /* Regs base of SPI controller */ + void __iomem *regs_base; + + /* Pin request list */ + u16 *pin_req; + + /* BFIN hookup */ + struct bfin5xx_spi_master *master_info; + + /* Driver message queue */ + struct workqueue_struct *workqueue; + struct work_struct pump_messages; + spinlock_t lock; + struct list_head queue; + int busy; + bool running; + + /* Message Transfer pump */ + struct tasklet_struct pump_transfers; + + /* Current message transfer state info */ + struct spi_message *cur_msg; + struct spi_transfer *cur_transfer; + struct bfin_spi_slave_data *cur_chip; + size_t len_in_bytes; + size_t len; + void *tx; + void *tx_end; + void *rx; + void *rx_end; + + /* DMA stuffs */ + int dma_channel; + int dma_mapped; + int dma_requested; + dma_addr_t rx_dma; + dma_addr_t tx_dma; + + int irq_requested; + int spi_irq; + + size_t rx_map_len; + size_t tx_map_len; + u8 n_bytes; + u16 ctrl_reg; + u16 flag_reg; + + int cs_change; + const struct bfin_spi_transfer_ops *ops; +}; + +struct bfin_spi_slave_data { + u16 ctl_reg; + u16 baud; + u16 flag; + + u8 chip_select_num; + u8 enable_dma; + u16 cs_chg_udelay; /* Some devices require > 255usec delay */ + u32 cs_gpio; + u16 idle_tx_val; + u8 pio_interrupt; /* use spi data irq */ + const struct bfin_spi_transfer_ops *ops; +}; + +#define DEFINE_SPI_REG(reg, off) \ +static inline u16 read_##reg(struct bfin_spi_master_data *drv_data) \ + { return bfin_read16(drv_data->regs_base + off); } \ +static inline void write_##reg(struct bfin_spi_master_data *drv_data, u16 v) \ + { bfin_write16(drv_data->regs_base + off, v); } + +DEFINE_SPI_REG(CTRL, 0x00) +DEFINE_SPI_REG(FLAG, 0x04) +DEFINE_SPI_REG(STAT, 0x08) +DEFINE_SPI_REG(TDBR, 0x0C) +DEFINE_SPI_REG(RDBR, 0x10) +DEFINE_SPI_REG(BAUD, 0x14) +DEFINE_SPI_REG(SHAW, 0x18) + +static void bfin_spi_enable(struct bfin_spi_master_data *drv_data) +{ + u16 cr; + + cr = read_CTRL(drv_data); + write_CTRL(drv_data, (cr | BIT_CTL_ENABLE)); +} + +static void bfin_spi_disable(struct bfin_spi_master_data *drv_data) +{ + u16 cr; + + cr = read_CTRL(drv_data); + write_CTRL(drv_data, (cr & (~BIT_CTL_ENABLE))); +} + +/* Caculate the SPI_BAUD register value based on input HZ */ +static u16 hz_to_spi_baud(u32 speed_hz) +{ + u_long sclk = get_sclk(); + u16 spi_baud = (sclk / (2 * speed_hz)); + + if ((sclk % (2 * speed_hz)) > 0) + spi_baud++; + + if (spi_baud < MIN_SPI_BAUD_VAL) + spi_baud = MIN_SPI_BAUD_VAL; + + return spi_baud; +} + +static int bfin_spi_flush(struct bfin_spi_master_data *drv_data) +{ + unsigned long limit = loops_per_jiffy << 1; + + /* wait for stop and clear stat */ + while (!(read_STAT(drv_data) & 
BIT_STAT_SPIF) && --limit) + cpu_relax(); + + write_STAT(drv_data, BIT_STAT_CLR); + + return limit; +} + +/* Chip select operation functions for cs_change flag */ +static void bfin_spi_cs_active(struct bfin_spi_master_data *drv_data, struct bfin_spi_slave_data *chip) +{ + if (likely(chip->chip_select_num < MAX_CTRL_CS)) { + u16 flag = read_FLAG(drv_data); + + flag &= ~chip->flag; + + write_FLAG(drv_data, flag); + } else { + gpio_set_value(chip->cs_gpio, 0); + } +} + +static void bfin_spi_cs_deactive(struct bfin_spi_master_data *drv_data, + struct bfin_spi_slave_data *chip) +{ + if (likely(chip->chip_select_num < MAX_CTRL_CS)) { + u16 flag = read_FLAG(drv_data); + + flag |= chip->flag; + + write_FLAG(drv_data, flag); + } else { + gpio_set_value(chip->cs_gpio, 1); + } + + /* Move delay here for consistency */ + if (chip->cs_chg_udelay) + udelay(chip->cs_chg_udelay); +} + +/* enable or disable the pin muxed by GPIO and SPI CS to work as SPI CS */ +static inline void bfin_spi_cs_enable(struct bfin_spi_master_data *drv_data, + struct bfin_spi_slave_data *chip) +{ + if (chip->chip_select_num < MAX_CTRL_CS) { + u16 flag = read_FLAG(drv_data); + + flag |= (chip->flag >> 8); + + write_FLAG(drv_data, flag); + } +} + +static inline void bfin_spi_cs_disable(struct bfin_spi_master_data *drv_data, + struct bfin_spi_slave_data *chip) +{ + if (chip->chip_select_num < MAX_CTRL_CS) { + u16 flag = read_FLAG(drv_data); + + flag &= ~(chip->flag >> 8); + + write_FLAG(drv_data, flag); + } +} + +/* stop controller and re-config current chip*/ +static void bfin_spi_restore_state(struct bfin_spi_master_data *drv_data) +{ + struct bfin_spi_slave_data *chip = drv_data->cur_chip; + + /* Clear status and disable clock */ + write_STAT(drv_data, BIT_STAT_CLR); + bfin_spi_disable(drv_data); + dev_dbg(&drv_data->pdev->dev, "restoring spi ctl state\n"); + + SSYNC(); + + /* Load the registers */ + write_CTRL(drv_data, chip->ctl_reg); + write_BAUD(drv_data, chip->baud); + + bfin_spi_enable(drv_data); + bfin_spi_cs_active(drv_data, chip); +} + +/* used to kick off transfer in rx mode and read unwanted RX data */ +static inline void bfin_spi_dummy_read(struct bfin_spi_master_data *drv_data) +{ + (void) read_RDBR(drv_data); +} + +static void bfin_spi_u8_writer(struct bfin_spi_master_data *drv_data) +{ + /* clear RXS (we check for RXS inside the loop) */ + bfin_spi_dummy_read(drv_data); + + while (drv_data->tx < drv_data->tx_end) { + write_TDBR(drv_data, (*(u8 *) (drv_data->tx++))); + /* wait until transfer finished. 
+ checking SPIF or TXS may not guarantee transfer completion */ + while (!(read_STAT(drv_data) & BIT_STAT_RXS)) + cpu_relax(); + /* discard RX data and clear RXS */ + bfin_spi_dummy_read(drv_data); + } +} + +static void bfin_spi_u8_reader(struct bfin_spi_master_data *drv_data) +{ + u16 tx_val = drv_data->cur_chip->idle_tx_val; + + /* discard old RX data and clear RXS */ + bfin_spi_dummy_read(drv_data); + + while (drv_data->rx < drv_data->rx_end) { + write_TDBR(drv_data, tx_val); + while (!(read_STAT(drv_data) & BIT_STAT_RXS)) + cpu_relax(); + *(u8 *) (drv_data->rx++) = read_RDBR(drv_data); + } +} + +static void bfin_spi_u8_duplex(struct bfin_spi_master_data *drv_data) +{ + /* discard old RX data and clear RXS */ + bfin_spi_dummy_read(drv_data); + + while (drv_data->rx < drv_data->rx_end) { + write_TDBR(drv_data, (*(u8 *) (drv_data->tx++))); + while (!(read_STAT(drv_data) & BIT_STAT_RXS)) + cpu_relax(); + *(u8 *) (drv_data->rx++) = read_RDBR(drv_data); + } +} + +static const struct bfin_spi_transfer_ops bfin_bfin_spi_transfer_ops_u8 = { + .write = bfin_spi_u8_writer, + .read = bfin_spi_u8_reader, + .duplex = bfin_spi_u8_duplex, +}; + +static void bfin_spi_u16_writer(struct bfin_spi_master_data *drv_data) +{ + /* clear RXS (we check for RXS inside the loop) */ + bfin_spi_dummy_read(drv_data); + + while (drv_data->tx < drv_data->tx_end) { + write_TDBR(drv_data, (*(u16 *) (drv_data->tx))); + drv_data->tx += 2; + /* wait until transfer finished. + checking SPIF or TXS may not guarantee transfer completion */ + while (!(read_STAT(drv_data) & BIT_STAT_RXS)) + cpu_relax(); + /* discard RX data and clear RXS */ + bfin_spi_dummy_read(drv_data); + } +} + +static void bfin_spi_u16_reader(struct bfin_spi_master_data *drv_data) +{ + u16 tx_val = drv_data->cur_chip->idle_tx_val; + + /* discard old RX data and clear RXS */ + bfin_spi_dummy_read(drv_data); + + while (drv_data->rx < drv_data->rx_end) { + write_TDBR(drv_data, tx_val); + while (!(read_STAT(drv_data) & BIT_STAT_RXS)) + cpu_relax(); + *(u16 *) (drv_data->rx) = read_RDBR(drv_data); + drv_data->rx += 2; + } +} + +static void bfin_spi_u16_duplex(struct bfin_spi_master_data *drv_data) +{ + /* discard old RX data and clear RXS */ + bfin_spi_dummy_read(drv_data); + + while (drv_data->rx < drv_data->rx_end) { + write_TDBR(drv_data, (*(u16 *) (drv_data->tx))); + drv_data->tx += 2; + while (!(read_STAT(drv_data) & BIT_STAT_RXS)) + cpu_relax(); + *(u16 *) (drv_data->rx) = read_RDBR(drv_data); + drv_data->rx += 2; + } +} + +static const struct bfin_spi_transfer_ops bfin_bfin_spi_transfer_ops_u16 = { + .write = bfin_spi_u16_writer, + .read = bfin_spi_u16_reader, + .duplex = bfin_spi_u16_duplex, +}; + +/* test if there is more transfer to be done */ +static void *bfin_spi_next_transfer(struct bfin_spi_master_data *drv_data) +{ + struct spi_message *msg = drv_data->cur_msg; + struct spi_transfer *trans = drv_data->cur_transfer; + + /* Move to next transfer */ + if (trans->transfer_list.next != &msg->transfers) { + drv_data->cur_transfer = + list_entry(trans->transfer_list.next, + struct spi_transfer, transfer_list); + return RUNNING_STATE; + } else + return DONE_STATE; +} + +/* + * caller already set message->status; + * dma and pio irqs are blocked give finished message back + */ +static void bfin_spi_giveback(struct bfin_spi_master_data *drv_data) +{ + struct bfin_spi_slave_data *chip = drv_data->cur_chip; + struct spi_transfer *last_transfer; + unsigned long flags; + struct spi_message *msg; + + spin_lock_irqsave(&drv_data->lock, flags); + msg = 
drv_data->cur_msg; + drv_data->cur_msg = NULL; + drv_data->cur_transfer = NULL; + drv_data->cur_chip = NULL; + queue_work(drv_data->workqueue, &drv_data->pump_messages); + spin_unlock_irqrestore(&drv_data->lock, flags); + + last_transfer = list_entry(msg->transfers.prev, + struct spi_transfer, transfer_list); + + msg->state = NULL; + + if (!drv_data->cs_change) + bfin_spi_cs_deactive(drv_data, chip); + + /* Not stop spi in autobuffer mode */ + if (drv_data->tx_dma != 0xFFFF) + bfin_spi_disable(drv_data); + + if (msg->complete) + msg->complete(msg->context); +} + +/* spi data irq handler */ +static irqreturn_t bfin_spi_pio_irq_handler(int irq, void *dev_id) +{ + struct bfin_spi_master_data *drv_data = dev_id; + struct bfin_spi_slave_data *chip = drv_data->cur_chip; + struct spi_message *msg = drv_data->cur_msg; + int n_bytes = drv_data->n_bytes; + int loop = 0; + + /* wait until transfer finished. */ + while (!(read_STAT(drv_data) & BIT_STAT_RXS)) + cpu_relax(); + + if ((drv_data->tx && drv_data->tx >= drv_data->tx_end) || + (drv_data->rx && drv_data->rx >= (drv_data->rx_end - n_bytes))) { + /* last read */ + if (drv_data->rx) { + dev_dbg(&drv_data->pdev->dev, "last read\n"); + if (n_bytes % 2) { + u16 *buf = (u16 *)drv_data->rx; + for (loop = 0; loop < n_bytes / 2; loop++) + *buf++ = read_RDBR(drv_data); + } else { + u8 *buf = (u8 *)drv_data->rx; + for (loop = 0; loop < n_bytes; loop++) + *buf++ = read_RDBR(drv_data); + } + drv_data->rx += n_bytes; + } + + msg->actual_length += drv_data->len_in_bytes; + if (drv_data->cs_change) + bfin_spi_cs_deactive(drv_data, chip); + /* Move to next transfer */ + msg->state = bfin_spi_next_transfer(drv_data); + + disable_irq_nosync(drv_data->spi_irq); + + /* Schedule transfer tasklet */ + tasklet_schedule(&drv_data->pump_transfers); + return IRQ_HANDLED; + } + + if (drv_data->rx && drv_data->tx) { + /* duplex */ + dev_dbg(&drv_data->pdev->dev, "duplex: write_TDBR\n"); + if (n_bytes % 2) { + u16 *buf = (u16 *)drv_data->rx; + u16 *buf2 = (u16 *)drv_data->tx; + for (loop = 0; loop < n_bytes / 2; loop++) { + *buf++ = read_RDBR(drv_data); + write_TDBR(drv_data, *buf2++); + } + } else { + u8 *buf = (u8 *)drv_data->rx; + u8 *buf2 = (u8 *)drv_data->tx; + for (loop = 0; loop < n_bytes; loop++) { + *buf++ = read_RDBR(drv_data); + write_TDBR(drv_data, *buf2++); + } + } + } else if (drv_data->rx) { + /* read */ + dev_dbg(&drv_data->pdev->dev, "read: write_TDBR\n"); + if (n_bytes % 2) { + u16 *buf = (u16 *)drv_data->rx; + for (loop = 0; loop < n_bytes / 2; loop++) { + *buf++ = read_RDBR(drv_data); + write_TDBR(drv_data, chip->idle_tx_val); + } + } else { + u8 *buf = (u8 *)drv_data->rx; + for (loop = 0; loop < n_bytes; loop++) { + *buf++ = read_RDBR(drv_data); + write_TDBR(drv_data, chip->idle_tx_val); + } + } + } else if (drv_data->tx) { + /* write */ + dev_dbg(&drv_data->pdev->dev, "write: write_TDBR\n"); + if (n_bytes % 2) { + u16 *buf = (u16 *)drv_data->tx; + for (loop = 0; loop < n_bytes / 2; loop++) { + read_RDBR(drv_data); + write_TDBR(drv_data, *buf++); + } + } else { + u8 *buf = (u8 *)drv_data->tx; + for (loop = 0; loop < n_bytes; loop++) { + read_RDBR(drv_data); + write_TDBR(drv_data, *buf++); + } + } + } + + if (drv_data->tx) + drv_data->tx += n_bytes; + if (drv_data->rx) + drv_data->rx += n_bytes; + + return IRQ_HANDLED; +} + +static irqreturn_t bfin_spi_dma_irq_handler(int irq, void *dev_id) +{ + struct bfin_spi_master_data *drv_data = dev_id; + struct bfin_spi_slave_data *chip = drv_data->cur_chip; + struct spi_message *msg = drv_data->cur_msg; + 
unsigned long timeout; + unsigned short dmastat = get_dma_curr_irqstat(drv_data->dma_channel); + u16 spistat = read_STAT(drv_data); + + dev_dbg(&drv_data->pdev->dev, + "in dma_irq_handler dmastat:0x%x spistat:0x%x\n", + dmastat, spistat); + + if (drv_data->rx != NULL) { + u16 cr = read_CTRL(drv_data); + /* discard old RX data and clear RXS */ + bfin_spi_dummy_read(drv_data); + write_CTRL(drv_data, cr & ~BIT_CTL_ENABLE); /* Disable SPI */ + write_CTRL(drv_data, cr & ~BIT_CTL_TIMOD); /* Restore State */ + write_STAT(drv_data, BIT_STAT_CLR); /* Clear Status */ + } + + clear_dma_irqstat(drv_data->dma_channel); + + /* + * wait for the last transaction shifted out. HRM states: + * at this point there may still be data in the SPI DMA FIFO waiting + * to be transmitted ... software needs to poll TXS in the SPI_STAT + * register until it goes low for 2 successive reads + */ + if (drv_data->tx != NULL) { + while ((read_STAT(drv_data) & BIT_STAT_TXS) || + (read_STAT(drv_data) & BIT_STAT_TXS)) + cpu_relax(); + } + + dev_dbg(&drv_data->pdev->dev, + "in dma_irq_handler dmastat:0x%x spistat:0x%x\n", + dmastat, read_STAT(drv_data)); + + timeout = jiffies + HZ; + while (!(read_STAT(drv_data) & BIT_STAT_SPIF)) + if (!time_before(jiffies, timeout)) { + dev_warn(&drv_data->pdev->dev, "timeout waiting for SPIF"); + break; + } else + cpu_relax(); + + if ((dmastat & DMA_ERR) && (spistat & BIT_STAT_RBSY)) { + msg->state = ERROR_STATE; + dev_err(&drv_data->pdev->dev, "dma receive: fifo/buffer overflow\n"); + } else { + msg->actual_length += drv_data->len_in_bytes; + + if (drv_data->cs_change) + bfin_spi_cs_deactive(drv_data, chip); + + /* Move to next transfer */ + msg->state = bfin_spi_next_transfer(drv_data); + } + + /* Schedule transfer tasklet */ + tasklet_schedule(&drv_data->pump_transfers); + + /* free the irq handler before next transfer */ + dev_dbg(&drv_data->pdev->dev, + "disable dma channel irq%d\n", + drv_data->dma_channel); + dma_disable_irq_nosync(drv_data->dma_channel); + + return IRQ_HANDLED; +} + +static void bfin_spi_pump_transfers(unsigned long data) +{ + struct bfin_spi_master_data *drv_data = (struct bfin_spi_master_data *)data; + struct spi_message *message = NULL; + struct spi_transfer *transfer = NULL; + struct spi_transfer *previous = NULL; + struct bfin_spi_slave_data *chip = NULL; + unsigned int bits_per_word; + u16 cr, cr_width, dma_width, dma_config; + u32 tranf_success = 1; + u8 full_duplex = 0; + + /* Get current state information */ + message = drv_data->cur_msg; + transfer = drv_data->cur_transfer; + chip = drv_data->cur_chip; + + /* + * if msg is error or done, report it back using complete() callback + */ + + /* Handle for abort */ + if (message->state == ERROR_STATE) { + dev_dbg(&drv_data->pdev->dev, "transfer: we've hit an error\n"); + message->status = -EIO; + bfin_spi_giveback(drv_data); + return; + } + + /* Handle end of message */ + if (message->state == DONE_STATE) { + dev_dbg(&drv_data->pdev->dev, "transfer: all done!\n"); + message->status = 0; + bfin_spi_giveback(drv_data); + return; + } + + /* Delay if requested at end of transfer */ + if (message->state == RUNNING_STATE) { + dev_dbg(&drv_data->pdev->dev, "transfer: still running ...\n"); + previous = list_entry(transfer->transfer_list.prev, + struct spi_transfer, transfer_list); + if (previous->delay_usecs) + udelay(previous->delay_usecs); + } + + /* Flush any existing transfers that may be sitting in the hardware */ + if (bfin_spi_flush(drv_data) == 0) { + dev_err(&drv_data->pdev->dev, "pump_transfers: flush 
failed\n"); + message->status = -EIO; + bfin_spi_giveback(drv_data); + return; + } + + if (transfer->len == 0) { + /* Move to next transfer of this msg */ + message->state = bfin_spi_next_transfer(drv_data); + /* Schedule next transfer tasklet */ + tasklet_schedule(&drv_data->pump_transfers); + return; + } + + if (transfer->tx_buf != NULL) { + drv_data->tx = (void *)transfer->tx_buf; + drv_data->tx_end = drv_data->tx + transfer->len; + dev_dbg(&drv_data->pdev->dev, "tx_buf is %p, tx_end is %p\n", + transfer->tx_buf, drv_data->tx_end); + } else { + drv_data->tx = NULL; + } + + if (transfer->rx_buf != NULL) { + full_duplex = transfer->tx_buf != NULL; + drv_data->rx = transfer->rx_buf; + drv_data->rx_end = drv_data->rx + transfer->len; + dev_dbg(&drv_data->pdev->dev, "rx_buf is %p, rx_end is %p\n", + transfer->rx_buf, drv_data->rx_end); + } else { + drv_data->rx = NULL; + } + + drv_data->rx_dma = transfer->rx_dma; + drv_data->tx_dma = transfer->tx_dma; + drv_data->len_in_bytes = transfer->len; + drv_data->cs_change = transfer->cs_change; + + /* Bits per word setup */ + bits_per_word = transfer->bits_per_word ? : message->spi->bits_per_word; + if ((bits_per_word > 0) && (bits_per_word % 16 == 0)) { + drv_data->n_bytes = bits_per_word/8; + drv_data->len = (transfer->len) >> 1; + cr_width = BIT_CTL_WORDSIZE; + drv_data->ops = &bfin_bfin_spi_transfer_ops_u16; + } else if ((bits_per_word > 0) && (bits_per_word % 8 == 0)) { + drv_data->n_bytes = bits_per_word/8; + drv_data->len = transfer->len; + cr_width = 0; + drv_data->ops = &bfin_bfin_spi_transfer_ops_u8; + } else { + dev_err(&drv_data->pdev->dev, "transfer: unsupported bits_per_word\n"); + message->status = -EINVAL; + bfin_spi_giveback(drv_data); + return; + } + cr = read_CTRL(drv_data) & ~(BIT_CTL_TIMOD | BIT_CTL_WORDSIZE); + cr |= cr_width; + write_CTRL(drv_data, cr); + + dev_dbg(&drv_data->pdev->dev, + "transfer: drv_data->ops is %p, chip->ops is %p, u8_ops is %p\n", + drv_data->ops, chip->ops, &bfin_bfin_spi_transfer_ops_u8); + + message->state = RUNNING_STATE; + dma_config = 0; + + /* Speed setup (surely valid because already checked) */ + if (transfer->speed_hz) + write_BAUD(drv_data, hz_to_spi_baud(transfer->speed_hz)); + else + write_BAUD(drv_data, chip->baud); + + write_STAT(drv_data, BIT_STAT_CLR); + bfin_spi_cs_active(drv_data, chip); + + dev_dbg(&drv_data->pdev->dev, + "now pumping a transfer: width is %d, len is %d\n", + cr_width, transfer->len); + + /* + * Try to map the dma buffer and do a dma transfer. If successful, + * r/w in a different way depending on the enable_dma setting and on + * whether we are doing a full duplex transfer (the hardware does + * not support full duplex DMA transfers).
+ */ + if (!full_duplex && drv_data->cur_chip->enable_dma + && drv_data->len > 6) { + + unsigned long dma_start_addr, flags; + + disable_dma(drv_data->dma_channel); + clear_dma_irqstat(drv_data->dma_channel); + + /* config dma channel */ + dev_dbg(&drv_data->pdev->dev, "doing dma transfer\n"); + set_dma_x_count(drv_data->dma_channel, drv_data->len); + if (cr_width == BIT_CTL_WORDSIZE) { + set_dma_x_modify(drv_data->dma_channel, 2); + dma_width = WDSIZE_16; + } else { + set_dma_x_modify(drv_data->dma_channel, 1); + dma_width = WDSIZE_8; + } + + /* poll for SPI completion before start */ + while (!(read_STAT(drv_data) & BIT_STAT_SPIF)) + cpu_relax(); + + /* dirty hack for autobuffer DMA mode */ + if (drv_data->tx_dma == 0xFFFF) { + dev_dbg(&drv_data->pdev->dev, + "doing autobuffer DMA out.\n"); + + /* no irq in autobuffer mode */ + dma_config = + (DMAFLOW_AUTO | RESTART | dma_width | DI_EN); + set_dma_config(drv_data->dma_channel, dma_config); + set_dma_start_addr(drv_data->dma_channel, + (unsigned long)drv_data->tx); + enable_dma(drv_data->dma_channel); + + /* start SPI transfer */ + write_CTRL(drv_data, cr | BIT_CTL_TIMOD_DMA_TX); + + /* just return here, there can only be one transfer + * in this mode + */ + message->status = 0; + bfin_spi_giveback(drv_data); + return; + } + + /* In dma mode, rx or tx must be NULL in one transfer */ + dma_config = (RESTART | dma_width | DI_EN); + if (drv_data->rx != NULL) { + /* set transfer mode, and enable SPI */ + dev_dbg(&drv_data->pdev->dev, "doing DMA in to %p (size %zx)\n", + drv_data->rx, drv_data->len_in_bytes); + + /* invalidate caches, if needed */ + if (bfin_addr_dcacheable((unsigned long) drv_data->rx)) + invalidate_dcache_range((unsigned long) drv_data->rx, + (unsigned long) (drv_data->rx + + drv_data->len_in_bytes)); + + dma_config |= WNR; + dma_start_addr = (unsigned long)drv_data->rx; + cr |= BIT_CTL_TIMOD_DMA_RX | BIT_CTL_SENDOPT; + + } else if (drv_data->tx != NULL) { + dev_dbg(&drv_data->pdev->dev, "doing DMA out.\n"); + + /* flush caches, if needed */ + if (bfin_addr_dcacheable((unsigned long) drv_data->tx)) + flush_dcache_range((unsigned long) drv_data->tx, + (unsigned long) (drv_data->tx + + drv_data->len_in_bytes)); + + dma_start_addr = (unsigned long)drv_data->tx; + cr |= BIT_CTL_TIMOD_DMA_TX; + + } else + BUG(); + + /* oh man, here there be monsters ... and i dont mean the + * fluffy cute ones from pixar, i mean the kind that'll eat + * your data, kick your dog, and love it all. do *not* try + * and change these lines unless you (1) heavily test DMA + * with SPI flashes on a loaded system (e.g. ping floods), + * (2) know just how broken the DMA engine interaction with + * the SPI peripheral is, and (3) have someone else to blame + * when you screw it all up anyways. + */ + set_dma_start_addr(drv_data->dma_channel, dma_start_addr); + set_dma_config(drv_data->dma_channel, dma_config); + local_irq_save(flags); + SSYNC(); + write_CTRL(drv_data, cr); + enable_dma(drv_data->dma_channel); + dma_enable_irq(drv_data->dma_channel); + local_irq_restore(flags); + + return; + } + + /* + * We always use SPI_WRITE mode (transfer starts with TDBR write). + * SPI_READ mode (transfer starts with RDBR read) seems to have + * problems with setting up the output value in TDBR prior to the + * start of the transfer. 
+ */ + write_CTRL(drv_data, cr | BIT_CTL_TXMOD); + + if (chip->pio_interrupt) { + /* SPI irq should have been disabled by now */ + + /* discard old RX data and clear RXS */ + bfin_spi_dummy_read(drv_data); + + /* start transfer */ + if (drv_data->tx == NULL) + write_TDBR(drv_data, chip->idle_tx_val); + else { + int loop; + if (bits_per_word % 16 == 0) { + u16 *buf = (u16 *)drv_data->tx; + for (loop = 0; loop < bits_per_word / 16; + loop++) { + write_TDBR(drv_data, *buf++); + } + } else if (bits_per_word % 8 == 0) { + u8 *buf = (u8 *)drv_data->tx; + for (loop = 0; loop < bits_per_word / 8; loop++) + write_TDBR(drv_data, *buf++); + } + + drv_data->tx += drv_data->n_bytes; + } + + /* once TDBR is empty, interrupt is triggered */ + enable_irq(drv_data->spi_irq); + return; + } + + /* IO mode */ + dev_dbg(&drv_data->pdev->dev, "doing IO transfer\n"); + + if (full_duplex) { + /* full duplex mode */ + BUG_ON((drv_data->tx_end - drv_data->tx) != + (drv_data->rx_end - drv_data->rx)); + dev_dbg(&drv_data->pdev->dev, + "IO duplex: cr is 0x%x\n", cr); + + drv_data->ops->duplex(drv_data); + + if (drv_data->tx != drv_data->tx_end) + tranf_success = 0; + } else if (drv_data->tx != NULL) { + /* write only half duplex */ + dev_dbg(&drv_data->pdev->dev, + "IO write: cr is 0x%x\n", cr); + + drv_data->ops->write(drv_data); + + if (drv_data->tx != drv_data->tx_end) + tranf_success = 0; + } else if (drv_data->rx != NULL) { + /* read only half duplex */ + dev_dbg(&drv_data->pdev->dev, + "IO read: cr is 0x%x\n", cr); + + drv_data->ops->read(drv_data); + if (drv_data->rx != drv_data->rx_end) + tranf_success = 0; + } + + if (!tranf_success) { + dev_dbg(&drv_data->pdev->dev, + "IO write error!\n"); + message->state = ERROR_STATE; + } else { + /* Update total byte transferred */ + message->actual_length += drv_data->len_in_bytes; + /* Move to next transfer of this msg */ + message->state = bfin_spi_next_transfer(drv_data); + if (drv_data->cs_change) + bfin_spi_cs_deactive(drv_data, chip); + } + + /* Schedule next transfer tasklet */ + tasklet_schedule(&drv_data->pump_transfers); +} + +/* pop a msg from queue and kick off real transfer */ +static void bfin_spi_pump_messages(struct work_struct *work) +{ + struct bfin_spi_master_data *drv_data; + unsigned long flags; + + drv_data = container_of(work, struct bfin_spi_master_data, pump_messages); + + /* Lock queue and check for queue work */ + spin_lock_irqsave(&drv_data->lock, flags); + if (list_empty(&drv_data->queue) || !drv_data->running) { + /* pumper kicked off but no work to do */ + drv_data->busy = 0; + spin_unlock_irqrestore(&drv_data->lock, flags); + return; + } + + /* Make sure we are not already running a message */ + if (drv_data->cur_msg) { + spin_unlock_irqrestore(&drv_data->lock, flags); + return; + } + + /* Extract head of queue */ + drv_data->cur_msg = list_entry(drv_data->queue.next, + struct spi_message, queue); + + /* Setup the SSP using the per chip configuration */ + drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi); + bfin_spi_restore_state(drv_data); + + list_del_init(&drv_data->cur_msg->queue); + + /* Initial message state */ + drv_data->cur_msg->state = START_STATE; + drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next, + struct spi_transfer, transfer_list); + + dev_dbg(&drv_data->pdev->dev, "got a message to pump, " + "state is set to: baud %d, flag 0x%x, ctl 0x%x\n", + drv_data->cur_chip->baud, drv_data->cur_chip->flag, + drv_data->cur_chip->ctl_reg); + + dev_dbg(&drv_data->pdev->dev, + "the first transfer len is 
%d\n", + drv_data->cur_transfer->len); + + /* Mark as busy and launch transfers */ + tasklet_schedule(&drv_data->pump_transfers); + + drv_data->busy = 1; + spin_unlock_irqrestore(&drv_data->lock, flags); +} + +/* + * got a msg to transfer, queue it in drv_data->queue. + * And kick off message pumper + */ +static int bfin_spi_transfer(struct spi_device *spi, struct spi_message *msg) +{ + struct bfin_spi_master_data *drv_data = spi_master_get_devdata(spi->master); + unsigned long flags; + + spin_lock_irqsave(&drv_data->lock, flags); + + if (!drv_data->running) { + spin_unlock_irqrestore(&drv_data->lock, flags); + return -ESHUTDOWN; + } + + msg->actual_length = 0; + msg->status = -EINPROGRESS; + msg->state = START_STATE; + + dev_dbg(&spi->dev, "adding an msg in transfer() \n"); + list_add_tail(&msg->queue, &drv_data->queue); + + if (drv_data->running && !drv_data->busy) + queue_work(drv_data->workqueue, &drv_data->pump_messages); + + spin_unlock_irqrestore(&drv_data->lock, flags); + + return 0; +} + +#define MAX_SPI_SSEL 7 + +static u16 ssel[][MAX_SPI_SSEL] = { + {P_SPI0_SSEL1, P_SPI0_SSEL2, P_SPI0_SSEL3, + P_SPI0_SSEL4, P_SPI0_SSEL5, + P_SPI0_SSEL6, P_SPI0_SSEL7}, + + {P_SPI1_SSEL1, P_SPI1_SSEL2, P_SPI1_SSEL3, + P_SPI1_SSEL4, P_SPI1_SSEL5, + P_SPI1_SSEL6, P_SPI1_SSEL7}, + + {P_SPI2_SSEL1, P_SPI2_SSEL2, P_SPI2_SSEL3, + P_SPI2_SSEL4, P_SPI2_SSEL5, + P_SPI2_SSEL6, P_SPI2_SSEL7}, +}; + +/* setup for devices (may be called multiple times -- not just first setup) */ +static int bfin_spi_setup(struct spi_device *spi) +{ + struct bfin5xx_spi_chip *chip_info; + struct bfin_spi_slave_data *chip = NULL; + struct bfin_spi_master_data *drv_data = spi_master_get_devdata(spi->master); + u16 bfin_ctl_reg; + int ret = -EINVAL; + + /* Only alloc (or use chip_info) on first setup */ + chip_info = NULL; + chip = spi_get_ctldata(spi); + if (chip == NULL) { + chip = kzalloc(sizeof(*chip), GFP_KERNEL); + if (!chip) { + dev_err(&spi->dev, "cannot allocate chip data\n"); + ret = -ENOMEM; + goto error; + } + + chip->enable_dma = 0; + chip_info = spi->controller_data; + } + + /* Let people set non-standard bits directly */ + bfin_ctl_reg = BIT_CTL_OPENDRAIN | BIT_CTL_EMISO | + BIT_CTL_PSSE | BIT_CTL_GM | BIT_CTL_SZ; + + /* chip_info isn't always needed */ + if (chip_info) { + /* Make sure people stop trying to set fields via ctl_reg + * when they should actually be using common SPI framework. + * Currently we let through: WOM EMISO PSSE GM SZ. + * Not sure if a user actually needs/uses any of these, + * but let's assume (for now) they do. 
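+ * + * For illustration only (not part of this patch): a board file would + * normally pass these per-chip settings via + * spi_board_info.controller_data, e.g. + * + *	static struct bfin5xx_spi_chip flash_chip_info = { + *		.enable_dma = 1, + *		.bits_per_word = 8, + *	};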
+ */ + if (chip_info->ctl_reg & ~bfin_ctl_reg) { + dev_err(&spi->dev, "do not set bits in ctl_reg " + "that the SPI framework manages\n"); + goto error; + } + chip->enable_dma = chip_info->enable_dma != 0 + && drv_data->master_info->enable_dma; + chip->ctl_reg = chip_info->ctl_reg; + chip->cs_chg_udelay = chip_info->cs_chg_udelay; + chip->idle_tx_val = chip_info->idle_tx_val; + chip->pio_interrupt = chip_info->pio_interrupt; + spi->bits_per_word = chip_info->bits_per_word; + } else { + /* force a default base state */ + chip->ctl_reg &= bfin_ctl_reg; + } + + if (spi->bits_per_word % 8) { + dev_err(&spi->dev, "%d bits_per_word is not supported\n", + spi->bits_per_word); + goto error; + } + + /* translate common spi framework into our register */ + if (spi->mode & ~(SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST)) { + dev_err(&spi->dev, "unsupported spi modes detected\n"); + goto error; + } + if (spi->mode & SPI_CPOL) + chip->ctl_reg |= BIT_CTL_CPOL; + if (spi->mode & SPI_CPHA) + chip->ctl_reg |= BIT_CTL_CPHA; + if (spi->mode & SPI_LSB_FIRST) + chip->ctl_reg |= BIT_CTL_LSBF; + /* we dont support running in slave mode (yet?) */ + chip->ctl_reg |= BIT_CTL_MASTER; + + /* + * Notice: for blackfin, the speed_hz is the value of register + * SPI_BAUD, not the real baudrate + */ + chip->baud = hz_to_spi_baud(spi->max_speed_hz); + chip->chip_select_num = spi->chip_select; + if (chip->chip_select_num < MAX_CTRL_CS) { + if (!(spi->mode & SPI_CPHA)) + dev_warn(&spi->dev, "Warning: SPI CPHA not set:" + " Slave Select not under software control!\n" + " See Documentation/blackfin/bfin-spi-notes.txt"); + + chip->flag = (1 << spi->chip_select) << 8; + } else + chip->cs_gpio = chip->chip_select_num - MAX_CTRL_CS; + + if (chip->enable_dma && chip->pio_interrupt) { + dev_err(&spi->dev, "enable_dma is set, " + "do not set pio_interrupt\n"); + goto error; + } + /* + * if any one SPI chip is registered and wants DMA, request the + * DMA channel for it + */ + if (chip->enable_dma && !drv_data->dma_requested) { + /* register dma irq handler */ + ret = request_dma(drv_data->dma_channel, "BFIN_SPI_DMA"); + if (ret) { + dev_err(&spi->dev, + "Unable to request BlackFin SPI DMA channel\n"); + goto error; + } + drv_data->dma_requested = 1; + + ret = set_dma_callback(drv_data->dma_channel, + bfin_spi_dma_irq_handler, drv_data); + if (ret) { + dev_err(&spi->dev, "Unable to set dma callback\n"); + goto error; + } + dma_disable_irq(drv_data->dma_channel); + } + + if (chip->pio_interrupt && !drv_data->irq_requested) { + ret = request_irq(drv_data->spi_irq, bfin_spi_pio_irq_handler, + IRQF_DISABLED, "BFIN_SPI", drv_data); + if (ret) { + dev_err(&spi->dev, "Unable to register spi IRQ\n"); + goto error; + } + drv_data->irq_requested = 1; + /* we use write mode, spi irq has to be disabled here */ + disable_irq(drv_data->spi_irq); + } + + if (chip->chip_select_num >= MAX_CTRL_CS) { + /* Only request on first setup */ + if (spi_get_ctldata(spi) == NULL) { + ret = gpio_request(chip->cs_gpio, spi->modalias); + if (ret) { + dev_err(&spi->dev, "gpio_request() error\n"); + goto pin_error; + } + gpio_direction_output(chip->cs_gpio, 1); + } + } + + dev_dbg(&spi->dev, "setup spi chip %s, width is %d, dma is %d\n", + spi->modalias, spi->bits_per_word, chip->enable_dma); + dev_dbg(&spi->dev, "ctl_reg is 0x%x, flag_reg is 0x%x\n", + chip->ctl_reg, chip->flag); + + spi_set_ctldata(spi, chip); + + dev_dbg(&spi->dev, "chip select number is %d\n", chip->chip_select_num); + if (chip->chip_select_num < MAX_CTRL_CS) { + ret = 
peripheral_request(ssel[spi->master->bus_num] + [chip->chip_select_num-1], spi->modalias); + if (ret) { + dev_err(&spi->dev, "peripheral_request() error\n"); + goto pin_error; + } + } + + bfin_spi_cs_enable(drv_data, chip); + bfin_spi_cs_deactive(drv_data, chip); + + return 0; + + pin_error: + if (chip->chip_select_num >= MAX_CTRL_CS) + gpio_free(chip->cs_gpio); + else + peripheral_free(ssel[spi->master->bus_num] + [chip->chip_select_num - 1]); + error: + if (chip) { + if (drv_data->dma_requested) + free_dma(drv_data->dma_channel); + drv_data->dma_requested = 0; + + kfree(chip); + /* prevent free 'chip' twice */ + spi_set_ctldata(spi, NULL); + } + + return ret; +} + +/* + * callback for spi framework. + * clean driver specific data + */ +static void bfin_spi_cleanup(struct spi_device *spi) +{ + struct bfin_spi_slave_data *chip = spi_get_ctldata(spi); + struct bfin_spi_master_data *drv_data = spi_master_get_devdata(spi->master); + + if (!chip) + return; + + if (chip->chip_select_num < MAX_CTRL_CS) { + peripheral_free(ssel[spi->master->bus_num] + [chip->chip_select_num-1]); + bfin_spi_cs_disable(drv_data, chip); + } else + gpio_free(chip->cs_gpio); + + kfree(chip); + /* prevent free 'chip' twice */ + spi_set_ctldata(spi, NULL); +} + +static inline int bfin_spi_init_queue(struct bfin_spi_master_data *drv_data) +{ + INIT_LIST_HEAD(&drv_data->queue); + spin_lock_init(&drv_data->lock); + + drv_data->running = false; + drv_data->busy = 0; + + /* init transfer tasklet */ + tasklet_init(&drv_data->pump_transfers, + bfin_spi_pump_transfers, (unsigned long)drv_data); + + /* init messages workqueue */ + INIT_WORK(&drv_data->pump_messages, bfin_spi_pump_messages); + drv_data->workqueue = create_singlethread_workqueue( + dev_name(drv_data->master->dev.parent)); + if (drv_data->workqueue == NULL) + return -EBUSY; + + return 0; +} + +static inline int bfin_spi_start_queue(struct bfin_spi_master_data *drv_data) +{ + unsigned long flags; + + spin_lock_irqsave(&drv_data->lock, flags); + + if (drv_data->running || drv_data->busy) { + spin_unlock_irqrestore(&drv_data->lock, flags); + return -EBUSY; + } + + drv_data->running = true; + drv_data->cur_msg = NULL; + drv_data->cur_transfer = NULL; + drv_data->cur_chip = NULL; + spin_unlock_irqrestore(&drv_data->lock, flags); + + queue_work(drv_data->workqueue, &drv_data->pump_messages); + + return 0; +} + +static inline int bfin_spi_stop_queue(struct bfin_spi_master_data *drv_data) +{ + unsigned long flags; + unsigned limit = 500; + int status = 0; + + spin_lock_irqsave(&drv_data->lock, flags); + + /* + * This is a bit lame, but is optimized for the common execution path. + * A wait_queue on the drv_data->busy could be used, but then the common + * execution path (pump_messages) would be required to call wake_up or + * friends on every SPI message. 
Do this instead + */ + drv_data->running = false; + while ((!list_empty(&drv_data->queue) || drv_data->busy) && limit--) { + spin_unlock_irqrestore(&drv_data->lock, flags); + msleep(10); + spin_lock_irqsave(&drv_data->lock, flags); + } + + if (!list_empty(&drv_data->queue) || drv_data->busy) + status = -EBUSY; + + spin_unlock_irqrestore(&drv_data->lock, flags); + + return status; +} + +static inline int bfin_spi_destroy_queue(struct bfin_spi_master_data *drv_data) +{ + int status; + + status = bfin_spi_stop_queue(drv_data); + if (status != 0) + return status; + + destroy_workqueue(drv_data->workqueue); + + return 0; +} + +static int __init bfin_spi_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct bfin5xx_spi_master *platform_info; + struct spi_master *master; + struct bfin_spi_master_data *drv_data; + struct resource *res; + int status = 0; + + platform_info = dev->platform_data; + + /* Allocate master with space for drv_data */ + master = spi_alloc_master(dev, sizeof(*drv_data)); + if (!master) { + dev_err(&pdev->dev, "can not alloc spi_master\n"); + return -ENOMEM; + } + + drv_data = spi_master_get_devdata(master); + drv_data->master = master; + drv_data->master_info = platform_info; + drv_data->pdev = pdev; + drv_data->pin_req = platform_info->pin_req; + + /* the spi->mode bits supported by this driver: */ + master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST; + + master->bus_num = pdev->id; + master->num_chipselect = platform_info->num_chipselect; + master->cleanup = bfin_spi_cleanup; + master->setup = bfin_spi_setup; + master->transfer = bfin_spi_transfer; + + /* Find and map our resources */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (res == NULL) { + dev_err(dev, "Cannot get IORESOURCE_MEM\n"); + status = -ENOENT; + goto out_error_get_res; + } + + drv_data->regs_base = ioremap(res->start, resource_size(res)); + if (drv_data->regs_base == NULL) { + dev_err(dev, "Cannot map IO\n"); + status = -ENXIO; + goto out_error_ioremap; + } + + res = platform_get_resource(pdev, IORESOURCE_DMA, 0); + if (res == NULL) { + dev_err(dev, "No DMA channel specified\n"); + status = -ENOENT; + goto out_error_free_io; + } + drv_data->dma_channel = res->start; + + drv_data->spi_irq = platform_get_irq(pdev, 0); + if (drv_data->spi_irq < 0) { + dev_err(dev, "No spi pio irq specified\n"); + status = -ENOENT; + goto out_error_free_io; + } + + /* Initial and start queue */ + status = bfin_spi_init_queue(drv_data); + if (status != 0) { + dev_err(dev, "problem initializing queue\n"); + goto out_error_queue_alloc; + } + + status = bfin_spi_start_queue(drv_data); + if (status != 0) { + dev_err(dev, "problem starting queue\n"); + goto out_error_queue_alloc; + } + + status = peripheral_request_list(drv_data->pin_req, DRV_NAME); + if (status != 0) { + dev_err(&pdev->dev, ": Requesting Peripherals failed\n"); + goto out_error_queue_alloc; + } + + /* Reset SPI registers. If these registers were used by the boot loader, + * the sky may fall on your head if you enable the dma controller. 
+ */ + write_CTRL(drv_data, BIT_CTL_CPHA | BIT_CTL_MASTER); + write_FLAG(drv_data, 0xFF00); + + /* Register with the SPI framework */ + platform_set_drvdata(pdev, drv_data); + status = spi_register_master(master); + if (status != 0) { + dev_err(dev, "problem registering spi master\n"); + goto out_error_queue_alloc; + } + + dev_info(dev, "%s, Version %s, regs_base@%p, dma channel@%d\n", + DRV_DESC, DRV_VERSION, drv_data->regs_base, + drv_data->dma_channel); + return status; + +out_error_queue_alloc: + bfin_spi_destroy_queue(drv_data); +out_error_free_io: + iounmap((void *) drv_data->regs_base); +out_error_ioremap: +out_error_get_res: + spi_master_put(master); + + return status; +} + +/* stop hardware and remove the driver */ +static int __devexit bfin_spi_remove(struct platform_device *pdev) +{ + struct bfin_spi_master_data *drv_data = platform_get_drvdata(pdev); + int status = 0; + + if (!drv_data) + return 0; + + /* Remove the queue */ + status = bfin_spi_destroy_queue(drv_data); + if (status != 0) + return status; + + /* Disable the SSP at the peripheral and SOC level */ + bfin_spi_disable(drv_data); + + /* Release DMA */ + if (drv_data->master_info->enable_dma) { + if (dma_channel_active(drv_data->dma_channel)) + free_dma(drv_data->dma_channel); + } + + if (drv_data->irq_requested) { + free_irq(drv_data->spi_irq, drv_data); + drv_data->irq_requested = 0; + } + + /* Disconnect from the SPI framework */ + spi_unregister_master(drv_data->master); + + peripheral_free_list(drv_data->pin_req); + + /* Prevent double remove */ + platform_set_drvdata(pdev, NULL); + + return 0; +} + +#ifdef CONFIG_PM +static int bfin_spi_suspend(struct platform_device *pdev, pm_message_t state) +{ + struct bfin_spi_master_data *drv_data = platform_get_drvdata(pdev); + int status = 0; + + status = bfin_spi_stop_queue(drv_data); + if (status != 0) + return status; + + drv_data->ctrl_reg = read_CTRL(drv_data); + drv_data->flag_reg = read_FLAG(drv_data); + + /* + * reset SPI_CTL and SPI_FLG registers + */ + write_CTRL(drv_data, BIT_CTL_CPHA | BIT_CTL_MASTER); + write_FLAG(drv_data, 0xFF00); + + return 0; +} + +static int bfin_spi_resume(struct platform_device *pdev) +{ + struct bfin_spi_master_data *drv_data = platform_get_drvdata(pdev); + int status = 0; + + write_CTRL(drv_data, drv_data->ctrl_reg); + write_FLAG(drv_data, drv_data->flag_reg); + + /* Start the queue running */ + status = bfin_spi_start_queue(drv_data); + if (status != 0) { + dev_err(&pdev->dev, "problem starting queue (%d)\n", status); + return status; + } + + return 0; +} +#else +#define bfin_spi_suspend NULL +#define bfin_spi_resume NULL +#endif /* CONFIG_PM */ + +MODULE_ALIAS("platform:bfin-spi"); +static struct platform_driver bfin_spi_driver = { + .driver = { + .name = DRV_NAME, + .owner = THIS_MODULE, + }, + .suspend = bfin_spi_suspend, + .resume = bfin_spi_resume, + .remove = __devexit_p(bfin_spi_remove), +}; + +static int __init bfin_spi_init(void) +{ + return platform_driver_probe(&bfin_spi_driver, bfin_spi_probe); +} +subsys_initcall(bfin_spi_init); + +static void __exit bfin_spi_exit(void) +{ + platform_driver_unregister(&bfin_spi_driver); +} +module_exit(bfin_spi_exit); diff --git a/drivers/spi/spi-bitbang-txrx.h b/drivers/spi/spi-bitbang-txrx.h new file mode 100644 index 0000000..c16bf85 --- /dev/null +++ b/drivers/spi/spi-bitbang-txrx.h @@ -0,0 +1,97 @@ +/* + * Mix this utility code with some glue code to get one of several types of + * simple SPI master driver. Two do polled word-at-a-time I/O: + * + * - GPIO/parport bitbangers. 
Provide chipselect() and txrx_word[](), + * expanding the per-word routines from the inline templates below. + * + * - Drivers for controllers resembling bare shift registers. Provide + * chipselect() and txrx_word[](), with custom setup()/cleanup() methods + * that use your controller's clock and chipselect registers. + * + * Some hardware works well with requests at spi_transfer scope: + * + * - Drivers leveraging smarter hardware, with fifos or DMA; or for half + * duplex (MicroWire) controllers. Provide chipselect() and txrx_bufs(), + * and custom setup()/cleanup() methods. + */ + +/* + * The code that knows what GPIO pins do what should have declared four + * functions, ideally as inlines, before including this header: + * + * void setsck(struct spi_device *, int is_on); + * void setmosi(struct spi_device *, int is_on); + * int getmiso(struct spi_device *); + * void spidelay(unsigned); + * + * setsck()'s is_on parameter is a zero/nonzero boolean. + * + * setmosi()'s is_on parameter is a zero/nonzero boolean. + * + * getmiso() is required to return 0 or 1 only. Any other value is invalid + * and will result in improper operation. + * + * A non-inlined routine would call bitbang_txrx_*() routines. The + * main loop could easily compile down to a handful of instructions, + * especially if the delay is a NOP (to run at peak speed). + * + * Since this is software, the timings may not be exactly what your board's + * chips need ... there may be several reasons you'd need to tweak timings + * in these routines, not just make to make it faster or slower to match a + * particular CPU clock rate. + */ + +static inline u32 +bitbang_txrx_be_cpha0(struct spi_device *spi, + unsigned nsecs, unsigned cpol, unsigned flags, + u32 word, u8 bits) +{ + /* if (cpol == 0) this is SPI_MODE_0; else this is SPI_MODE_2 */ + + /* clock starts at inactive polarity */ + for (word <<= (32 - bits); likely(bits); bits--) { + + /* setup MSB (to slave) on trailing edge */ + if ((flags & SPI_MASTER_NO_TX) == 0) + setmosi(spi, word & (1 << 31)); + spidelay(nsecs); /* T(setup) */ + + setsck(spi, !cpol); + spidelay(nsecs); + + /* sample MSB (from slave) on leading edge */ + word <<= 1; + if ((flags & SPI_MASTER_NO_RX) == 0) + word |= getmiso(spi); + setsck(spi, cpol); + } + return word; +} + +static inline u32 +bitbang_txrx_be_cpha1(struct spi_device *spi, + unsigned nsecs, unsigned cpol, unsigned flags, + u32 word, u8 bits) +{ + /* if (cpol == 0) this is SPI_MODE_1; else this is SPI_MODE_3 */ + + /* clock starts at inactive polarity */ + for (word <<= (32 - bits); likely(bits); bits--) { + + /* setup MSB (to slave) on leading edge */ + setsck(spi, !cpol); + if ((flags & SPI_MASTER_NO_TX) == 0) + setmosi(spi, word & (1 << 31)); + spidelay(nsecs); /* T(setup) */ + + setsck(spi, cpol); + spidelay(nsecs); + + /* sample MSB (from slave) on trailing edge */ + word <<= 1; + if ((flags & SPI_MASTER_NO_RX) == 0) + word |= getmiso(spi); + } + return word; +} diff --git a/drivers/spi/spi-bitbang.c b/drivers/spi/spi-bitbang.c new file mode 100644 index 0000000..02d57fb --- /dev/null +++ b/drivers/spi/spi-bitbang.c @@ -0,0 +1,505 @@ +/* + * polling/bitbanging SPI master controller driver utilities + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + + +/*----------------------------------------------------------------------*/ + +/* + * FIRST PART (OPTIONAL): word-at-a-time spi_transfer support. + * Use this for GPIO or shift-register level hardware APIs. + * + * spi_bitbang_cs is in spi_device->controller_state, which is unavailable + * to glue code. These bitbang setup() and cleanup() routines are always + * used, though maybe they're called from controller-aware code. + * + * chipselect() and friends may use use spi_device->controller_data and + * controller registers as appropriate. + * + * + * NOTE: SPI controller pins can often be used as GPIO pins instead, + * which means you could use a bitbang driver either to get hardware + * working quickly, or testing for differences that aren't speed related. + */ + +struct spi_bitbang_cs { + unsigned nsecs; /* (clock cycle time)/2 */ + u32 (*txrx_word)(struct spi_device *spi, unsigned nsecs, + u32 word, u8 bits); + unsigned (*txrx_bufs)(struct spi_device *, + u32 (*txrx_word)( + struct spi_device *spi, + unsigned nsecs, + u32 word, u8 bits), + unsigned, struct spi_transfer *); +}; + +static unsigned bitbang_txrx_8( + struct spi_device *spi, + u32 (*txrx_word)(struct spi_device *spi, + unsigned nsecs, + u32 word, u8 bits), + unsigned ns, + struct spi_transfer *t +) { + unsigned bits = t->bits_per_word ? : spi->bits_per_word; + unsigned count = t->len; + const u8 *tx = t->tx_buf; + u8 *rx = t->rx_buf; + + while (likely(count > 0)) { + u8 word = 0; + + if (tx) + word = *tx++; + word = txrx_word(spi, ns, word, bits); + if (rx) + *rx++ = word; + count -= 1; + } + return t->len - count; +} + +static unsigned bitbang_txrx_16( + struct spi_device *spi, + u32 (*txrx_word)(struct spi_device *spi, + unsigned nsecs, + u32 word, u8 bits), + unsigned ns, + struct spi_transfer *t +) { + unsigned bits = t->bits_per_word ? : spi->bits_per_word; + unsigned count = t->len; + const u16 *tx = t->tx_buf; + u16 *rx = t->rx_buf; + + while (likely(count > 1)) { + u16 word = 0; + + if (tx) + word = *tx++; + word = txrx_word(spi, ns, word, bits); + if (rx) + *rx++ = word; + count -= 2; + } + return t->len - count; +} + +static unsigned bitbang_txrx_32( + struct spi_device *spi, + u32 (*txrx_word)(struct spi_device *spi, + unsigned nsecs, + u32 word, u8 bits), + unsigned ns, + struct spi_transfer *t +) { + unsigned bits = t->bits_per_word ? 
: spi->bits_per_word; + unsigned count = t->len; + const u32 *tx = t->tx_buf; + u32 *rx = t->rx_buf; + + while (likely(count > 3)) { + u32 word = 0; + + if (tx) + word = *tx++; + word = txrx_word(spi, ns, word, bits); + if (rx) + *rx++ = word; + count -= 4; + } + return t->len - count; +} + +int spi_bitbang_setup_transfer(struct spi_device *spi, struct spi_transfer *t) +{ + struct spi_bitbang_cs *cs = spi->controller_state; + u8 bits_per_word; + u32 hz; + + if (t) { + bits_per_word = t->bits_per_word; + hz = t->speed_hz; + } else { + bits_per_word = 0; + hz = 0; + } + + /* spi_transfer level calls that work per-word */ + if (!bits_per_word) + bits_per_word = spi->bits_per_word; + if (bits_per_word <= 8) + cs->txrx_bufs = bitbang_txrx_8; + else if (bits_per_word <= 16) + cs->txrx_bufs = bitbang_txrx_16; + else if (bits_per_word <= 32) + cs->txrx_bufs = bitbang_txrx_32; + else + return -EINVAL; + + /* nsecs = (clock period)/2 */ + if (!hz) + hz = spi->max_speed_hz; + if (hz) { + cs->nsecs = (1000000000/2) / hz; + if (cs->nsecs > (MAX_UDELAY_MS * 1000 * 1000)) + return -EINVAL; + } + + return 0; +} +EXPORT_SYMBOL_GPL(spi_bitbang_setup_transfer); + +/** + * spi_bitbang_setup - default setup for per-word I/O loops + */ +int spi_bitbang_setup(struct spi_device *spi) +{ + struct spi_bitbang_cs *cs = spi->controller_state; + struct spi_bitbang *bitbang; + int retval; + unsigned long flags; + + bitbang = spi_master_get_devdata(spi->master); + + if (!cs) { + cs = kzalloc(sizeof *cs, GFP_KERNEL); + if (!cs) + return -ENOMEM; + spi->controller_state = cs; + } + + /* per-word shift register access, in hardware or bitbanging */ + cs->txrx_word = bitbang->txrx_word[spi->mode & (SPI_CPOL|SPI_CPHA)]; + if (!cs->txrx_word) + return -EINVAL; + + retval = bitbang->setup_transfer(spi, NULL); + if (retval < 0) + return retval; + + dev_dbg(&spi->dev, "%s, %u nsec/bit\n", __func__, 2 * cs->nsecs); + + /* NOTE we _need_ to call chipselect() early, ideally with adapter + * setup, unless the hardware defaults cooperate to avoid confusion + * between normal (active low) and inverted chipselects. + */ + + /* deselect chip (low or high) */ + spin_lock_irqsave(&bitbang->lock, flags); + if (!bitbang->busy) { + bitbang->chipselect(spi, BITBANG_CS_INACTIVE); + ndelay(cs->nsecs); + } + spin_unlock_irqrestore(&bitbang->lock, flags); + + return 0; +} +EXPORT_SYMBOL_GPL(spi_bitbang_setup); + +/** + * spi_bitbang_cleanup - default cleanup for per-word I/O loops + */ +void spi_bitbang_cleanup(struct spi_device *spi) +{ + kfree(spi->controller_state); +} +EXPORT_SYMBOL_GPL(spi_bitbang_cleanup); + +static int spi_bitbang_bufs(struct spi_device *spi, struct spi_transfer *t) +{ + struct spi_bitbang_cs *cs = spi->controller_state; + unsigned nsecs = cs->nsecs; + + return cs->txrx_bufs(spi, cs->txrx_word, nsecs, t); +} + +/*----------------------------------------------------------------------*/ + +/* + * SECOND PART ... simple transfer queue runner. + * + * This costs a task context per controller, running the queue by + * performing each transfer in sequence. Smarter hardware can queue + * several DMA transfers at once, and process several controller queues + * in parallel; this driver doesn't match such hardware very well. + * + * Drivers can provide word-at-a-time i/o primitives, or provide + * transfer-at-a-time ones to leverage dma or fifo hardware. 
+ */ +static void bitbang_work(struct work_struct *work) +{ + struct spi_bitbang *bitbang = + container_of(work, struct spi_bitbang, work); + unsigned long flags; + + spin_lock_irqsave(&bitbang->lock, flags); + bitbang->busy = 1; + while (!list_empty(&bitbang->queue)) { + struct spi_message *m; + struct spi_device *spi; + unsigned nsecs; + struct spi_transfer *t = NULL; + unsigned tmp; + unsigned cs_change; + int status; + int do_setup = -1; + + m = container_of(bitbang->queue.next, struct spi_message, + queue); + list_del_init(&m->queue); + spin_unlock_irqrestore(&bitbang->lock, flags); + + /* FIXME this is made-up ... the correct value is known to + * word-at-a-time bitbang code, and presumably chipselect() + * should enforce these requirements too? + */ + nsecs = 100; + + spi = m->spi; + tmp = 0; + cs_change = 1; + status = 0; + + list_for_each_entry (t, &m->transfers, transfer_list) { + + /* override speed or wordsize? */ + if (t->speed_hz || t->bits_per_word) + do_setup = 1; + + /* init (-1) or override (1) transfer params */ + if (do_setup != 0) { + status = bitbang->setup_transfer(spi, t); + if (status < 0) + break; + if (do_setup == -1) + do_setup = 0; + } + + /* set up default clock polarity, and activate chip; + * this implicitly updates clock and spi modes as + * previously recorded for this device via setup(). + * (and also deselects any other chip that might be + * selected ...) + */ + if (cs_change) { + bitbang->chipselect(spi, BITBANG_CS_ACTIVE); + ndelay(nsecs); + } + cs_change = t->cs_change; + if (!t->tx_buf && !t->rx_buf && t->len) { + status = -EINVAL; + break; + } + + /* transfer data. the lower level code handles any + * new dma mappings it needs. our caller always gave + * us dma-safe buffers. + */ + if (t->len) { + /* REVISIT dma API still needs a designated + * DMA_ADDR_INVALID; ~0 might be better. + */ + if (!m->is_dma_mapped) + t->rx_dma = t->tx_dma = 0; + status = bitbang->txrx_bufs(spi, t); + } + if (status > 0) + m->actual_length += status; + if (status != t->len) { + /* always report some kind of error */ + if (status >= 0) + status = -EREMOTEIO; + break; + } + status = 0; + + /* protocol tweaks before next transfer */ + if (t->delay_usecs) + udelay(t->delay_usecs); + + if (!cs_change) + continue; + if (t->transfer_list.next == &m->transfers) + break; + + /* sometimes a short mid-message deselect of the chip + * may be needed to terminate a mode or command + */ + ndelay(nsecs); + bitbang->chipselect(spi, BITBANG_CS_INACTIVE); + ndelay(nsecs); + } + + m->status = status; + m->complete(m->context); + + /* normally deactivate chipselect ... unless no error and + * cs_change has hinted that the next message will probably + * be for this chip too. 
+ */ + if (!(status == 0 && cs_change)) { + ndelay(nsecs); + bitbang->chipselect(spi, BITBANG_CS_INACTIVE); + ndelay(nsecs); + } + + spin_lock_irqsave(&bitbang->lock, flags); + } + bitbang->busy = 0; + spin_unlock_irqrestore(&bitbang->lock, flags); +} + +/** + * spi_bitbang_transfer - default submit to transfer queue + */ +int spi_bitbang_transfer(struct spi_device *spi, struct spi_message *m) +{ + struct spi_bitbang *bitbang; + unsigned long flags; + int status = 0; + + m->actual_length = 0; + m->status = -EINPROGRESS; + + bitbang = spi_master_get_devdata(spi->master); + + spin_lock_irqsave(&bitbang->lock, flags); + if (!spi->max_speed_hz) + status = -ENETDOWN; + else { + list_add_tail(&m->queue, &bitbang->queue); + queue_work(bitbang->workqueue, &bitbang->work); + } + spin_unlock_irqrestore(&bitbang->lock, flags); + + return status; +} +EXPORT_SYMBOL_GPL(spi_bitbang_transfer); + +/*----------------------------------------------------------------------*/ + +/** + * spi_bitbang_start - start up a polled/bitbanging SPI master driver + * @bitbang: driver handle + * + * Caller should have zero-initialized all parts of the structure, and then + * provided callbacks for chip selection and I/O loops. If the master has + * a transfer method, its final step should call spi_bitbang_transfer; or, + * that's the default if the transfer routine is not initialized. It should + * also set up the bus number and number of chipselects. + * + * For i/o loops, provide callbacks either per-word (for bitbanging, or for + * hardware that basically exposes a shift register) or per-spi_transfer + * (which takes better advantage of hardware like fifos or DMA engines). + * + * Drivers using per-word I/O loops should use (or call) spi_bitbang_setup, + * spi_bitbang_cleanup and spi_bitbang_setup_transfer to handle those spi + * master methods. Those methods are the defaults if the bitbang->txrx_bufs + * routine isn't initialized. + * + * This routine registers the spi_master, which will process requests in a + * dedicated task, keeping IRQs unblocked most of the time. To stop + * processing those requests, call spi_bitbang_stop(). 
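+ * + * A minimal usage sketch (illustrative only; the mydev_* callbacks and + * the bus/chipselect numbers are assumptions, not part of this file): + * + *	master = spi_alloc_master(dev, sizeof(struct spi_bitbang)); + *	bitbang = spi_master_get_devdata(master); + *	master->bus_num = 0; + *	master->num_chipselect = 1; + *	bitbang->master = spi_master_get(master); + *	bitbang->chipselect = mydev_chipselect; + *	bitbang->txrx_word[SPI_MODE_0] = mydev_txrx_word_mode0; + *	status = spi_bitbang_start(bitbang); + * + * With only txrx_word[] provided as above, the default spi_bitbang_setup, + * spi_bitbang_cleanup and spi_bitbang_setup_transfer methods are + * installed automatically.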
+ */ +int spi_bitbang_start(struct spi_bitbang *bitbang) +{ + int status; + + if (!bitbang->master || !bitbang->chipselect) + return -EINVAL; + + INIT_WORK(&bitbang->work, bitbang_work); + spin_lock_init(&bitbang->lock); + INIT_LIST_HEAD(&bitbang->queue); + + if (!bitbang->master->mode_bits) + bitbang->master->mode_bits = SPI_CPOL | SPI_CPHA | bitbang->flags; + + if (!bitbang->master->transfer) + bitbang->master->transfer = spi_bitbang_transfer; + if (!bitbang->txrx_bufs) { + bitbang->use_dma = 0; + bitbang->txrx_bufs = spi_bitbang_bufs; + if (!bitbang->master->setup) { + if (!bitbang->setup_transfer) + bitbang->setup_transfer = + spi_bitbang_setup_transfer; + bitbang->master->setup = spi_bitbang_setup; + bitbang->master->cleanup = spi_bitbang_cleanup; + } + } else if (!bitbang->master->setup) + return -EINVAL; + if (bitbang->master->transfer == spi_bitbang_transfer && + !bitbang->setup_transfer) + return -EINVAL; + + /* this task is the only thing to touch the SPI bits */ + bitbang->busy = 0; + bitbang->workqueue = create_singlethread_workqueue( + dev_name(bitbang->master->dev.parent)); + if (bitbang->workqueue == NULL) { + status = -EBUSY; + goto err1; + } + + /* driver may get busy before register() returns, especially + * if someone registered boardinfo for devices + */ + status = spi_register_master(bitbang->master); + if (status < 0) + goto err2; + + return status; + +err2: + destroy_workqueue(bitbang->workqueue); +err1: + return status; +} +EXPORT_SYMBOL_GPL(spi_bitbang_start); + +/** + * spi_bitbang_stop - stops the task providing spi communication + */ +int spi_bitbang_stop(struct spi_bitbang *bitbang) +{ + spi_unregister_master(bitbang->master); + + WARN_ON(!list_empty(&bitbang->queue)); + + destroy_workqueue(bitbang->workqueue); + + return 0; +} +EXPORT_SYMBOL_GPL(spi_bitbang_stop); + +MODULE_LICENSE("GPL"); + diff --git a/drivers/spi/spi-butterfly.c b/drivers/spi/spi-butterfly.c new file mode 100644 index 0000000..9f907ec --- /dev/null +++ b/drivers/spi/spi-butterfly.c @@ -0,0 +1,356 @@ +/* + * parport-to-butterfly adapter + * + * Copyright (C) 2005 David Brownell + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include + + +/* + * This uses SPI to talk with an "AVR Butterfly", which is a $US20 card + * with a battery powered AVR microcontroller and lots of goodies. You + * can use GCC to develop firmware for this. + * + * See Documentation/spi/butterfly for information about how to build + * and use this custom parallel port cable. 
+ */ + + +/* DATA output bits (pins 2..9 == D0..D7) */ +#define butterfly_nreset (1 << 1) /* pin 3 */ + +#define spi_sck_bit (1 << 0) /* pin 2 */ +#define spi_mosi_bit (1 << 7) /* pin 9 */ + +#define vcc_bits ((1 << 6) | (1 << 5)) /* pins 7, 8 */ + +/* STATUS input bits */ +#define spi_miso_bit PARPORT_STATUS_BUSY /* pin 11 */ + +/* CONTROL output bits */ +#define spi_cs_bit PARPORT_CONTROL_SELECT /* pin 17 */ + + + +static inline struct butterfly *spidev_to_pp(struct spi_device *spi) +{ + return spi->controller_data; +} + + +struct butterfly { + /* REVISIT ... for now, this must be first */ + struct spi_bitbang bitbang; + + struct parport *port; + struct pardevice *pd; + + u8 lastbyte; + + struct spi_device *dataflash; + struct spi_device *butterfly; + struct spi_board_info info[2]; + +}; + +/*----------------------------------------------------------------------*/ + +static inline void +setsck(struct spi_device *spi, int is_on) +{ + struct butterfly *pp = spidev_to_pp(spi); + u8 bit, byte = pp->lastbyte; + + bit = spi_sck_bit; + + if (is_on) + byte |= bit; + else + byte &= ~bit; + parport_write_data(pp->port, byte); + pp->lastbyte = byte; +} + +static inline void +setmosi(struct spi_device *spi, int is_on) +{ + struct butterfly *pp = spidev_to_pp(spi); + u8 bit, byte = pp->lastbyte; + + bit = spi_mosi_bit; + + if (is_on) + byte |= bit; + else + byte &= ~bit; + parport_write_data(pp->port, byte); + pp->lastbyte = byte; +} + +static inline int getmiso(struct spi_device *spi) +{ + struct butterfly *pp = spidev_to_pp(spi); + int value; + u8 bit; + + bit = spi_miso_bit; + + /* only STATUS_BUSY is NOT negated */ + value = !(parport_read_status(pp->port) & bit); + return (bit == PARPORT_STATUS_BUSY) ? value : !value; +} + +static void butterfly_chipselect(struct spi_device *spi, int value) +{ + struct butterfly *pp = spidev_to_pp(spi); + + /* set default clock polarity */ + if (value != BITBANG_CS_INACTIVE) + setsck(spi, spi->mode & SPI_CPOL); + + /* here, value == "activate or not"; + * most PARPORT_CONTROL_* bits are negated, so we must + * morph it to value == "bit value to write in control register" + */ + if (spi_cs_bit == PARPORT_CONTROL_INIT) + value = !value; + + parport_frob_control(pp->port, spi_cs_bit, value ? spi_cs_bit : 0); +} + + +/* we only needed to implement one mode here, and choose SPI_MODE_0 */ + +#define spidelay(X) do{}while(0) +//#define spidelay ndelay + +#include "spi-bitbang-txrx.h" + +static u32 +butterfly_txrx_word_mode0(struct spi_device *spi, + unsigned nsecs, + u32 word, u8 bits) +{ + return bitbang_txrx_be_cpha0(spi, nsecs, 0, 0, word, bits); +} + +/*----------------------------------------------------------------------*/ + +/* override default partitioning with cmdlinepart */ +static struct mtd_partition partitions[] = { { + /* JFFS2 wants partitions of 4*N blocks for this device, + * so sectors 0 and 1 can't be partitions by themselves. 
+ */ + + /* sector 0 = 8 pages * 264 bytes/page (1 block) + * sector 1 = 248 pages * 264 bytes/page + */ + .name = "bookkeeping", // 66 KB + .offset = 0, + .size = (8 + 248) * 264, +// .mask_flags = MTD_WRITEABLE, +}, { + /* sector 2 = 256 pages * 264 bytes/page + * sectors 3-5 = 512 pages * 264 bytes/page + */ + .name = "filesystem", // 462 KB + .offset = MTDPART_OFS_APPEND, + .size = MTDPART_SIZ_FULL, +} }; + +static struct flash_platform_data flash = { + .name = "butterflash", + .parts = partitions, + .nr_parts = ARRAY_SIZE(partitions), +}; + + +/* REVISIT remove this ugly global and its "only one" limitation */ +static struct butterfly *butterfly; + +static void butterfly_attach(struct parport *p) +{ + struct pardevice *pd; + int status; + struct butterfly *pp; + struct spi_master *master; + struct device *dev = p->physport->dev; + + if (butterfly || !dev) + return; + + /* REVISIT: this just _assumes_ a butterfly is there ... no probe, + * and no way to be selective about what it binds to. + */ + + master = spi_alloc_master(dev, sizeof *pp); + if (!master) { + status = -ENOMEM; + goto done; + } + pp = spi_master_get_devdata(master); + + /* + * SPI and bitbang hookup + * + * use default setup(), cleanup(), and transfer() methods; and + * only bother implementing mode 0. Start it later. + */ + master->bus_num = 42; + master->num_chipselect = 2; + + pp->bitbang.master = spi_master_get(master); + pp->bitbang.chipselect = butterfly_chipselect; + pp->bitbang.txrx_word[SPI_MODE_0] = butterfly_txrx_word_mode0; + + /* + * parport hookup + */ + pp->port = p; + pd = parport_register_device(p, "spi_butterfly", + NULL, NULL, NULL, + 0 /* FLAGS */, pp); + if (!pd) { + status = -ENOMEM; + goto clean0; + } + pp->pd = pd; + + status = parport_claim(pd); + if (status < 0) + goto clean1; + + /* + * Butterfly reset, powerup, run firmware + */ + pr_debug("%s: powerup/reset Butterfly\n", p->name); + + /* nCS for dataflash (this bit is inverted on output) */ + parport_frob_control(pp->port, spi_cs_bit, 0); + + /* stabilize power with chip in reset (nRESET), and + * spi_sck_bit clear (CPOL=0) + */ + pp->lastbyte |= vcc_bits; + parport_write_data(pp->port, pp->lastbyte); + msleep(5); + + /* take it out of reset; assume long reset delay */ + pp->lastbyte |= butterfly_nreset; + parport_write_data(pp->port, pp->lastbyte); + msleep(100); + + + /* + * Start SPI ... for now, hide that we're two physical busses. + */ + status = spi_bitbang_start(&pp->bitbang); + if (status < 0) + goto clean2; + + /* Bus 1 lets us talk to at45db041b (firmware disables AVR SPI), AVR + * (firmware resets at45, acts as spi slave) or neither (we ignore + * both, AVR uses AT45). Here we expect firmware for the first option. + */ + + pp->info[0].max_speed_hz = 15 * 1000 * 1000; + strcpy(pp->info[0].modalias, "mtd_dataflash"); + pp->info[0].platform_data = &flash; + pp->info[0].chip_select = 1; + pp->info[0].controller_data = pp; + pp->dataflash = spi_new_device(pp->bitbang.master, &pp->info[0]); + if (pp->dataflash) + pr_debug("%s: dataflash at %s\n", p->name, + dev_name(&pp->dataflash->dev)); + + // dev_info(_what?_, ...) 
+ pr_info("%s: AVR Butterfly\n", p->name); + butterfly = pp; + return; + +clean2: + /* turn off VCC */ + parport_write_data(pp->port, 0); + + parport_release(pp->pd); +clean1: + parport_unregister_device(pd); +clean0: + (void) spi_master_put(pp->bitbang.master); +done: + pr_debug("%s: butterfly probe, fail %d\n", p->name, status); +} + +static void butterfly_detach(struct parport *p) +{ + struct butterfly *pp; + int status; + + /* FIXME this global is ugly ... but, how to quickly get from + * the parport to the "struct butterfly" associated with it? + * "old school" driver-internal device lists? + */ + if (!butterfly || butterfly->port != p) + return; + pp = butterfly; + butterfly = NULL; + + /* stop() unregisters child devices too */ + status = spi_bitbang_stop(&pp->bitbang); + + /* turn off VCC */ + parport_write_data(pp->port, 0); + msleep(10); + + parport_release(pp->pd); + parport_unregister_device(pp->pd); + + (void) spi_master_put(pp->bitbang.master); +} + +static struct parport_driver butterfly_driver = { + .name = "spi_butterfly", + .attach = butterfly_attach, + .detach = butterfly_detach, +}; + + +static int __init butterfly_init(void) +{ + return parport_register_driver(&butterfly_driver); +} +device_initcall(butterfly_init); + +static void __exit butterfly_exit(void) +{ + parport_unregister_driver(&butterfly_driver); +} +module_exit(butterfly_exit); + +MODULE_DESCRIPTION("Parport Adapter driver for AVR Butterfly"); +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi-coldfire-qspi.c b/drivers/spi/spi-coldfire-qspi.c new file mode 100644 index 0000000..ae2cd1c --- /dev/null +++ b/drivers/spi/spi-coldfire-qspi.c @@ -0,0 +1,642 @@ +/* + * Freescale/Motorola Coldfire Queued SPI driver + * + * Copyright 2010 Steven King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA + * +*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#define DRIVER_NAME "mcfqspi" + +#define MCFQSPI_BUSCLK (MCF_BUSCLK / 2) + +#define MCFQSPI_QMR 0x00 +#define MCFQSPI_QMR_MSTR 0x8000 +#define MCFQSPI_QMR_CPOL 0x0200 +#define MCFQSPI_QMR_CPHA 0x0100 +#define MCFQSPI_QDLYR 0x04 +#define MCFQSPI_QDLYR_SPE 0x8000 +#define MCFQSPI_QWR 0x08 +#define MCFQSPI_QWR_HALT 0x8000 +#define MCFQSPI_QWR_WREN 0x4000 +#define MCFQSPI_QWR_CSIV 0x1000 +#define MCFQSPI_QIR 0x0C +#define MCFQSPI_QIR_WCEFB 0x8000 +#define MCFQSPI_QIR_ABRTB 0x4000 +#define MCFQSPI_QIR_ABRTL 0x1000 +#define MCFQSPI_QIR_WCEFE 0x0800 +#define MCFQSPI_QIR_ABRTE 0x0400 +#define MCFQSPI_QIR_SPIFE 0x0100 +#define MCFQSPI_QIR_WCEF 0x0008 +#define MCFQSPI_QIR_ABRT 0x0004 +#define MCFQSPI_QIR_SPIF 0x0001 +#define MCFQSPI_QAR 0x010 +#define MCFQSPI_QAR_TXBUF 0x00 +#define MCFQSPI_QAR_RXBUF 0x10 +#define MCFQSPI_QAR_CMDBUF 0x20 +#define MCFQSPI_QDR 0x014 +#define MCFQSPI_QCR 0x014 +#define MCFQSPI_QCR_CONT 0x8000 +#define MCFQSPI_QCR_BITSE 0x4000 +#define MCFQSPI_QCR_DT 0x2000 + +struct mcfqspi { + void __iomem *iobase; + int irq; + struct clk *clk; + struct mcfqspi_cs_control *cs_control; + + wait_queue_head_t waitq; + + struct work_struct work; + struct workqueue_struct *workq; + spinlock_t lock; + struct list_head msgq; +}; + +static void mcfqspi_wr_qmr(struct mcfqspi *mcfqspi, u16 val) +{ + writew(val, mcfqspi->iobase + MCFQSPI_QMR); +} + +static void mcfqspi_wr_qdlyr(struct mcfqspi *mcfqspi, u16 val) +{ + writew(val, mcfqspi->iobase + MCFQSPI_QDLYR); +} + +static u16 mcfqspi_rd_qdlyr(struct mcfqspi *mcfqspi) +{ + return readw(mcfqspi->iobase + MCFQSPI_QDLYR); +} + +static void mcfqspi_wr_qwr(struct mcfqspi *mcfqspi, u16 val) +{ + writew(val, mcfqspi->iobase + MCFQSPI_QWR); +} + +static void mcfqspi_wr_qir(struct mcfqspi *mcfqspi, u16 val) +{ + writew(val, mcfqspi->iobase + MCFQSPI_QIR); +} + +static void mcfqspi_wr_qar(struct mcfqspi *mcfqspi, u16 val) +{ + writew(val, mcfqspi->iobase + MCFQSPI_QAR); +} + +static void mcfqspi_wr_qdr(struct mcfqspi *mcfqspi, u16 val) +{ + writew(val, mcfqspi->iobase + MCFQSPI_QDR); +} + +static u16 mcfqspi_rd_qdr(struct mcfqspi *mcfqspi) +{ + return readw(mcfqspi->iobase + MCFQSPI_QDR); +} + +static void mcfqspi_cs_select(struct mcfqspi *mcfqspi, u8 chip_select, + bool cs_high) +{ + mcfqspi->cs_control->select(mcfqspi->cs_control, chip_select, cs_high); +} + +static void mcfqspi_cs_deselect(struct mcfqspi *mcfqspi, u8 chip_select, + bool cs_high) +{ + mcfqspi->cs_control->deselect(mcfqspi->cs_control, chip_select, cs_high); +} + +static int mcfqspi_cs_setup(struct mcfqspi *mcfqspi) +{ + return (mcfqspi->cs_control && mcfqspi->cs_control->setup) ? 
+ mcfqspi->cs_control->setup(mcfqspi->cs_control) : 0; +} + +static void mcfqspi_cs_teardown(struct mcfqspi *mcfqspi) +{ + if (mcfqspi->cs_control && mcfqspi->cs_control->teardown) + mcfqspi->cs_control->teardown(mcfqspi->cs_control); +} + +static u8 mcfqspi_qmr_baud(u32 speed_hz) +{ + return clamp((MCFQSPI_BUSCLK + speed_hz - 1) / speed_hz, 2u, 255u); +} + +static bool mcfqspi_qdlyr_spe(struct mcfqspi *mcfqspi) +{ + return mcfqspi_rd_qdlyr(mcfqspi) & MCFQSPI_QDLYR_SPE; +} + +static irqreturn_t mcfqspi_irq_handler(int this_irq, void *dev_id) +{ + struct mcfqspi *mcfqspi = dev_id; + + /* clear interrupt */ + mcfqspi_wr_qir(mcfqspi, MCFQSPI_QIR_SPIFE | MCFQSPI_QIR_SPIF); + wake_up(&mcfqspi->waitq); + + return IRQ_HANDLED; +} + +static void mcfqspi_transfer_msg8(struct mcfqspi *mcfqspi, unsigned count, + const u8 *txbuf, u8 *rxbuf) +{ + unsigned i, n, offset = 0; + + n = min(count, 16u); + + mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_CMDBUF); + for (i = 0; i < n; ++i) + mcfqspi_wr_qdr(mcfqspi, MCFQSPI_QCR_BITSE); + + mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_TXBUF); + if (txbuf) + for (i = 0; i < n; ++i) + mcfqspi_wr_qdr(mcfqspi, *txbuf++); + else + for (i = 0; i < count; ++i) + mcfqspi_wr_qdr(mcfqspi, 0); + + count -= n; + if (count) { + u16 qwr = 0xf08; + mcfqspi_wr_qwr(mcfqspi, 0x700); + mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE); + + do { + wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi)); + mcfqspi_wr_qwr(mcfqspi, qwr); + mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE); + if (rxbuf) { + mcfqspi_wr_qar(mcfqspi, + MCFQSPI_QAR_RXBUF + offset); + for (i = 0; i < 8; ++i) + *rxbuf++ = mcfqspi_rd_qdr(mcfqspi); + } + n = min(count, 8u); + if (txbuf) { + mcfqspi_wr_qar(mcfqspi, + MCFQSPI_QAR_TXBUF + offset); + for (i = 0; i < n; ++i) + mcfqspi_wr_qdr(mcfqspi, *txbuf++); + } + qwr = (offset ? 
0x808 : 0) + ((n - 1) << 8); + offset ^= 8; + count -= n; + } while (count); + wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi)); + mcfqspi_wr_qwr(mcfqspi, qwr); + mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE); + if (rxbuf) { + mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_RXBUF + offset); + for (i = 0; i < 8; ++i) + *rxbuf++ = mcfqspi_rd_qdr(mcfqspi); + offset ^= 8; + } + } else { + mcfqspi_wr_qwr(mcfqspi, (n - 1) << 8); + mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE); + } + wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi)); + if (rxbuf) { + mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_RXBUF + offset); + for (i = 0; i < n; ++i) + *rxbuf++ = mcfqspi_rd_qdr(mcfqspi); + } +} + +static void mcfqspi_transfer_msg16(struct mcfqspi *mcfqspi, unsigned count, + const u16 *txbuf, u16 *rxbuf) +{ + unsigned i, n, offset = 0; + + n = min(count, 16u); + + mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_CMDBUF); + for (i = 0; i < n; ++i) + mcfqspi_wr_qdr(mcfqspi, MCFQSPI_QCR_BITSE); + + mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_TXBUF); + if (txbuf) + for (i = 0; i < n; ++i) + mcfqspi_wr_qdr(mcfqspi, *txbuf++); + else + for (i = 0; i < count; ++i) + mcfqspi_wr_qdr(mcfqspi, 0); + + count -= n; + if (count) { + u16 qwr = 0xf08; + mcfqspi_wr_qwr(mcfqspi, 0x700); + mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE); + + do { + wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi)); + mcfqspi_wr_qwr(mcfqspi, qwr); + mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE); + if (rxbuf) { + mcfqspi_wr_qar(mcfqspi, + MCFQSPI_QAR_RXBUF + offset); + for (i = 0; i < 8; ++i) + *rxbuf++ = mcfqspi_rd_qdr(mcfqspi); + } + n = min(count, 8u); + if (txbuf) { + mcfqspi_wr_qar(mcfqspi, + MCFQSPI_QAR_TXBUF + offset); + for (i = 0; i < n; ++i) + mcfqspi_wr_qdr(mcfqspi, *txbuf++); + } + qwr = (offset ? 0x808 : 0x000) + ((n - 1) << 8); + offset ^= 8; + count -= n; + } while (count); + wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi)); + mcfqspi_wr_qwr(mcfqspi, qwr); + mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE); + if (rxbuf) { + mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_RXBUF + offset); + for (i = 0; i < 8; ++i) + *rxbuf++ = mcfqspi_rd_qdr(mcfqspi); + offset ^= 8; + } + } else { + mcfqspi_wr_qwr(mcfqspi, (n - 1) << 8); + mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE); + } + wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi)); + if (rxbuf) { + mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_RXBUF + offset); + for (i = 0; i < n; ++i) + *rxbuf++ = mcfqspi_rd_qdr(mcfqspi); + } +} + +static void mcfqspi_work(struct work_struct *work) +{ + struct mcfqspi *mcfqspi = container_of(work, struct mcfqspi, work); + unsigned long flags; + + spin_lock_irqsave(&mcfqspi->lock, flags); + while (!list_empty(&mcfqspi->msgq)) { + struct spi_message *msg; + struct spi_device *spi; + struct spi_transfer *xfer; + int status = 0; + + msg = container_of(mcfqspi->msgq.next, struct spi_message, + queue); + + list_del_init(&msg->queue); + spin_unlock_irqrestore(&mcfqspi->lock, flags); + + spi = msg->spi; + + list_for_each_entry(xfer, &msg->transfers, transfer_list) { + bool cs_high = spi->mode & SPI_CS_HIGH; + u16 qmr = MCFQSPI_QMR_MSTR; + + if (xfer->bits_per_word) + qmr |= xfer->bits_per_word << 10; + else + qmr |= spi->bits_per_word << 10; + if (spi->mode & SPI_CPHA) + qmr |= MCFQSPI_QMR_CPHA; + if (spi->mode & SPI_CPOL) + qmr |= MCFQSPI_QMR_CPOL; + if (xfer->speed_hz) + qmr |= mcfqspi_qmr_baud(xfer->speed_hz); + else + qmr |= mcfqspi_qmr_baud(spi->max_speed_hz); + mcfqspi_wr_qmr(mcfqspi, qmr); + + mcfqspi_cs_select(mcfqspi, spi->chip_select, cs_high); + + mcfqspi_wr_qir(mcfqspi, MCFQSPI_QIR_SPIFE); + if 
((xfer->bits_per_word ? xfer->bits_per_word : + spi->bits_per_word) == 8) + mcfqspi_transfer_msg8(mcfqspi, xfer->len, + xfer->tx_buf, + xfer->rx_buf); + else + mcfqspi_transfer_msg16(mcfqspi, xfer->len / 2, + xfer->tx_buf, + xfer->rx_buf); + mcfqspi_wr_qir(mcfqspi, 0); + + if (xfer->delay_usecs) + udelay(xfer->delay_usecs); + if (xfer->cs_change) { + if (!list_is_last(&xfer->transfer_list, + &msg->transfers)) + mcfqspi_cs_deselect(mcfqspi, + spi->chip_select, + cs_high); + } else { + if (list_is_last(&xfer->transfer_list, + &msg->transfers)) + mcfqspi_cs_deselect(mcfqspi, + spi->chip_select, + cs_high); + } + msg->actual_length += xfer->len; + } + msg->status = status; + msg->complete(msg->context); + + spin_lock_irqsave(&mcfqspi->lock, flags); + } + spin_unlock_irqrestore(&mcfqspi->lock, flags); +} + +static int mcfqspi_transfer(struct spi_device *spi, struct spi_message *msg) +{ + struct mcfqspi *mcfqspi; + struct spi_transfer *xfer; + unsigned long flags; + + mcfqspi = spi_master_get_devdata(spi->master); + + list_for_each_entry(xfer, &msg->transfers, transfer_list) { + if (xfer->bits_per_word && ((xfer->bits_per_word < 8) + || (xfer->bits_per_word > 16))) { + dev_dbg(&spi->dev, + "%d bits per word is not supported\n", + xfer->bits_per_word); + goto fail; + } + if (xfer->speed_hz) { + u32 real_speed = MCFQSPI_BUSCLK / + mcfqspi_qmr_baud(xfer->speed_hz); + if (real_speed != xfer->speed_hz) + dev_dbg(&spi->dev, + "using speed %d instead of %d\n", + real_speed, xfer->speed_hz); + } + } + msg->status = -EINPROGRESS; + msg->actual_length = 0; + + spin_lock_irqsave(&mcfqspi->lock, flags); + list_add_tail(&msg->queue, &mcfqspi->msgq); + queue_work(mcfqspi->workq, &mcfqspi->work); + spin_unlock_irqrestore(&mcfqspi->lock, flags); + + return 0; +fail: + msg->status = -EINVAL; + return -EINVAL; +} + +static int mcfqspi_setup(struct spi_device *spi) +{ + if ((spi->bits_per_word < 8) || (spi->bits_per_word > 16)) { + dev_dbg(&spi->dev, "%d bits per word is not supported\n", + spi->bits_per_word); + return -EINVAL; + } + if (spi->chip_select >= spi->master->num_chipselect) { + dev_dbg(&spi->dev, "%d chip select is out of range\n", + spi->chip_select); + return -EINVAL; + } + + mcfqspi_cs_deselect(spi_master_get_devdata(spi->master), + spi->chip_select, spi->mode & SPI_CS_HIGH); + + dev_dbg(&spi->dev, + "bits per word %d, chip select %d, speed %d KHz\n", + spi->bits_per_word, spi->chip_select, + (MCFQSPI_BUSCLK / mcfqspi_qmr_baud(spi->max_speed_hz)) + / 1000); + + return 0; +} + +static int __devinit mcfqspi_probe(struct platform_device *pdev) +{ + struct spi_master *master; + struct mcfqspi *mcfqspi; + struct resource *res; + struct mcfqspi_platform_data *pdata; + int status; + + master = spi_alloc_master(&pdev->dev, sizeof(*mcfqspi)); + if (master == NULL) { + dev_dbg(&pdev->dev, "spi_alloc_master failed\n"); + return -ENOMEM; + } + + mcfqspi = spi_master_get_devdata(master); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_dbg(&pdev->dev, "platform_get_resource failed\n"); + status = -ENXIO; + goto fail0; + } + + if (!request_mem_region(res->start, resource_size(res), pdev->name)) { + dev_dbg(&pdev->dev, "request_mem_region failed\n"); + status = -EBUSY; + goto fail0; + } + + mcfqspi->iobase = ioremap(res->start, resource_size(res)); + if (!mcfqspi->iobase) { + dev_dbg(&pdev->dev, "ioremap failed\n"); + status = -ENOMEM; + goto fail1; + } + + mcfqspi->irq = platform_get_irq(pdev, 0); + if (mcfqspi->irq < 0) { + dev_dbg(&pdev->dev, "platform_get_irq failed\n"); + status 
= -ENXIO; + goto fail2; + } + + status = request_irq(mcfqspi->irq, mcfqspi_irq_handler, IRQF_DISABLED, + pdev->name, mcfqspi); + if (status) { + dev_dbg(&pdev->dev, "request_irq failed\n"); + goto fail2; + } + + mcfqspi->clk = clk_get(&pdev->dev, "qspi_clk"); + if (IS_ERR(mcfqspi->clk)) { + dev_dbg(&pdev->dev, "clk_get failed\n"); + status = PTR_ERR(mcfqspi->clk); + goto fail3; + } + clk_enable(mcfqspi->clk); + + mcfqspi->workq = create_singlethread_workqueue(dev_name(master->dev.parent)); + if (!mcfqspi->workq) { + dev_dbg(&pdev->dev, "create_workqueue failed\n"); + status = -ENOMEM; + goto fail4; + } + INIT_WORK(&mcfqspi->work, mcfqspi_work); + spin_lock_init(&mcfqspi->lock); + INIT_LIST_HEAD(&mcfqspi->msgq); + init_waitqueue_head(&mcfqspi->waitq); + + pdata = pdev->dev.platform_data; + if (!pdata) { + dev_dbg(&pdev->dev, "platform data is missing\n"); + goto fail5; + } + master->bus_num = pdata->bus_num; + master->num_chipselect = pdata->num_chipselect; + + mcfqspi->cs_control = pdata->cs_control; + status = mcfqspi_cs_setup(mcfqspi); + if (status) { + dev_dbg(&pdev->dev, "error initializing cs_control\n"); + goto fail5; + } + + master->mode_bits = SPI_CS_HIGH | SPI_CPOL | SPI_CPHA; + master->setup = mcfqspi_setup; + master->transfer = mcfqspi_transfer; + + platform_set_drvdata(pdev, master); + + status = spi_register_master(master); + if (status) { + dev_dbg(&pdev->dev, "spi_register_master failed\n"); + goto fail6; + } + dev_info(&pdev->dev, "Coldfire QSPI bus driver\n"); + + return 0; + +fail6: + mcfqspi_cs_teardown(mcfqspi); +fail5: + destroy_workqueue(mcfqspi->workq); +fail4: + clk_disable(mcfqspi->clk); + clk_put(mcfqspi->clk); +fail3: + free_irq(mcfqspi->irq, mcfqspi); +fail2: + iounmap(mcfqspi->iobase); +fail1: + release_mem_region(res->start, resource_size(res)); +fail0: + spi_master_put(master); + + dev_dbg(&pdev->dev, "Coldfire QSPI probe failed\n"); + + return status; +} + +static int __devexit mcfqspi_remove(struct platform_device *pdev) +{ + struct spi_master *master = platform_get_drvdata(pdev); + struct mcfqspi *mcfqspi = spi_master_get_devdata(master); + struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + /* disable the hardware (set the baud rate to 0) */ + mcfqspi_wr_qmr(mcfqspi, MCFQSPI_QMR_MSTR); + + platform_set_drvdata(pdev, NULL); + mcfqspi_cs_teardown(mcfqspi); + destroy_workqueue(mcfqspi->workq); + clk_disable(mcfqspi->clk); + clk_put(mcfqspi->clk); + free_irq(mcfqspi->irq, mcfqspi); + iounmap(mcfqspi->iobase); + release_mem_region(res->start, resource_size(res)); + spi_unregister_master(master); + spi_master_put(master); + + return 0; +} + +#ifdef CONFIG_PM + +static int mcfqspi_suspend(struct device *dev) +{ + struct mcfqspi *mcfqspi = platform_get_drvdata(to_platform_device(dev)); + + clk_disable(mcfqspi->clk); + + return 0; +} + +static int mcfqspi_resume(struct device *dev) +{ + struct mcfqspi *mcfqspi = platform_get_drvdata(to_platform_device(dev)); + + clk_enable(mcfqspi->clk); + + return 0; +} + +static struct dev_pm_ops mcfqspi_dev_pm_ops = { + .suspend = mcfqspi_suspend, + .resume = mcfqspi_resume, +}; + +#define MCFQSPI_DEV_PM_OPS (&mcfqspi_dev_pm_ops) +#else +#define MCFQSPI_DEV_PM_OPS NULL +#endif + +static struct platform_driver mcfqspi_driver = { + .driver.name = DRIVER_NAME, + .driver.owner = THIS_MODULE, + .driver.pm = MCFQSPI_DEV_PM_OPS, + .remove = __devexit_p(mcfqspi_remove), +}; + +static int __init mcfqspi_init(void) +{ + return platform_driver_probe(&mcfqspi_driver, mcfqspi_probe); +} +module_init(mcfqspi_init); + 
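
The QMR[BAUD] field programmed by mcfqspi_qmr_baud() above is nothing more than a clamped ceiling divide of the halved bus clock. A minimal, self-contained sketch of the same arithmetic (assuming a hypothetical 66 MHz MCF_BUSCLK, since the real value is platform specific) shows what effective SCLK a requested speed actually gets:

#include <stdio.h>

/* Hypothetical bus clock; in the driver this is MCF_BUSCLK / 2. */
#define QSPI_CLK (66000000 / 2)

/* Same arithmetic as mcfqspi_qmr_baud(): ceiling divide, clamped to 2..255. */
static unsigned int qmr_baud(unsigned int speed_hz)
{
	unsigned int div = (QSPI_CLK + speed_hz - 1) / speed_hz;

	if (div < 2)
		div = 2;
	if (div > 255)
		div = 255;
	return div;
}

int main(void)
{
	/* A 10 MHz request on a 33 MHz QSPI clock actually runs at 8.25 MHz. */
	unsigned int div = qmr_baud(10000000);

	printf("divider %u -> %u Hz\n", div, QSPI_CLK / div);
	return 0;
}

This rounding is also why mcfqspi_transfer() above only reports (via dev_dbg) when the achievable rate differs from the requested xfer->speed_hz instead of failing the message.
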
+static void __exit mcfqspi_exit(void) +{ + platform_driver_unregister(&mcfqspi_driver); +} +module_exit(mcfqspi_exit); + +MODULE_AUTHOR("Steven King "); +MODULE_DESCRIPTION("Coldfire QSPI Controller Driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:" DRIVER_NAME); diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c new file mode 100644 index 0000000..1f0ed80 --- /dev/null +++ b/drivers/spi/spi-davinci.c @@ -0,0 +1,1030 @@ +/* + * Copyright (C) 2009 Texas Instruments. + * Copyright (C) 2010 EF Johnson Technologies + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#define SPI_NO_RESOURCE ((resource_size_t)-1) + +#define SPI_MAX_CHIPSELECT 2 + +#define CS_DEFAULT 0xFF + +#define SPIFMT_PHASE_MASK BIT(16) +#define SPIFMT_POLARITY_MASK BIT(17) +#define SPIFMT_DISTIMER_MASK BIT(18) +#define SPIFMT_SHIFTDIR_MASK BIT(20) +#define SPIFMT_WAITENA_MASK BIT(21) +#define SPIFMT_PARITYENA_MASK BIT(22) +#define SPIFMT_ODD_PARITY_MASK BIT(23) +#define SPIFMT_WDELAY_MASK 0x3f000000u +#define SPIFMT_WDELAY_SHIFT 24 +#define SPIFMT_PRESCALE_SHIFT 8 + +/* SPIPC0 */ +#define SPIPC0_DIFUN_MASK BIT(11) /* MISO */ +#define SPIPC0_DOFUN_MASK BIT(10) /* MOSI */ +#define SPIPC0_CLKFUN_MASK BIT(9) /* CLK */ +#define SPIPC0_SPIENA_MASK BIT(8) /* nREADY */ + +#define SPIINT_MASKALL 0x0101035F +#define SPIINT_MASKINT 0x0000015F +#define SPI_INTLVL_1 0x000001FF +#define SPI_INTLVL_0 0x00000000 + +/* SPIDAT1 (upper 16 bit defines) */ +#define SPIDAT1_CSHOLD_MASK BIT(12) + +/* SPIGCR1 */ +#define SPIGCR1_CLKMOD_MASK BIT(1) +#define SPIGCR1_MASTER_MASK BIT(0) +#define SPIGCR1_POWERDOWN_MASK BIT(8) +#define SPIGCR1_LOOPBACK_MASK BIT(16) +#define SPIGCR1_SPIENA_MASK BIT(24) + +/* SPIBUF */ +#define SPIBUF_TXFULL_MASK BIT(29) +#define SPIBUF_RXEMPTY_MASK BIT(31) + +/* SPIDELAY */ +#define SPIDELAY_C2TDELAY_SHIFT 24 +#define SPIDELAY_C2TDELAY_MASK (0xFF << SPIDELAY_C2TDELAY_SHIFT) +#define SPIDELAY_T2CDELAY_SHIFT 16 +#define SPIDELAY_T2CDELAY_MASK (0xFF << SPIDELAY_T2CDELAY_SHIFT) +#define SPIDELAY_T2EDELAY_SHIFT 8 +#define SPIDELAY_T2EDELAY_MASK (0xFF << SPIDELAY_T2EDELAY_SHIFT) +#define SPIDELAY_C2EDELAY_SHIFT 0 +#define SPIDELAY_C2EDELAY_MASK 0xFF + +/* Error Masks */ +#define SPIFLG_DLEN_ERR_MASK BIT(0) +#define SPIFLG_TIMEOUT_MASK BIT(1) +#define SPIFLG_PARERR_MASK BIT(2) +#define SPIFLG_DESYNC_MASK BIT(3) +#define SPIFLG_BITERR_MASK BIT(4) +#define SPIFLG_OVRRUN_MASK BIT(6) +#define SPIFLG_BUF_INIT_ACTIVE_MASK BIT(24) +#define SPIFLG_ERROR_MASK (SPIFLG_DLEN_ERR_MASK \ + | SPIFLG_TIMEOUT_MASK | SPIFLG_PARERR_MASK \ + | SPIFLG_DESYNC_MASK | SPIFLG_BITERR_MASK \ + | SPIFLG_OVRRUN_MASK) + +#define SPIINT_DMA_REQ_EN BIT(16) + +/* SPI Controller registers */ +#define SPIGCR0 0x00 +#define 
SPIGCR1 0x04 +#define SPIINT 0x08 +#define SPILVL 0x0c +#define SPIFLG 0x10 +#define SPIPC0 0x14 +#define SPIDAT1 0x3c +#define SPIBUF 0x40 +#define SPIDELAY 0x48 +#define SPIDEF 0x4c +#define SPIFMT0 0x50 + +/* We have 2 DMA channels per CS, one for RX and one for TX */ +struct davinci_spi_dma { + int tx_channel; + int rx_channel; + int dummy_param_slot; + enum dma_event_q eventq; +}; + +/* SPI Controller driver's private data. */ +struct davinci_spi { + struct spi_bitbang bitbang; + struct clk *clk; + + u8 version; + resource_size_t pbase; + void __iomem *base; + u32 irq; + struct completion done; + + const void *tx; + void *rx; +#define SPI_TMP_BUFSZ (SMP_CACHE_BYTES + 1) + u8 rx_tmp_buf[SPI_TMP_BUFSZ]; + int rcount; + int wcount; + struct davinci_spi_dma dma; + struct davinci_spi_platform_data *pdata; + + void (*get_rx)(u32 rx_data, struct davinci_spi *); + u32 (*get_tx)(struct davinci_spi *); + + u8 bytes_per_word[SPI_MAX_CHIPSELECT]; +}; + +static struct davinci_spi_config davinci_spi_default_cfg; + +static void davinci_spi_rx_buf_u8(u32 data, struct davinci_spi *dspi) +{ + if (dspi->rx) { + u8 *rx = dspi->rx; + *rx++ = (u8)data; + dspi->rx = rx; + } +} + +static void davinci_spi_rx_buf_u16(u32 data, struct davinci_spi *dspi) +{ + if (dspi->rx) { + u16 *rx = dspi->rx; + *rx++ = (u16)data; + dspi->rx = rx; + } +} + +static u32 davinci_spi_tx_buf_u8(struct davinci_spi *dspi) +{ + u32 data = 0; + if (dspi->tx) { + const u8 *tx = dspi->tx; + data = *tx++; + dspi->tx = tx; + } + return data; +} + +static u32 davinci_spi_tx_buf_u16(struct davinci_spi *dspi) +{ + u32 data = 0; + if (dspi->tx) { + const u16 *tx = dspi->tx; + data = *tx++; + dspi->tx = tx; + } + return data; +} + +static inline void set_io_bits(void __iomem *addr, u32 bits) +{ + u32 v = ioread32(addr); + + v |= bits; + iowrite32(v, addr); +} + +static inline void clear_io_bits(void __iomem *addr, u32 bits) +{ + u32 v = ioread32(addr); + + v &= ~bits; + iowrite32(v, addr); +} + +/* + * Interface to control the chip select signal + */ +static void davinci_spi_chipselect(struct spi_device *spi, int value) +{ + struct davinci_spi *dspi; + struct davinci_spi_platform_data *pdata; + u8 chip_sel = spi->chip_select; + u16 spidat1 = CS_DEFAULT; + bool gpio_chipsel = false; + + dspi = spi_master_get_devdata(spi->master); + pdata = dspi->pdata; + + if (pdata->chip_sel && chip_sel < pdata->num_chipselect && + pdata->chip_sel[chip_sel] != SPI_INTERN_CS) + gpio_chipsel = true; + + /* + * Board specific chip select logic decides the polarity and cs + * line for the controller + */ + if (gpio_chipsel) { + if (value == BITBANG_CS_ACTIVE) + gpio_set_value(pdata->chip_sel[chip_sel], 0); + else + gpio_set_value(pdata->chip_sel[chip_sel], 1); + } else { + if (value == BITBANG_CS_ACTIVE) { + spidat1 |= SPIDAT1_CSHOLD_MASK; + spidat1 &= ~(0x1 << chip_sel); + } + + iowrite16(spidat1, dspi->base + SPIDAT1 + 2); + } +} + +/** + * davinci_spi_get_prescale - Calculates the correct prescale value + * @maxspeed_hz: the maximum rate the SPI clock can run at + * + * This function calculates the prescale value that generates a clock rate + * less than or equal to the specified maximum. + * + * Returns: calculated prescale - 1 for easy programming into SPI registers + * or negative error number if valid prescalar cannot be updated. 
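+ *
+ * (Illustrative numbers, not taken from this patch: with a 150 MHz
+ * functional clock and a max_speed_hz of 10 MHz, DIV_ROUND_UP() yields 15,
+ * so 14 is returned and the SPI clock runs at 150 MHz / 15 = 10 MHz;
+ * anything needing a divider below 3 or above 256 is rejected with -EINVAL.)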
+ */ +static inline int davinci_spi_get_prescale(struct davinci_spi *dspi, + u32 max_speed_hz) +{ + int ret; + + ret = DIV_ROUND_UP(clk_get_rate(dspi->clk), max_speed_hz); + + if (ret < 3 || ret > 256) + return -EINVAL; + + return ret - 1; +} + +/** + * davinci_spi_setup_transfer - This functions will determine transfer method + * @spi: spi device on which data transfer to be done + * @t: spi transfer in which transfer info is filled + * + * This function determines data transfer method (8/16/32 bit transfer). + * It will also set the SPI Clock Control register according to + * SPI slave device freq. + */ +static int davinci_spi_setup_transfer(struct spi_device *spi, + struct spi_transfer *t) +{ + + struct davinci_spi *dspi; + struct davinci_spi_config *spicfg; + u8 bits_per_word = 0; + u32 hz = 0, spifmt = 0, prescale = 0; + + dspi = spi_master_get_devdata(spi->master); + spicfg = (struct davinci_spi_config *)spi->controller_data; + if (!spicfg) + spicfg = &davinci_spi_default_cfg; + + if (t) { + bits_per_word = t->bits_per_word; + hz = t->speed_hz; + } + + /* if bits_per_word is not set then set it default */ + if (!bits_per_word) + bits_per_word = spi->bits_per_word; + + /* + * Assign function pointer to appropriate transfer method + * 8bit, 16bit or 32bit transfer + */ + if (bits_per_word <= 8 && bits_per_word >= 2) { + dspi->get_rx = davinci_spi_rx_buf_u8; + dspi->get_tx = davinci_spi_tx_buf_u8; + dspi->bytes_per_word[spi->chip_select] = 1; + } else if (bits_per_word <= 16 && bits_per_word >= 2) { + dspi->get_rx = davinci_spi_rx_buf_u16; + dspi->get_tx = davinci_spi_tx_buf_u16; + dspi->bytes_per_word[spi->chip_select] = 2; + } else + return -EINVAL; + + if (!hz) + hz = spi->max_speed_hz; + + /* Set up SPIFMTn register, unique to this chipselect. 
*/ + + prescale = davinci_spi_get_prescale(dspi, hz); + if (prescale < 0) + return prescale; + + spifmt = (prescale << SPIFMT_PRESCALE_SHIFT) | (bits_per_word & 0x1f); + + if (spi->mode & SPI_LSB_FIRST) + spifmt |= SPIFMT_SHIFTDIR_MASK; + + if (spi->mode & SPI_CPOL) + spifmt |= SPIFMT_POLARITY_MASK; + + if (!(spi->mode & SPI_CPHA)) + spifmt |= SPIFMT_PHASE_MASK; + + /* + * Version 1 hardware supports two basic SPI modes: + * - Standard SPI mode uses 4 pins, with chipselect + * - 3 pin SPI is a 4 pin variant without CS (SPI_NO_CS) + * (distinct from SPI_3WIRE, with just one data wire; + * or similar variants without MOSI or without MISO) + * + * Version 2 hardware supports an optional handshaking signal, + * so it can support two more modes: + * - 5 pin SPI variant is standard SPI plus SPI_READY + * - 4 pin with enable is (SPI_READY | SPI_NO_CS) + */ + + if (dspi->version == SPI_VERSION_2) { + + u32 delay = 0; + + spifmt |= ((spicfg->wdelay << SPIFMT_WDELAY_SHIFT) + & SPIFMT_WDELAY_MASK); + + if (spicfg->odd_parity) + spifmt |= SPIFMT_ODD_PARITY_MASK; + + if (spicfg->parity_enable) + spifmt |= SPIFMT_PARITYENA_MASK; + + if (spicfg->timer_disable) { + spifmt |= SPIFMT_DISTIMER_MASK; + } else { + delay |= (spicfg->c2tdelay << SPIDELAY_C2TDELAY_SHIFT) + & SPIDELAY_C2TDELAY_MASK; + delay |= (spicfg->t2cdelay << SPIDELAY_T2CDELAY_SHIFT) + & SPIDELAY_T2CDELAY_MASK; + } + + if (spi->mode & SPI_READY) { + spifmt |= SPIFMT_WAITENA_MASK; + delay |= (spicfg->t2edelay << SPIDELAY_T2EDELAY_SHIFT) + & SPIDELAY_T2EDELAY_MASK; + delay |= (spicfg->c2edelay << SPIDELAY_C2EDELAY_SHIFT) + & SPIDELAY_C2EDELAY_MASK; + } + + iowrite32(delay, dspi->base + SPIDELAY); + } + + iowrite32(spifmt, dspi->base + SPIFMT0); + + return 0; +} + +/** + * davinci_spi_setup - This functions will set default transfer method + * @spi: spi device on which data transfer to be done + * + * This functions sets the default transfer method. 
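+ *
+ * (Illustrative: a slave that registers with bits_per_word left at zero
+ * gets the 8-bit default here, and setting SPI_LOOP in spi->mode toggles
+ * the controller's internal loopback through SPIGCR1.)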
+ */ +static int davinci_spi_setup(struct spi_device *spi) +{ + int retval = 0; + struct davinci_spi *dspi; + struct davinci_spi_platform_data *pdata; + + dspi = spi_master_get_devdata(spi->master); + pdata = dspi->pdata; + + /* if bits per word length is zero then set it default 8 */ + if (!spi->bits_per_word) + spi->bits_per_word = 8; + + if (!(spi->mode & SPI_NO_CS)) { + if ((pdata->chip_sel == NULL) || + (pdata->chip_sel[spi->chip_select] == SPI_INTERN_CS)) + set_io_bits(dspi->base + SPIPC0, 1 << spi->chip_select); + + } + + if (spi->mode & SPI_READY) + set_io_bits(dspi->base + SPIPC0, SPIPC0_SPIENA_MASK); + + if (spi->mode & SPI_LOOP) + set_io_bits(dspi->base + SPIGCR1, SPIGCR1_LOOPBACK_MASK); + else + clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_LOOPBACK_MASK); + + return retval; +} + +static int davinci_spi_check_error(struct davinci_spi *dspi, int int_status) +{ + struct device *sdev = dspi->bitbang.master->dev.parent; + + if (int_status & SPIFLG_TIMEOUT_MASK) { + dev_dbg(sdev, "SPI Time-out Error\n"); + return -ETIMEDOUT; + } + if (int_status & SPIFLG_DESYNC_MASK) { + dev_dbg(sdev, "SPI Desynchronization Error\n"); + return -EIO; + } + if (int_status & SPIFLG_BITERR_MASK) { + dev_dbg(sdev, "SPI Bit error\n"); + return -EIO; + } + + if (dspi->version == SPI_VERSION_2) { + if (int_status & SPIFLG_DLEN_ERR_MASK) { + dev_dbg(sdev, "SPI Data Length Error\n"); + return -EIO; + } + if (int_status & SPIFLG_PARERR_MASK) { + dev_dbg(sdev, "SPI Parity Error\n"); + return -EIO; + } + if (int_status & SPIFLG_OVRRUN_MASK) { + dev_dbg(sdev, "SPI Data Overrun error\n"); + return -EIO; + } + if (int_status & SPIFLG_BUF_INIT_ACTIVE_MASK) { + dev_dbg(sdev, "SPI Buffer Init Active\n"); + return -EBUSY; + } + } + + return 0; +} + +/** + * davinci_spi_process_events - check for and handle any SPI controller events + * @dspi: the controller data + * + * This function will check the SPIFLG register and handle any events that are + * detected there + */ +static int davinci_spi_process_events(struct davinci_spi *dspi) +{ + u32 buf, status, errors = 0, spidat1; + + buf = ioread32(dspi->base + SPIBUF); + + if (dspi->rcount > 0 && !(buf & SPIBUF_RXEMPTY_MASK)) { + dspi->get_rx(buf & 0xFFFF, dspi); + dspi->rcount--; + } + + status = ioread32(dspi->base + SPIFLG); + + if (unlikely(status & SPIFLG_ERROR_MASK)) { + errors = status & SPIFLG_ERROR_MASK; + goto out; + } + + if (dspi->wcount > 0 && !(buf & SPIBUF_TXFULL_MASK)) { + spidat1 = ioread32(dspi->base + SPIDAT1); + dspi->wcount--; + spidat1 &= ~0xFFFF; + spidat1 |= 0xFFFF & dspi->get_tx(dspi); + iowrite32(spidat1, dspi->base + SPIDAT1); + } + +out: + return errors; +} + +static void davinci_spi_dma_callback(unsigned lch, u16 status, void *data) +{ + struct davinci_spi *dspi = data; + struct davinci_spi_dma *dma = &dspi->dma; + + edma_stop(lch); + + if (status == DMA_COMPLETE) { + if (lch == dma->rx_channel) + dspi->rcount = 0; + if (lch == dma->tx_channel) + dspi->wcount = 0; + } + + if ((!dspi->wcount && !dspi->rcount) || (status != DMA_COMPLETE)) + complete(&dspi->done); +} + +/** + * davinci_spi_bufs - functions which will handle transfer data + * @spi: spi device on which data transfer to be done + * @t: spi transfer in which transfer info is filled + * + * This function will put data to be transferred into data register + * of SPI controller and then wait until the completion will be marked + * by the IRQ Handler. 
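+ *
+ * (Which path is taken depends on davinci_spi_config.io_type:
+ * SPI_IO_TYPE_POLL spins on davinci_spi_process_events(), SPI_IO_TYPE_INTR
+ * waits for the completion signalled from the interrupt handler, and
+ * SPI_IO_TYPE_DMA hands the transfer off to EDMA.)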
+ */ +static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t) +{ + struct davinci_spi *dspi; + int data_type, ret; + u32 tx_data, spidat1; + u32 errors = 0; + struct davinci_spi_config *spicfg; + struct davinci_spi_platform_data *pdata; + unsigned uninitialized_var(rx_buf_count); + struct device *sdev; + + dspi = spi_master_get_devdata(spi->master); + pdata = dspi->pdata; + spicfg = (struct davinci_spi_config *)spi->controller_data; + if (!spicfg) + spicfg = &davinci_spi_default_cfg; + sdev = dspi->bitbang.master->dev.parent; + + /* convert len to words based on bits_per_word */ + data_type = dspi->bytes_per_word[spi->chip_select]; + + dspi->tx = t->tx_buf; + dspi->rx = t->rx_buf; + dspi->wcount = t->len / data_type; + dspi->rcount = dspi->wcount; + + spidat1 = ioread32(dspi->base + SPIDAT1); + + clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK); + set_io_bits(dspi->base + SPIGCR1, SPIGCR1_SPIENA_MASK); + + INIT_COMPLETION(dspi->done); + + if (spicfg->io_type == SPI_IO_TYPE_INTR) + set_io_bits(dspi->base + SPIINT, SPIINT_MASKINT); + + if (spicfg->io_type != SPI_IO_TYPE_DMA) { + /* start the transfer */ + dspi->wcount--; + tx_data = dspi->get_tx(dspi); + spidat1 &= 0xFFFF0000; + spidat1 |= tx_data & 0xFFFF; + iowrite32(spidat1, dspi->base + SPIDAT1); + } else { + struct davinci_spi_dma *dma; + unsigned long tx_reg, rx_reg; + struct edmacc_param param; + void *rx_buf; + int b, c; + + dma = &dspi->dma; + + tx_reg = (unsigned long)dspi->pbase + SPIDAT1; + rx_reg = (unsigned long)dspi->pbase + SPIBUF; + + /* + * Transmit DMA setup + * + * If there is transmit data, map the transmit buffer, set it + * as the source of data and set the source B index to data + * size. If there is no transmit data, set the transmit register + * as the source of data, and set the source B index to zero. + * + * The destination is always the transmit register itself. And + * the destination never increments. + */ + + if (t->tx_buf) { + t->tx_dma = dma_map_single(&spi->dev, (void *)t->tx_buf, + t->len, DMA_TO_DEVICE); + if (dma_mapping_error(&spi->dev, t->tx_dma)) { + dev_dbg(sdev, "Unable to DMA map %d bytes" + "TX buffer\n", t->len); + return -ENOMEM; + } + } + + /* + * If number of words is greater than 65535, then we need + * to configure a 3 dimension transfer. Use the BCNTRLD + * feature to allow for transfers that aren't even multiples + * of 65535 (or any other possible b size) by first transferring + * the remainder amount then grabbing the next N blocks of + * 65535 words. + */ + + c = dspi->wcount / (SZ_64K - 1); /* N 65535 Blocks */ + b = dspi->wcount - c * (SZ_64K - 1); /* Remainder */ + if (b) + c++; + else + b = SZ_64K - 1; + + param.opt = TCINTEN | EDMA_TCC(dma->tx_channel); + param.src = t->tx_buf ? t->tx_dma : tx_reg; + param.a_b_cnt = b << 16 | data_type; + param.dst = tx_reg; + param.src_dst_bidx = t->tx_buf ? data_type : 0; + param.link_bcntrld = 0xffffffff; + param.src_dst_cidx = t->tx_buf ? data_type : 0; + param.ccnt = c; + edma_write_slot(dma->tx_channel, ¶m); + edma_link(dma->tx_channel, dma->dummy_param_slot); + + /* + * Receive DMA setup + * + * If there is receive buffer, use it to receive data. If there + * is none provided, use a temporary receive buffer. Set the + * destination B index to 0 so effectively only one byte is used + * in the temporary buffer (address does not increment). + * + * The source of receive data is the receive data register. The + * source address never increments. 
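+ *
+ * (Worked example with illustrative numbers: a 70000-word transfer gives
+ * c = 70000 / 65535 = 1 and b = 70000 - 65535 = 4465; b is non-zero, so c
+ * becomes 2 and EDMA moves 4465 words first, then one full 65535-word
+ * block, exactly as described for the transmit side above.)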
+ */ + + if (t->rx_buf) { + rx_buf = t->rx_buf; + rx_buf_count = t->len; + } else { + rx_buf = dspi->rx_tmp_buf; + rx_buf_count = sizeof(dspi->rx_tmp_buf); + } + + t->rx_dma = dma_map_single(&spi->dev, rx_buf, rx_buf_count, + DMA_FROM_DEVICE); + if (dma_mapping_error(&spi->dev, t->rx_dma)) { + dev_dbg(sdev, "Couldn't DMA map a %d bytes RX buffer\n", + rx_buf_count); + if (t->tx_buf) + dma_unmap_single(NULL, t->tx_dma, t->len, + DMA_TO_DEVICE); + return -ENOMEM; + } + + param.opt = TCINTEN | EDMA_TCC(dma->rx_channel); + param.src = rx_reg; + param.a_b_cnt = b << 16 | data_type; + param.dst = t->rx_dma; + param.src_dst_bidx = (t->rx_buf ? data_type : 0) << 16; + param.link_bcntrld = 0xffffffff; + param.src_dst_cidx = (t->rx_buf ? data_type : 0) << 16; + param.ccnt = c; + edma_write_slot(dma->rx_channel, ¶m); + + if (pdata->cshold_bug) + iowrite16(spidat1 >> 16, dspi->base + SPIDAT1 + 2); + + edma_start(dma->rx_channel); + edma_start(dma->tx_channel); + set_io_bits(dspi->base + SPIINT, SPIINT_DMA_REQ_EN); + } + + /* Wait for the transfer to complete */ + if (spicfg->io_type != SPI_IO_TYPE_POLL) { + wait_for_completion_interruptible(&(dspi->done)); + } else { + while (dspi->rcount > 0 || dspi->wcount > 0) { + errors = davinci_spi_process_events(dspi); + if (errors) + break; + cpu_relax(); + } + } + + clear_io_bits(dspi->base + SPIINT, SPIINT_MASKALL); + if (spicfg->io_type == SPI_IO_TYPE_DMA) { + + if (t->tx_buf) + dma_unmap_single(NULL, t->tx_dma, t->len, + DMA_TO_DEVICE); + + dma_unmap_single(NULL, t->rx_dma, rx_buf_count, + DMA_FROM_DEVICE); + + clear_io_bits(dspi->base + SPIINT, SPIINT_DMA_REQ_EN); + } + + clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_SPIENA_MASK); + set_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK); + + /* + * Check for bit error, desync error,parity error,timeout error and + * receive overflow errors + */ + if (errors) { + ret = davinci_spi_check_error(dspi, errors); + WARN(!ret, "%s: error reported but no error found!\n", + dev_name(&spi->dev)); + return ret; + } + + if (dspi->rcount != 0 || dspi->wcount != 0) { + dev_err(sdev, "SPI data transfer error\n"); + return -EIO; + } + + return t->len; +} + +/** + * davinci_spi_irq - Interrupt handler for SPI Master Controller + * @irq: IRQ number for this SPI Master + * @context_data: structure for SPI Master controller davinci_spi + * + * ISR will determine that interrupt arrives either for READ or WRITE command. + * According to command it will do the appropriate action. It will check + * transfer length and if it is not zero then dispatch transfer command again. + * If transfer length is zero then it will indicate the COMPLETION so that + * davinci_spi_bufs function can go ahead. 
+ */ +static irqreturn_t davinci_spi_irq(s32 irq, void *data) +{ + struct davinci_spi *dspi = data; + int status; + + status = davinci_spi_process_events(dspi); + if (unlikely(status != 0)) + clear_io_bits(dspi->base + SPIINT, SPIINT_MASKINT); + + if ((!dspi->rcount && !dspi->wcount) || status) + complete(&dspi->done); + + return IRQ_HANDLED; +} + +static int davinci_spi_request_dma(struct davinci_spi *dspi) +{ + int r; + struct davinci_spi_dma *dma = &dspi->dma; + + r = edma_alloc_channel(dma->rx_channel, davinci_spi_dma_callback, dspi, + dma->eventq); + if (r < 0) { + pr_err("Unable to request DMA channel for SPI RX\n"); + r = -EAGAIN; + goto rx_dma_failed; + } + + r = edma_alloc_channel(dma->tx_channel, davinci_spi_dma_callback, dspi, + dma->eventq); + if (r < 0) { + pr_err("Unable to request DMA channel for SPI TX\n"); + r = -EAGAIN; + goto tx_dma_failed; + } + + r = edma_alloc_slot(EDMA_CTLR(dma->tx_channel), EDMA_SLOT_ANY); + if (r < 0) { + pr_err("Unable to request SPI TX DMA param slot\n"); + r = -EAGAIN; + goto param_failed; + } + dma->dummy_param_slot = r; + edma_link(dma->dummy_param_slot, dma->dummy_param_slot); + + return 0; +param_failed: + edma_free_channel(dma->tx_channel); +tx_dma_failed: + edma_free_channel(dma->rx_channel); +rx_dma_failed: + return r; +} + +/** + * davinci_spi_probe - probe function for SPI Master Controller + * @pdev: platform_device structure which contains plateform specific data + * + * According to Linux Device Model this function will be invoked by Linux + * with platform_device struct which contains the device specific info. + * This function will map the SPI controller's memory, register IRQ, + * Reset SPI controller and setting its registers to default value. + * It will invoke spi_bitbang_start to create work queue so that client driver + * can register transfer method to work queue. 
+ */ +static int davinci_spi_probe(struct platform_device *pdev) +{ + struct spi_master *master; + struct davinci_spi *dspi; + struct davinci_spi_platform_data *pdata; + struct resource *r, *mem; + resource_size_t dma_rx_chan = SPI_NO_RESOURCE; + resource_size_t dma_tx_chan = SPI_NO_RESOURCE; + int i = 0, ret = 0; + u32 spipc0; + + pdata = pdev->dev.platform_data; + if (pdata == NULL) { + ret = -ENODEV; + goto err; + } + + master = spi_alloc_master(&pdev->dev, sizeof(struct davinci_spi)); + if (master == NULL) { + ret = -ENOMEM; + goto err; + } + + dev_set_drvdata(&pdev->dev, master); + + dspi = spi_master_get_devdata(master); + if (dspi == NULL) { + ret = -ENOENT; + goto free_master; + } + + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (r == NULL) { + ret = -ENOENT; + goto free_master; + } + + dspi->pbase = r->start; + dspi->pdata = pdata; + + mem = request_mem_region(r->start, resource_size(r), pdev->name); + if (mem == NULL) { + ret = -EBUSY; + goto free_master; + } + + dspi->base = ioremap(r->start, resource_size(r)); + if (dspi->base == NULL) { + ret = -ENOMEM; + goto release_region; + } + + dspi->irq = platform_get_irq(pdev, 0); + if (dspi->irq <= 0) { + ret = -EINVAL; + goto unmap_io; + } + + ret = request_irq(dspi->irq, davinci_spi_irq, 0, dev_name(&pdev->dev), + dspi); + if (ret) + goto unmap_io; + + dspi->bitbang.master = spi_master_get(master); + if (dspi->bitbang.master == NULL) { + ret = -ENODEV; + goto irq_free; + } + + dspi->clk = clk_get(&pdev->dev, NULL); + if (IS_ERR(dspi->clk)) { + ret = -ENODEV; + goto put_master; + } + clk_enable(dspi->clk); + + master->bus_num = pdev->id; + master->num_chipselect = pdata->num_chipselect; + master->setup = davinci_spi_setup; + + dspi->bitbang.chipselect = davinci_spi_chipselect; + dspi->bitbang.setup_transfer = davinci_spi_setup_transfer; + + dspi->version = pdata->version; + + dspi->bitbang.flags = SPI_NO_CS | SPI_LSB_FIRST | SPI_LOOP; + if (dspi->version == SPI_VERSION_2) + dspi->bitbang.flags |= SPI_READY; + + r = platform_get_resource(pdev, IORESOURCE_DMA, 0); + if (r) + dma_rx_chan = r->start; + r = platform_get_resource(pdev, IORESOURCE_DMA, 1); + if (r) + dma_tx_chan = r->start; + + dspi->bitbang.txrx_bufs = davinci_spi_bufs; + if (dma_rx_chan != SPI_NO_RESOURCE && + dma_tx_chan != SPI_NO_RESOURCE) { + dspi->dma.rx_channel = dma_rx_chan; + dspi->dma.tx_channel = dma_tx_chan; + dspi->dma.eventq = pdata->dma_event_q; + + ret = davinci_spi_request_dma(dspi); + if (ret) + goto free_clk; + + dev_info(&pdev->dev, "DMA: supported\n"); + dev_info(&pdev->dev, "DMA: RX channel: %d, TX channel: %d, " + "event queue: %d\n", dma_rx_chan, dma_tx_chan, + pdata->dma_event_q); + } + + dspi->get_rx = davinci_spi_rx_buf_u8; + dspi->get_tx = davinci_spi_tx_buf_u8; + + init_completion(&dspi->done); + + /* Reset In/OUT SPI module */ + iowrite32(0, dspi->base + SPIGCR0); + udelay(100); + iowrite32(1, dspi->base + SPIGCR0); + + /* Set up SPIPC0. 
CS and ENA init is done in davinci_spi_setup */ + spipc0 = SPIPC0_DIFUN_MASK | SPIPC0_DOFUN_MASK | SPIPC0_CLKFUN_MASK; + iowrite32(spipc0, dspi->base + SPIPC0); + + /* initialize chip selects */ + if (pdata->chip_sel) { + for (i = 0; i < pdata->num_chipselect; i++) { + if (pdata->chip_sel[i] != SPI_INTERN_CS) + gpio_direction_output(pdata->chip_sel[i], 1); + } + } + + if (pdata->intr_line) + iowrite32(SPI_INTLVL_1, dspi->base + SPILVL); + else + iowrite32(SPI_INTLVL_0, dspi->base + SPILVL); + + iowrite32(CS_DEFAULT, dspi->base + SPIDEF); + + /* master mode default */ + set_io_bits(dspi->base + SPIGCR1, SPIGCR1_CLKMOD_MASK); + set_io_bits(dspi->base + SPIGCR1, SPIGCR1_MASTER_MASK); + set_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK); + + ret = spi_bitbang_start(&dspi->bitbang); + if (ret) + goto free_dma; + + dev_info(&pdev->dev, "Controller at 0x%p\n", dspi->base); + + return ret; + +free_dma: + edma_free_channel(dspi->dma.tx_channel); + edma_free_channel(dspi->dma.rx_channel); + edma_free_slot(dspi->dma.dummy_param_slot); +free_clk: + clk_disable(dspi->clk); + clk_put(dspi->clk); +put_master: + spi_master_put(master); +irq_free: + free_irq(dspi->irq, dspi); +unmap_io: + iounmap(dspi->base); +release_region: + release_mem_region(dspi->pbase, resource_size(r)); +free_master: + kfree(master); +err: + return ret; +} + +/** + * davinci_spi_remove - remove function for SPI Master Controller + * @pdev: platform_device structure which contains plateform specific data + * + * This function will do the reverse action of davinci_spi_probe function + * It will free the IRQ and SPI controller's memory region. + * It will also call spi_bitbang_stop to destroy the work queue which was + * created by spi_bitbang_start. + */ +static int __exit davinci_spi_remove(struct platform_device *pdev) +{ + struct davinci_spi *dspi; + struct spi_master *master; + struct resource *r; + + master = dev_get_drvdata(&pdev->dev); + dspi = spi_master_get_devdata(master); + + spi_bitbang_stop(&dspi->bitbang); + + clk_disable(dspi->clk); + clk_put(dspi->clk); + spi_master_put(master); + free_irq(dspi->irq, dspi); + iounmap(dspi->base); + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + release_mem_region(dspi->pbase, resource_size(r)); + + return 0; +} + +static struct platform_driver davinci_spi_driver = { + .driver = { + .name = "spi_davinci", + .owner = THIS_MODULE, + }, + .remove = __exit_p(davinci_spi_remove), +}; + +static int __init davinci_spi_init(void) +{ + return platform_driver_probe(&davinci_spi_driver, davinci_spi_probe); +} +module_init(davinci_spi_init); + +static void __exit davinci_spi_exit(void) +{ + platform_driver_unregister(&davinci_spi_driver); +} +module_exit(davinci_spi_exit); + +MODULE_DESCRIPTION("TI DaVinci SPI Master Controller Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c new file mode 100644 index 0000000..130e555 --- /dev/null +++ b/drivers/spi/spi-dw-mid.c @@ -0,0 +1,224 @@ +/* + * Special handling for DW core on Intel MID platform + * + * Copyright (c) 2009, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#include +#include +#include +#include +#include + +#include "spi-dw.h" + +#ifdef CONFIG_SPI_DW_MID_DMA +#include +#include + +struct mid_dma { + struct intel_mid_dma_slave dmas_tx; + struct intel_mid_dma_slave dmas_rx; +}; + +static bool mid_spi_dma_chan_filter(struct dma_chan *chan, void *param) +{ + struct dw_spi *dws = param; + + return dws->dmac && (&dws->dmac->dev == chan->device->dev); +} + +static int mid_spi_dma_init(struct dw_spi *dws) +{ + struct mid_dma *dw_dma = dws->dma_priv; + struct intel_mid_dma_slave *rxs, *txs; + dma_cap_mask_t mask; + + /* + * Get pci device for DMA controller, currently it could only + * be the DMA controller of either Moorestown or Medfield + */ + dws->dmac = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0813, NULL); + if (!dws->dmac) + dws->dmac = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0827, NULL); + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + + /* 1. Init rx channel */ + dws->rxchan = dma_request_channel(mask, mid_spi_dma_chan_filter, dws); + if (!dws->rxchan) + goto err_exit; + rxs = &dw_dma->dmas_rx; + rxs->hs_mode = LNW_DMA_HW_HS; + rxs->cfg_mode = LNW_DMA_PER_TO_MEM; + dws->rxchan->private = rxs; + + /* 2. Init tx channel */ + dws->txchan = dma_request_channel(mask, mid_spi_dma_chan_filter, dws); + if (!dws->txchan) + goto free_rxchan; + txs = &dw_dma->dmas_tx; + txs->hs_mode = LNW_DMA_HW_HS; + txs->cfg_mode = LNW_DMA_MEM_TO_PER; + dws->txchan->private = txs; + + dws->dma_inited = 1; + return 0; + +free_rxchan: + dma_release_channel(dws->rxchan); +err_exit: + return -1; + +} + +static void mid_spi_dma_exit(struct dw_spi *dws) +{ + dma_release_channel(dws->txchan); + dma_release_channel(dws->rxchan); +} + +/* + * dws->dma_chan_done is cleared before the dma transfer starts, + * callback for rx/tx channel will each increment it by 1. + * Reaching 2 means the whole spi transaction is done. + */ +static void dw_spi_dma_done(void *arg) +{ + struct dw_spi *dws = arg; + + if (++dws->dma_chan_done != 2) + return; + dw_spi_xfer_done(dws); +} + +static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change) +{ + struct dma_async_tx_descriptor *txdesc = NULL, *rxdesc = NULL; + struct dma_chan *txchan, *rxchan; + struct dma_slave_config txconf, rxconf; + u16 dma_ctrl = 0; + + /* 1. setup DMA related registers */ + if (cs_change) { + spi_enable_chip(dws, 0); + dw_writew(dws, dmardlr, 0xf); + dw_writew(dws, dmatdlr, 0x10); + if (dws->tx_dma) + dma_ctrl |= 0x2; + if (dws->rx_dma) + dma_ctrl |= 0x1; + dw_writew(dws, dmacr, dma_ctrl); + spi_enable_chip(dws, 1); + } + + dws->dma_chan_done = 0; + txchan = dws->txchan; + rxchan = dws->rxchan; + + /* 2. Prepare the TX dma transfer */ + txconf.direction = DMA_TO_DEVICE; + txconf.dst_addr = dws->dma_addr; + txconf.dst_maxburst = LNW_DMA_MSIZE_16; + txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + txconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; + + txchan->device->device_control(txchan, DMA_SLAVE_CONFIG, + (unsigned long) &txconf); + + memset(&dws->tx_sgl, 0, sizeof(dws->tx_sgl)); + dws->tx_sgl.dma_address = dws->tx_dma; + dws->tx_sgl.length = dws->len; + + txdesc = txchan->device->device_prep_slave_sg(txchan, + &dws->tx_sgl, + 1, + DMA_TO_DEVICE, + DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP); + txdesc->callback = dw_spi_dma_done; + txdesc->callback_param = dws; + + /* 3. 
Prepare the RX dma transfer */ + rxconf.direction = DMA_FROM_DEVICE; + rxconf.src_addr = dws->dma_addr; + rxconf.src_maxburst = LNW_DMA_MSIZE_16; + rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + rxconf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; + + rxchan->device->device_control(rxchan, DMA_SLAVE_CONFIG, + (unsigned long) &rxconf); + + memset(&dws->rx_sgl, 0, sizeof(dws->rx_sgl)); + dws->rx_sgl.dma_address = dws->rx_dma; + dws->rx_sgl.length = dws->len; + + rxdesc = rxchan->device->device_prep_slave_sg(rxchan, + &dws->rx_sgl, + 1, + DMA_FROM_DEVICE, + DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP); + rxdesc->callback = dw_spi_dma_done; + rxdesc->callback_param = dws; + + /* rx must be started before tx due to spi instinct */ + rxdesc->tx_submit(rxdesc); + txdesc->tx_submit(txdesc); + return 0; +} + +static struct dw_spi_dma_ops mid_dma_ops = { + .dma_init = mid_spi_dma_init, + .dma_exit = mid_spi_dma_exit, + .dma_transfer = mid_spi_dma_transfer, +}; +#endif + +/* Some specific info for SPI0 controller on Moorestown */ + +/* HW info for MRST CLk Control Unit, one 32b reg */ +#define MRST_SPI_CLK_BASE 100000000 /* 100m */ +#define MRST_CLK_SPI0_REG 0xff11d86c +#define CLK_SPI_BDIV_OFFSET 0 +#define CLK_SPI_BDIV_MASK 0x00000007 +#define CLK_SPI_CDIV_OFFSET 9 +#define CLK_SPI_CDIV_MASK 0x00000e00 +#define CLK_SPI_DISABLE_OFFSET 8 + +int dw_spi_mid_init(struct dw_spi *dws) +{ + u32 *clk_reg, clk_cdiv; + + clk_reg = ioremap_nocache(MRST_CLK_SPI0_REG, 16); + if (!clk_reg) + return -ENOMEM; + + /* get SPI controller operating freq info */ + clk_cdiv = (readl(clk_reg) & CLK_SPI_CDIV_MASK) >> CLK_SPI_CDIV_OFFSET; + dws->max_freq = MRST_SPI_CLK_BASE / (clk_cdiv + 1); + iounmap(clk_reg); + + dws->num_cs = 16; + dws->fifo_len = 40; /* FIFO has 40 words buffer */ + +#ifdef CONFIG_SPI_DW_MID_DMA + dws->dma_priv = kzalloc(sizeof(struct mid_dma), GFP_KERNEL); + if (!dws->dma_priv) + return -ENOMEM; + dws->dma_ops = &mid_dma_ops; +#endif + return 0; +} diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c new file mode 100644 index 0000000..34eb665 --- /dev/null +++ b/drivers/spi/spi-dw-mmio.c @@ -0,0 +1,151 @@ +/* + * Memory-mapped interface driver for DW SPI Core + * + * Copyright (c) 2010, Octasic semiconductor. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#include "spi-dw.h" + +#define DRIVER_NAME "dw_spi_mmio" + +struct dw_spi_mmio { + struct dw_spi dws; + struct clk *clk; +}; + +static int __devinit dw_spi_mmio_probe(struct platform_device *pdev) +{ + struct dw_spi_mmio *dwsmmio; + struct dw_spi *dws; + struct resource *mem, *ioarea; + int ret; + + dwsmmio = kzalloc(sizeof(struct dw_spi_mmio), GFP_KERNEL); + if (!dwsmmio) { + ret = -ENOMEM; + goto err_end; + } + + dws = &dwsmmio->dws; + + /* Get basic io resource and map it */ + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!mem) { + dev_err(&pdev->dev, "no mem resource?\n"); + ret = -EINVAL; + goto err_kfree; + } + + ioarea = request_mem_region(mem->start, resource_size(mem), + pdev->name); + if (!ioarea) { + dev_err(&pdev->dev, "SPI region already claimed\n"); + ret = -EBUSY; + goto err_kfree; + } + + dws->regs = ioremap_nocache(mem->start, resource_size(mem)); + if (!dws->regs) { + dev_err(&pdev->dev, "SPI region already mapped\n"); + ret = -ENOMEM; + goto err_release_reg; + } + + dws->irq = platform_get_irq(pdev, 0); + if (dws->irq < 0) { + dev_err(&pdev->dev, "no irq resource?\n"); + ret = dws->irq; /* -ENXIO */ + goto err_unmap; + } + + dwsmmio->clk = clk_get(&pdev->dev, NULL); + if (IS_ERR(dwsmmio->clk)) { + ret = PTR_ERR(dwsmmio->clk); + goto err_irq; + } + clk_enable(dwsmmio->clk); + + dws->parent_dev = &pdev->dev; + dws->bus_num = 0; + dws->num_cs = 4; + dws->max_freq = clk_get_rate(dwsmmio->clk); + + ret = dw_spi_add_host(dws); + if (ret) + goto err_clk; + + platform_set_drvdata(pdev, dwsmmio); + return 0; + +err_clk: + clk_disable(dwsmmio->clk); + clk_put(dwsmmio->clk); + dwsmmio->clk = NULL; +err_irq: + free_irq(dws->irq, dws); +err_unmap: + iounmap(dws->regs); +err_release_reg: + release_mem_region(mem->start, resource_size(mem)); +err_kfree: + kfree(dwsmmio); +err_end: + return ret; +} + +static int __devexit dw_spi_mmio_remove(struct platform_device *pdev) +{ + struct dw_spi_mmio *dwsmmio = platform_get_drvdata(pdev); + struct resource *mem; + + platform_set_drvdata(pdev, NULL); + + clk_disable(dwsmmio->clk); + clk_put(dwsmmio->clk); + dwsmmio->clk = NULL; + + free_irq(dwsmmio->dws.irq, &dwsmmio->dws); + dw_spi_remove_host(&dwsmmio->dws); + iounmap(dwsmmio->dws.regs); + kfree(dwsmmio); + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + release_mem_region(mem->start, resource_size(mem)); + return 0; +} + +static struct platform_driver dw_spi_mmio_driver = { + .remove = __devexit_p(dw_spi_mmio_remove), + .driver = { + .name = DRIVER_NAME, + .owner = THIS_MODULE, + }, +}; + +static int __init dw_spi_mmio_init(void) +{ + return platform_driver_probe(&dw_spi_mmio_driver, dw_spi_mmio_probe); +} +module_init(dw_spi_mmio_init); + +static void __exit dw_spi_mmio_exit(void) +{ + platform_driver_unregister(&dw_spi_mmio_driver); +} +module_exit(dw_spi_mmio_exit); + +MODULE_AUTHOR("Jean-Hugues Deschenes "); +MODULE_DESCRIPTION("Memory-mapped I/O interface driver for DW SPI Core"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c new file mode 100644 index 0000000..c5f37f0 --- /dev/null +++ b/drivers/spi/spi-dw-pci.c @@ -0,0 +1,181 @@ +/* + * PCI interface driver for DW SPI Core + * + * Copyright (c) 2009, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#include +#include +#include +#include + +#include "spi-dw.h" + +#define DRIVER_NAME "dw_spi_pci" + +struct dw_spi_pci { + struct pci_dev *pdev; + struct dw_spi dws; +}; + +static int __devinit spi_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct dw_spi_pci *dwpci; + struct dw_spi *dws; + int pci_bar = 0; + int ret; + + printk(KERN_INFO "DW: found PCI SPI controller(ID: %04x:%04x)\n", + pdev->vendor, pdev->device); + + ret = pci_enable_device(pdev); + if (ret) + return ret; + + dwpci = kzalloc(sizeof(struct dw_spi_pci), GFP_KERNEL); + if (!dwpci) { + ret = -ENOMEM; + goto err_disable; + } + + dwpci->pdev = pdev; + dws = &dwpci->dws; + + /* Get basic io resource and map it */ + dws->paddr = pci_resource_start(pdev, pci_bar); + dws->iolen = pci_resource_len(pdev, pci_bar); + + ret = pci_request_region(pdev, pci_bar, dev_name(&pdev->dev)); + if (ret) + goto err_kfree; + + dws->regs = ioremap_nocache((unsigned long)dws->paddr, + pci_resource_len(pdev, pci_bar)); + if (!dws->regs) { + ret = -ENOMEM; + goto err_release_reg; + } + + dws->parent_dev = &pdev->dev; + dws->bus_num = 0; + dws->num_cs = 4; + dws->irq = pdev->irq; + + /* + * Specific handling for Intel MID paltforms, like dma setup, + * clock rate, FIFO depth. + */ + if (pdev->device == 0x0800) { + ret = dw_spi_mid_init(dws); + if (ret) + goto err_unmap; + } + + ret = dw_spi_add_host(dws); + if (ret) + goto err_unmap; + + /* PCI hook and SPI hook use the same drv data */ + pci_set_drvdata(pdev, dwpci); + return 0; + +err_unmap: + iounmap(dws->regs); +err_release_reg: + pci_release_region(pdev, pci_bar); +err_kfree: + kfree(dwpci); +err_disable: + pci_disable_device(pdev); + return ret; +} + +static void __devexit spi_pci_remove(struct pci_dev *pdev) +{ + struct dw_spi_pci *dwpci = pci_get_drvdata(pdev); + + pci_set_drvdata(pdev, NULL); + dw_spi_remove_host(&dwpci->dws); + iounmap(dwpci->dws.regs); + pci_release_region(pdev, 0); + kfree(dwpci); + pci_disable_device(pdev); +} + +#ifdef CONFIG_PM +static int spi_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct dw_spi_pci *dwpci = pci_get_drvdata(pdev); + int ret; + + ret = dw_spi_suspend_host(&dwpci->dws); + if (ret) + return ret; + pci_save_state(pdev); + pci_disable_device(pdev); + pci_set_power_state(pdev, pci_choose_state(pdev, state)); + return ret; +} + +static int spi_resume(struct pci_dev *pdev) +{ + struct dw_spi_pci *dwpci = pci_get_drvdata(pdev); + int ret; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + ret = pci_enable_device(pdev); + if (ret) + return ret; + return dw_spi_resume_host(&dwpci->dws); +} +#else +#define spi_suspend NULL +#define spi_resume NULL +#endif + +static const struct pci_device_id pci_ids[] __devinitdata = { + /* Intel MID platform SPI controller 0 */ + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) }, + {}, +}; + +static struct pci_driver dw_spi_driver = { + .name = DRIVER_NAME, + .id_table = pci_ids, + .probe = spi_pci_probe, + .remove = __devexit_p(spi_pci_remove), + .suspend = spi_suspend, + .resume = spi_resume, +}; + +static int 
__init mrst_spi_init(void) +{ + return pci_register_driver(&dw_spi_driver); +} + +static void __exit mrst_spi_exit(void) +{ + pci_unregister_driver(&dw_spi_driver); +} + +module_init(mrst_spi_init); +module_exit(mrst_spi_exit); + +MODULE_AUTHOR("Feng Tang "); +MODULE_DESCRIPTION("PCI interface driver for DW SPI Core"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c new file mode 100644 index 0000000..ece5f69 --- /dev/null +++ b/drivers/spi/spi-dw.c @@ -0,0 +1,936 @@ +/* + * Designware SPI core controller driver (refer pxa2xx_spi.c) + * + * Copyright (c) 2009, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#include +#include +#include +#include +#include +#include + +#include "spi-dw.h" + +#ifdef CONFIG_DEBUG_FS +#include +#endif + +#define START_STATE ((void *)0) +#define RUNNING_STATE ((void *)1) +#define DONE_STATE ((void *)2) +#define ERROR_STATE ((void *)-1) + +#define QUEUE_RUNNING 0 +#define QUEUE_STOPPED 1 + +#define MRST_SPI_DEASSERT 0 +#define MRST_SPI_ASSERT 1 + +/* Slave spi_dev related */ +struct chip_data { + u16 cr0; + u8 cs; /* chip select pin */ + u8 n_bytes; /* current is a 1/2/4 byte op */ + u8 tmode; /* TR/TO/RO/EEPROM */ + u8 type; /* SPI/SSP/MicroWire */ + + u8 poll_mode; /* 1 means use poll mode */ + + u32 dma_width; + u32 rx_threshold; + u32 tx_threshold; + u8 enable_dma; + u8 bits_per_word; + u16 clk_div; /* baud rate divider */ + u32 speed_hz; /* baud rate */ + void (*cs_control)(u32 command); +}; + +#ifdef CONFIG_DEBUG_FS +static int spi_show_regs_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +#define SPI_REGS_BUFSIZE 1024 +static ssize_t spi_show_regs(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct dw_spi *dws; + char *buf; + u32 len = 0; + ssize_t ret; + + dws = file->private_data; + + buf = kzalloc(SPI_REGS_BUFSIZE, GFP_KERNEL); + if (!buf) + return 0; + + len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + "MRST SPI0 registers:\n"); + len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + "=================================\n"); + len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + "CTRL0: \t\t0x%08x\n", dw_readl(dws, ctrl0)); + len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + "CTRL1: \t\t0x%08x\n", dw_readl(dws, ctrl1)); + len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + "SSIENR: \t0x%08x\n", dw_readl(dws, ssienr)); + len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + "SER: \t\t0x%08x\n", dw_readl(dws, ser)); + len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + "BAUDR: \t\t0x%08x\n", dw_readl(dws, baudr)); + len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + "TXFTLR: \t0x%08x\n", dw_readl(dws, txfltr)); + len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + "RXFTLR: \t0x%08x\n", dw_readl(dws, rxfltr)); + len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + "TXFLR: 
\t\t0x%08x\n", dw_readl(dws, txflr)); + len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + "RXFLR: \t\t0x%08x\n", dw_readl(dws, rxflr)); + len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + "SR: \t\t0x%08x\n", dw_readl(dws, sr)); + len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + "IMR: \t\t0x%08x\n", dw_readl(dws, imr)); + len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + "ISR: \t\t0x%08x\n", dw_readl(dws, isr)); + len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + "DMACR: \t\t0x%08x\n", dw_readl(dws, dmacr)); + len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + "DMATDLR: \t0x%08x\n", dw_readl(dws, dmatdlr)); + len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + "DMARDLR: \t0x%08x\n", dw_readl(dws, dmardlr)); + len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + "=================================\n"); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, len); + kfree(buf); + return ret; +} + +static const struct file_operations mrst_spi_regs_ops = { + .owner = THIS_MODULE, + .open = spi_show_regs_open, + .read = spi_show_regs, + .llseek = default_llseek, +}; + +static int mrst_spi_debugfs_init(struct dw_spi *dws) +{ + dws->debugfs = debugfs_create_dir("mrst_spi", NULL); + if (!dws->debugfs) + return -ENOMEM; + + debugfs_create_file("registers", S_IFREG | S_IRUGO, + dws->debugfs, (void *)dws, &mrst_spi_regs_ops); + return 0; +} + +static void mrst_spi_debugfs_remove(struct dw_spi *dws) +{ + if (dws->debugfs) + debugfs_remove_recursive(dws->debugfs); +} + +#else +static inline int mrst_spi_debugfs_init(struct dw_spi *dws) +{ + return 0; +} + +static inline void mrst_spi_debugfs_remove(struct dw_spi *dws) +{ +} +#endif /* CONFIG_DEBUG_FS */ + +/* Return the max entries we can fill into tx fifo */ +static inline u32 tx_max(struct dw_spi *dws) +{ + u32 tx_left, tx_room, rxtx_gap; + + tx_left = (dws->tx_end - dws->tx) / dws->n_bytes; + tx_room = dws->fifo_len - dw_readw(dws, txflr); + + /* + * Another concern is about the tx/rx mismatch, we + * though to use (dws->fifo_len - rxflr - txflr) as + * one maximum value for tx, but it doesn't cover the + * data which is out of tx/rx fifo and inside the + * shift registers. So a control from sw point of + * view is taken. 
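+	 *
+	 * rxtx_gap below is the number of entries already written out but
+	 * not yet read back (still in the fifos or the shift register);
+	 * limiting tx to fifo_len - rxtx_gap keeps the rx fifo from
+	 * overflowing.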
+ */ + rxtx_gap = ((dws->rx_end - dws->rx) - (dws->tx_end - dws->tx)) + / dws->n_bytes; + + return min3(tx_left, tx_room, (u32) (dws->fifo_len - rxtx_gap)); +} + +/* Return the max entries we should read out of rx fifo */ +static inline u32 rx_max(struct dw_spi *dws) +{ + u32 rx_left = (dws->rx_end - dws->rx) / dws->n_bytes; + + return min(rx_left, (u32)dw_readw(dws, rxflr)); +} + +static void dw_writer(struct dw_spi *dws) +{ + u32 max = tx_max(dws); + u16 txw = 0; + + while (max--) { + /* Set the tx word if the transfer's original "tx" is not null */ + if (dws->tx_end - dws->len) { + if (dws->n_bytes == 1) + txw = *(u8 *)(dws->tx); + else + txw = *(u16 *)(dws->tx); + } + dw_writew(dws, dr, txw); + dws->tx += dws->n_bytes; + } +} + +static void dw_reader(struct dw_spi *dws) +{ + u32 max = rx_max(dws); + u16 rxw; + + while (max--) { + rxw = dw_readw(dws, dr); + /* Care rx only if the transfer's original "rx" is not null */ + if (dws->rx_end - dws->len) { + if (dws->n_bytes == 1) + *(u8 *)(dws->rx) = rxw; + else + *(u16 *)(dws->rx) = rxw; + } + dws->rx += dws->n_bytes; + } +} + +static void *next_transfer(struct dw_spi *dws) +{ + struct spi_message *msg = dws->cur_msg; + struct spi_transfer *trans = dws->cur_transfer; + + /* Move to next transfer */ + if (trans->transfer_list.next != &msg->transfers) { + dws->cur_transfer = + list_entry(trans->transfer_list.next, + struct spi_transfer, + transfer_list); + return RUNNING_STATE; + } else + return DONE_STATE; +} + +/* + * Note: first step is the protocol driver prepares + * a dma-capable memory, and this func just need translate + * the virt addr to physical + */ +static int map_dma_buffers(struct dw_spi *dws) +{ + if (!dws->cur_msg->is_dma_mapped + || !dws->dma_inited + || !dws->cur_chip->enable_dma + || !dws->dma_ops) + return 0; + + if (dws->cur_transfer->tx_dma) + dws->tx_dma = dws->cur_transfer->tx_dma; + + if (dws->cur_transfer->rx_dma) + dws->rx_dma = dws->cur_transfer->rx_dma; + + return 1; +} + +/* Caller already set message->status; dma and pio irqs are blocked */ +static void giveback(struct dw_spi *dws) +{ + struct spi_transfer *last_transfer; + unsigned long flags; + struct spi_message *msg; + + spin_lock_irqsave(&dws->lock, flags); + msg = dws->cur_msg; + dws->cur_msg = NULL; + dws->cur_transfer = NULL; + dws->prev_chip = dws->cur_chip; + dws->cur_chip = NULL; + dws->dma_mapped = 0; + queue_work(dws->workqueue, &dws->pump_messages); + spin_unlock_irqrestore(&dws->lock, flags); + + last_transfer = list_entry(msg->transfers.prev, + struct spi_transfer, + transfer_list); + + if (!last_transfer->cs_change && dws->cs_control) + dws->cs_control(MRST_SPI_DEASSERT); + + msg->state = NULL; + if (msg->complete) + msg->complete(msg->context); +} + +static void int_error_stop(struct dw_spi *dws, const char *msg) +{ + /* Stop the hw */ + spi_enable_chip(dws, 0); + + dev_err(&dws->master->dev, "%s\n", msg); + dws->cur_msg->state = ERROR_STATE; + tasklet_schedule(&dws->pump_transfers); +} + +void dw_spi_xfer_done(struct dw_spi *dws) +{ + /* Update total byte transferred return count actual bytes read */ + dws->cur_msg->actual_length += dws->len; + + /* Move to next transfer */ + dws->cur_msg->state = next_transfer(dws); + + /* Handle end of message */ + if (dws->cur_msg->state == DONE_STATE) { + dws->cur_msg->status = 0; + giveback(dws); + } else + tasklet_schedule(&dws->pump_transfers); +} +EXPORT_SYMBOL_GPL(dw_spi_xfer_done); + +static irqreturn_t interrupt_transfer(struct dw_spi *dws) +{ + u16 irq_status = dw_readw(dws, isr); + + /* Error 
handling */ + if (irq_status & (SPI_INT_TXOI | SPI_INT_RXOI | SPI_INT_RXUI)) { + dw_readw(dws, txoicr); + dw_readw(dws, rxoicr); + dw_readw(dws, rxuicr); + int_error_stop(dws, "interrupt_transfer: fifo overrun/underrun"); + return IRQ_HANDLED; + } + + dw_reader(dws); + if (dws->rx_end == dws->rx) { + spi_mask_intr(dws, SPI_INT_TXEI); + dw_spi_xfer_done(dws); + return IRQ_HANDLED; + } + if (irq_status & SPI_INT_TXEI) { + spi_mask_intr(dws, SPI_INT_TXEI); + dw_writer(dws); + /* Enable TX irq always, it will be disabled when RX finished */ + spi_umask_intr(dws, SPI_INT_TXEI); + } + + return IRQ_HANDLED; +} + +static irqreturn_t dw_spi_irq(int irq, void *dev_id) +{ + struct dw_spi *dws = dev_id; + u16 irq_status = dw_readw(dws, isr) & 0x3f; + + if (!irq_status) + return IRQ_NONE; + + if (!dws->cur_msg) { + spi_mask_intr(dws, SPI_INT_TXEI); + return IRQ_HANDLED; + } + + return dws->transfer_handler(dws); +} + +/* Must be called inside pump_transfers() */ +static void poll_transfer(struct dw_spi *dws) +{ + do { + dw_writer(dws); + dw_reader(dws); + cpu_relax(); + } while (dws->rx_end > dws->rx); + + dw_spi_xfer_done(dws); +} + +static void pump_transfers(unsigned long data) +{ + struct dw_spi *dws = (struct dw_spi *)data; + struct spi_message *message = NULL; + struct spi_transfer *transfer = NULL; + struct spi_transfer *previous = NULL; + struct spi_device *spi = NULL; + struct chip_data *chip = NULL; + u8 bits = 0; + u8 imask = 0; + u8 cs_change = 0; + u16 txint_level = 0; + u16 clk_div = 0; + u32 speed = 0; + u32 cr0 = 0; + + /* Get current state information */ + message = dws->cur_msg; + transfer = dws->cur_transfer; + chip = dws->cur_chip; + spi = message->spi; + + if (unlikely(!chip->clk_div)) + chip->clk_div = dws->max_freq / chip->speed_hz; + + if (message->state == ERROR_STATE) { + message->status = -EIO; + goto early_exit; + } + + /* Handle end of message */ + if (message->state == DONE_STATE) { + message->status = 0; + goto early_exit; + } + + /* Delay if requested at end of transfer*/ + if (message->state == RUNNING_STATE) { + previous = list_entry(transfer->transfer_list.prev, + struct spi_transfer, + transfer_list); + if (previous->delay_usecs) + udelay(previous->delay_usecs); + } + + dws->n_bytes = chip->n_bytes; + dws->dma_width = chip->dma_width; + dws->cs_control = chip->cs_control; + + dws->rx_dma = transfer->rx_dma; + dws->tx_dma = transfer->tx_dma; + dws->tx = (void *)transfer->tx_buf; + dws->tx_end = dws->tx + transfer->len; + dws->rx = transfer->rx_buf; + dws->rx_end = dws->rx + transfer->len; + dws->cs_change = transfer->cs_change; + dws->len = dws->cur_transfer->len; + if (chip != dws->prev_chip) + cs_change = 1; + + cr0 = chip->cr0; + + /* Handle per transfer options for bpw and speed */ + if (transfer->speed_hz) { + speed = chip->speed_hz; + + if (transfer->speed_hz != speed) { + speed = transfer->speed_hz; + if (speed > dws->max_freq) { + printk(KERN_ERR "MRST SPI0: unsupported" + "freq: %dHz\n", speed); + message->status = -EIO; + goto early_exit; + } + + /* clk_div doesn't support odd number */ + clk_div = dws->max_freq / speed; + clk_div = (clk_div + 1) & 0xfffe; + + chip->speed_hz = speed; + chip->clk_div = clk_div; + } + } + if (transfer->bits_per_word) { + bits = transfer->bits_per_word; + + switch (bits) { + case 8: + case 16: + dws->n_bytes = dws->dma_width = bits >> 3; + break; + default: + printk(KERN_ERR "MRST SPI0: unsupported bits:" + "%db\n", bits); + message->status = -EIO; + goto early_exit; + } + + cr0 = (bits - 1) + | (chip->type << SPI_FRF_OFFSET) + 
| (spi->mode << SPI_MODE_OFFSET) + | (chip->tmode << SPI_TMOD_OFFSET); + } + message->state = RUNNING_STATE; + + /* + * Adjust transfer mode if necessary. Requires platform dependent + * chipselect mechanism. + */ + if (dws->cs_control) { + if (dws->rx && dws->tx) + chip->tmode = SPI_TMOD_TR; + else if (dws->rx) + chip->tmode = SPI_TMOD_RO; + else + chip->tmode = SPI_TMOD_TO; + + cr0 &= ~SPI_TMOD_MASK; + cr0 |= (chip->tmode << SPI_TMOD_OFFSET); + } + + /* Check if current transfer is a DMA transaction */ + dws->dma_mapped = map_dma_buffers(dws); + + /* + * Interrupt mode + * we only need set the TXEI IRQ, as TX/RX always happen syncronizely + */ + if (!dws->dma_mapped && !chip->poll_mode) { + int templen = dws->len / dws->n_bytes; + txint_level = dws->fifo_len / 2; + txint_level = (templen > txint_level) ? txint_level : templen; + + imask |= SPI_INT_TXEI | SPI_INT_TXOI | SPI_INT_RXUI | SPI_INT_RXOI; + dws->transfer_handler = interrupt_transfer; + } + + /* + * Reprogram registers only if + * 1. chip select changes + * 2. clk_div is changed + * 3. control value changes + */ + if (dw_readw(dws, ctrl0) != cr0 || cs_change || clk_div || imask) { + spi_enable_chip(dws, 0); + + if (dw_readw(dws, ctrl0) != cr0) + dw_writew(dws, ctrl0, cr0); + + spi_set_clk(dws, clk_div ? clk_div : chip->clk_div); + spi_chip_sel(dws, spi->chip_select); + + /* Set the interrupt mask, for poll mode just disable all int */ + spi_mask_intr(dws, 0xff); + if (imask) + spi_umask_intr(dws, imask); + if (txint_level) + dw_writew(dws, txfltr, txint_level); + + spi_enable_chip(dws, 1); + if (cs_change) + dws->prev_chip = chip; + } + + if (dws->dma_mapped) + dws->dma_ops->dma_transfer(dws, cs_change); + + if (chip->poll_mode) + poll_transfer(dws); + + return; + +early_exit: + giveback(dws); + return; +} + +static void pump_messages(struct work_struct *work) +{ + struct dw_spi *dws = + container_of(work, struct dw_spi, pump_messages); + unsigned long flags; + + /* Lock queue and check for queue work */ + spin_lock_irqsave(&dws->lock, flags); + if (list_empty(&dws->queue) || dws->run == QUEUE_STOPPED) { + dws->busy = 0; + spin_unlock_irqrestore(&dws->lock, flags); + return; + } + + /* Make sure we are not already running a message */ + if (dws->cur_msg) { + spin_unlock_irqrestore(&dws->lock, flags); + return; + } + + /* Extract head of queue */ + dws->cur_msg = list_entry(dws->queue.next, struct spi_message, queue); + list_del_init(&dws->cur_msg->queue); + + /* Initial message state*/ + dws->cur_msg->state = START_STATE; + dws->cur_transfer = list_entry(dws->cur_msg->transfers.next, + struct spi_transfer, + transfer_list); + dws->cur_chip = spi_get_ctldata(dws->cur_msg->spi); + + /* Mark as busy and launch transfers */ + tasklet_schedule(&dws->pump_transfers); + + dws->busy = 1; + spin_unlock_irqrestore(&dws->lock, flags); +} + +/* spi_device use this to queue in their spi_msg */ +static int dw_spi_transfer(struct spi_device *spi, struct spi_message *msg) +{ + struct dw_spi *dws = spi_master_get_devdata(spi->master); + unsigned long flags; + + spin_lock_irqsave(&dws->lock, flags); + + if (dws->run == QUEUE_STOPPED) { + spin_unlock_irqrestore(&dws->lock, flags); + return -ESHUTDOWN; + } + + msg->actual_length = 0; + msg->status = -EINPROGRESS; + msg->state = START_STATE; + + list_add_tail(&msg->queue, &dws->queue); + + if (dws->run == QUEUE_RUNNING && !dws->busy) { + + if (dws->cur_transfer || dws->cur_msg) + queue_work(dws->workqueue, + &dws->pump_messages); + else { + /* If no other data transaction in air, just go */ + 
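+			/*
+			 * pump_messages() takes dws->lock itself, so the lock
+			 * is dropped here before pumping the queue directly.
+			 */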
spin_unlock_irqrestore(&dws->lock, flags); + pump_messages(&dws->pump_messages); + return 0; + } + } + + spin_unlock_irqrestore(&dws->lock, flags); + return 0; +} + +/* This may be called twice for each spi dev */ +static int dw_spi_setup(struct spi_device *spi) +{ + struct dw_spi_chip *chip_info = NULL; + struct chip_data *chip; + + if (spi->bits_per_word != 8 && spi->bits_per_word != 16) + return -EINVAL; + + /* Only alloc on first setup */ + chip = spi_get_ctldata(spi); + if (!chip) { + chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL); + if (!chip) + return -ENOMEM; + } + + /* + * Protocol drivers may change the chip settings, so... + * if chip_info exists, use it + */ + chip_info = spi->controller_data; + + /* chip_info doesn't always exist */ + if (chip_info) { + if (chip_info->cs_control) + chip->cs_control = chip_info->cs_control; + + chip->poll_mode = chip_info->poll_mode; + chip->type = chip_info->type; + + chip->rx_threshold = 0; + chip->tx_threshold = 0; + + chip->enable_dma = chip_info->enable_dma; + } + + if (spi->bits_per_word <= 8) { + chip->n_bytes = 1; + chip->dma_width = 1; + } else if (spi->bits_per_word <= 16) { + chip->n_bytes = 2; + chip->dma_width = 2; + } else { + /* Never take >16b case for MRST SPIC */ + dev_err(&spi->dev, "invalid wordsize\n"); + return -EINVAL; + } + chip->bits_per_word = spi->bits_per_word; + + if (!spi->max_speed_hz) { + dev_err(&spi->dev, "No max speed HZ parameter\n"); + return -EINVAL; + } + chip->speed_hz = spi->max_speed_hz; + + chip->tmode = 0; /* Tx & Rx */ + /* Default SPI mode is SCPOL = 0, SCPH = 0 */ + chip->cr0 = (chip->bits_per_word - 1) + | (chip->type << SPI_FRF_OFFSET) + | (spi->mode << SPI_MODE_OFFSET) + | (chip->tmode << SPI_TMOD_OFFSET); + + spi_set_ctldata(spi, chip); + return 0; +} + +static void dw_spi_cleanup(struct spi_device *spi) +{ + struct chip_data *chip = spi_get_ctldata(spi); + kfree(chip); +} + +static int __devinit init_queue(struct dw_spi *dws) +{ + INIT_LIST_HEAD(&dws->queue); + spin_lock_init(&dws->lock); + + dws->run = QUEUE_STOPPED; + dws->busy = 0; + + tasklet_init(&dws->pump_transfers, + pump_transfers, (unsigned long)dws); + + INIT_WORK(&dws->pump_messages, pump_messages); + dws->workqueue = create_singlethread_workqueue( + dev_name(dws->master->dev.parent)); + if (dws->workqueue == NULL) + return -EBUSY; + + return 0; +} + +static int start_queue(struct dw_spi *dws) +{ + unsigned long flags; + + spin_lock_irqsave(&dws->lock, flags); + + if (dws->run == QUEUE_RUNNING || dws->busy) { + spin_unlock_irqrestore(&dws->lock, flags); + return -EBUSY; + } + + dws->run = QUEUE_RUNNING; + dws->cur_msg = NULL; + dws->cur_transfer = NULL; + dws->cur_chip = NULL; + dws->prev_chip = NULL; + spin_unlock_irqrestore(&dws->lock, flags); + + queue_work(dws->workqueue, &dws->pump_messages); + + return 0; +} + +static int stop_queue(struct dw_spi *dws) +{ + unsigned long flags; + unsigned limit = 50; + int status = 0; + + spin_lock_irqsave(&dws->lock, flags); + dws->run = QUEUE_STOPPED; + while ((!list_empty(&dws->queue) || dws->busy) && limit--) { + spin_unlock_irqrestore(&dws->lock, flags); + msleep(10); + spin_lock_irqsave(&dws->lock, flags); + } + + if (!list_empty(&dws->queue) || dws->busy) + status = -EBUSY; + spin_unlock_irqrestore(&dws->lock, flags); + + return status; +} + +static int destroy_queue(struct dw_spi *dws) +{ + int status; + + status = stop_queue(dws); + if (status != 0) + return status; + destroy_workqueue(dws->workqueue); + return 0; +} + +/* Restart the controller, disable all interrupts, clean 
rx fifo */ +static void spi_hw_init(struct dw_spi *dws) +{ + spi_enable_chip(dws, 0); + spi_mask_intr(dws, 0xff); + spi_enable_chip(dws, 1); + + /* + * Try to detect the FIFO depth if not set by interface driver, + * the depth could be from 2 to 256 from HW spec + */ + if (!dws->fifo_len) { + u32 fifo; + for (fifo = 2; fifo <= 257; fifo++) { + dw_writew(dws, txfltr, fifo); + if (fifo != dw_readw(dws, txfltr)) + break; + } + + dws->fifo_len = (fifo == 257) ? 0 : fifo; + dw_writew(dws, txfltr, 0); + } +} + +int __devinit dw_spi_add_host(struct dw_spi *dws) +{ + struct spi_master *master; + int ret; + + BUG_ON(dws == NULL); + + master = spi_alloc_master(dws->parent_dev, 0); + if (!master) { + ret = -ENOMEM; + goto exit; + } + + dws->master = master; + dws->type = SSI_MOTO_SPI; + dws->prev_chip = NULL; + dws->dma_inited = 0; + dws->dma_addr = (dma_addr_t)(dws->paddr + 0x60); + + ret = request_irq(dws->irq, dw_spi_irq, IRQF_SHARED, + "dw_spi", dws); + if (ret < 0) { + dev_err(&master->dev, "can not get IRQ\n"); + goto err_free_master; + } + + master->mode_bits = SPI_CPOL | SPI_CPHA; + master->bus_num = dws->bus_num; + master->num_chipselect = dws->num_cs; + master->cleanup = dw_spi_cleanup; + master->setup = dw_spi_setup; + master->transfer = dw_spi_transfer; + + /* Basic HW init */ + spi_hw_init(dws); + + if (dws->dma_ops && dws->dma_ops->dma_init) { + ret = dws->dma_ops->dma_init(dws); + if (ret) { + dev_warn(&master->dev, "DMA init failed\n"); + dws->dma_inited = 0; + } + } + + /* Initial and start queue */ + ret = init_queue(dws); + if (ret) { + dev_err(&master->dev, "problem initializing queue\n"); + goto err_diable_hw; + } + ret = start_queue(dws); + if (ret) { + dev_err(&master->dev, "problem starting queue\n"); + goto err_diable_hw; + } + + spi_master_set_devdata(master, dws); + ret = spi_register_master(master); + if (ret) { + dev_err(&master->dev, "problem registering spi master\n"); + goto err_queue_alloc; + } + + mrst_spi_debugfs_init(dws); + return 0; + +err_queue_alloc: + destroy_queue(dws); + if (dws->dma_ops && dws->dma_ops->dma_exit) + dws->dma_ops->dma_exit(dws); +err_diable_hw: + spi_enable_chip(dws, 0); + free_irq(dws->irq, dws); +err_free_master: + spi_master_put(master); +exit: + return ret; +} +EXPORT_SYMBOL_GPL(dw_spi_add_host); + +void __devexit dw_spi_remove_host(struct dw_spi *dws) +{ + int status = 0; + + if (!dws) + return; + mrst_spi_debugfs_remove(dws); + + /* Remove the queue */ + status = destroy_queue(dws); + if (status != 0) + dev_err(&dws->master->dev, "dw_spi_remove: workqueue will not " + "complete, message memory not freed\n"); + + if (dws->dma_ops && dws->dma_ops->dma_exit) + dws->dma_ops->dma_exit(dws); + spi_enable_chip(dws, 0); + /* Disable clk */ + spi_set_clk(dws, 0); + free_irq(dws->irq, dws); + + /* Disconnect from the SPI framework */ + spi_unregister_master(dws->master); +} +EXPORT_SYMBOL_GPL(dw_spi_remove_host); + +int dw_spi_suspend_host(struct dw_spi *dws) +{ + int ret = 0; + + ret = stop_queue(dws); + if (ret) + return ret; + spi_enable_chip(dws, 0); + spi_set_clk(dws, 0); + return ret; +} +EXPORT_SYMBOL_GPL(dw_spi_suspend_host); + +int dw_spi_resume_host(struct dw_spi *dws) +{ + int ret; + + spi_hw_init(dws); + ret = start_queue(dws); + if (ret) + dev_err(&dws->master->dev, "fail to start queue (%d)\n", ret); + return ret; +} +EXPORT_SYMBOL_GPL(dw_spi_resume_host); + +MODULE_AUTHOR("Feng Tang "); +MODULE_DESCRIPTION("Driver for DesignWare SPI controller core"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/spi/spi-dw.h 
b/drivers/spi/spi-dw.h new file mode 100644 index 0000000..7a5e78d --- /dev/null +++ b/drivers/spi/spi-dw.h @@ -0,0 +1,232 @@ +#ifndef DW_SPI_HEADER_H +#define DW_SPI_HEADER_H + +#include +#include + +/* Bit fields in CTRLR0 */ +#define SPI_DFS_OFFSET 0 + +#define SPI_FRF_OFFSET 4 +#define SPI_FRF_SPI 0x0 +#define SPI_FRF_SSP 0x1 +#define SPI_FRF_MICROWIRE 0x2 +#define SPI_FRF_RESV 0x3 + +#define SPI_MODE_OFFSET 6 +#define SPI_SCPH_OFFSET 6 +#define SPI_SCOL_OFFSET 7 + +#define SPI_TMOD_OFFSET 8 +#define SPI_TMOD_MASK (0x3 << SPI_TMOD_OFFSET) +#define SPI_TMOD_TR 0x0 /* xmit & recv */ +#define SPI_TMOD_TO 0x1 /* xmit only */ +#define SPI_TMOD_RO 0x2 /* recv only */ +#define SPI_TMOD_EPROMREAD 0x3 /* eeprom read mode */ + +#define SPI_SLVOE_OFFSET 10 +#define SPI_SRL_OFFSET 11 +#define SPI_CFS_OFFSET 12 + +/* Bit fields in SR, 7 bits */ +#define SR_MASK 0x7f /* cover 7 bits */ +#define SR_BUSY (1 << 0) +#define SR_TF_NOT_FULL (1 << 1) +#define SR_TF_EMPT (1 << 2) +#define SR_RF_NOT_EMPT (1 << 3) +#define SR_RF_FULL (1 << 4) +#define SR_TX_ERR (1 << 5) +#define SR_DCOL (1 << 6) + +/* Bit fields in ISR, IMR, RISR, 7 bits */ +#define SPI_INT_TXEI (1 << 0) +#define SPI_INT_TXOI (1 << 1) +#define SPI_INT_RXUI (1 << 2) +#define SPI_INT_RXOI (1 << 3) +#define SPI_INT_RXFI (1 << 4) +#define SPI_INT_MSTI (1 << 5) + +/* TX RX interrupt level threshold, max can be 256 */ +#define SPI_INT_THRESHOLD 32 + +enum dw_ssi_type { + SSI_MOTO_SPI = 0, + SSI_TI_SSP, + SSI_NS_MICROWIRE, +}; + +struct dw_spi_reg { + u32 ctrl0; + u32 ctrl1; + u32 ssienr; + u32 mwcr; + u32 ser; + u32 baudr; + u32 txfltr; + u32 rxfltr; + u32 txflr; + u32 rxflr; + u32 sr; + u32 imr; + u32 isr; + u32 risr; + u32 txoicr; + u32 rxoicr; + u32 rxuicr; + u32 msticr; + u32 icr; + u32 dmacr; + u32 dmatdlr; + u32 dmardlr; + u32 idr; + u32 version; + u32 dr; /* Currently oper as 32 bits, + though only low 16 bits matters */ +} __packed; + +struct dw_spi; +struct dw_spi_dma_ops { + int (*dma_init)(struct dw_spi *dws); + void (*dma_exit)(struct dw_spi *dws); + int (*dma_transfer)(struct dw_spi *dws, int cs_change); +}; + +struct dw_spi { + struct spi_master *master; + struct spi_device *cur_dev; + struct device *parent_dev; + enum dw_ssi_type type; + + void __iomem *regs; + unsigned long paddr; + u32 iolen; + int irq; + u32 fifo_len; /* depth of the FIFO buffer */ + u32 max_freq; /* max bus freq supported */ + + u16 bus_num; + u16 num_cs; /* supported slave numbers */ + + /* Driver message queue */ + struct workqueue_struct *workqueue; + struct work_struct pump_messages; + spinlock_t lock; + struct list_head queue; + int busy; + int run; + + /* Message Transfer pump */ + struct tasklet_struct pump_transfers; + + /* Current message transfer state info */ + struct spi_message *cur_msg; + struct spi_transfer *cur_transfer; + struct chip_data *cur_chip; + struct chip_data *prev_chip; + size_t len; + void *tx; + void *tx_end; + void *rx; + void *rx_end; + int dma_mapped; + dma_addr_t rx_dma; + dma_addr_t tx_dma; + size_t rx_map_len; + size_t tx_map_len; + u8 n_bytes; /* current is a 1/2 bytes op */ + u8 max_bits_per_word; /* maxim is 16b */ + u32 dma_width; + int cs_change; + irqreturn_t (*transfer_handler)(struct dw_spi *dws); + void (*cs_control)(u32 command); + + /* Dma info */ + int dma_inited; + struct dma_chan *txchan; + struct scatterlist tx_sgl; + struct dma_chan *rxchan; + struct scatterlist rx_sgl; + int dma_chan_done; + struct device *dma_dev; + dma_addr_t dma_addr; /* phy address of the Data register */ + struct dw_spi_dma_ops *dma_ops; + 
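+	/* dma_ops is provided by the interface/platform layer (dw_spi_mid_init()
+	 * handles this for Intel MID); when it is NULL the core uses PIO only */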
void *dma_priv; /* platform relate info */ + struct pci_dev *dmac; + + /* Bus interface info */ + void *priv; +#ifdef CONFIG_DEBUG_FS + struct dentry *debugfs; +#endif +}; + +#define dw_readl(dw, name) \ + __raw_readl(&(((struct dw_spi_reg *)dw->regs)->name)) +#define dw_writel(dw, name, val) \ + __raw_writel((val), &(((struct dw_spi_reg *)dw->regs)->name)) +#define dw_readw(dw, name) \ + __raw_readw(&(((struct dw_spi_reg *)dw->regs)->name)) +#define dw_writew(dw, name, val) \ + __raw_writew((val), &(((struct dw_spi_reg *)dw->regs)->name)) + +static inline void spi_enable_chip(struct dw_spi *dws, int enable) +{ + dw_writel(dws, ssienr, (enable ? 1 : 0)); +} + +static inline void spi_set_clk(struct dw_spi *dws, u16 div) +{ + dw_writel(dws, baudr, div); +} + +static inline void spi_chip_sel(struct dw_spi *dws, u16 cs) +{ + if (cs > dws->num_cs) + return; + + if (dws->cs_control) + dws->cs_control(1); + + dw_writel(dws, ser, 1 << cs); +} + +/* Disable IRQ bits */ +static inline void spi_mask_intr(struct dw_spi *dws, u32 mask) +{ + u32 new_mask; + + new_mask = dw_readl(dws, imr) & ~mask; + dw_writel(dws, imr, new_mask); +} + +/* Enable IRQ bits */ +static inline void spi_umask_intr(struct dw_spi *dws, u32 mask) +{ + u32 new_mask; + + new_mask = dw_readl(dws, imr) | mask; + dw_writel(dws, imr, new_mask); +} + +/* + * Each SPI slave device to work with dw_api controller should + * has such a structure claiming its working mode (PIO/DMA etc), + * which can be save in the "controller_data" member of the + * struct spi_device + */ +struct dw_spi_chip { + u8 poll_mode; /* 0 for contoller polling mode */ + u8 type; /* SPI/SSP/Micrwire */ + u8 enable_dma; + void (*cs_control)(u32 command); +}; + +extern int dw_spi_add_host(struct dw_spi *dws); +extern void dw_spi_remove_host(struct dw_spi *dws); +extern int dw_spi_suspend_host(struct dw_spi *dws); +extern int dw_spi_resume_host(struct dw_spi *dws); +extern void dw_spi_xfer_done(struct dw_spi *dws); + +/* platform related setup */ +extern int dw_spi_mid_init(struct dw_spi *dws); /* Intel MID platforms */ +#endif /* DW_SPI_HEADER_H */ diff --git a/drivers/spi/spi-ep93xx.c b/drivers/spi/spi-ep93xx.c new file mode 100644 index 0000000..d357007 --- /dev/null +++ b/drivers/spi/spi-ep93xx.c @@ -0,0 +1,938 @@ +/* + * Driver for Cirrus Logic EP93xx SPI controller. + * + * Copyright (c) 2010 Mika Westerberg + * + * Explicit FIFO handling code was inspired by amba-pl022 driver. + * + * Chip select support using other than built-in GPIOs by H. Hartley Sweeten. + * + * For more information about the SPI controller see documentation on Cirrus + * Logic web site: + * http://www.cirrus.com/en/pubs/manual/EP93xx_Users_Guide_UM1.pdf + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define SSPCR0 0x0000 +#define SSPCR0_MODE_SHIFT 6 +#define SSPCR0_SCR_SHIFT 8 + +#define SSPCR1 0x0004 +#define SSPCR1_RIE BIT(0) +#define SSPCR1_TIE BIT(1) +#define SSPCR1_RORIE BIT(2) +#define SSPCR1_LBM BIT(3) +#define SSPCR1_SSE BIT(4) +#define SSPCR1_MS BIT(5) +#define SSPCR1_SOD BIT(6) + +#define SSPDR 0x0008 + +#define SSPSR 0x000c +#define SSPSR_TFE BIT(0) +#define SSPSR_TNF BIT(1) +#define SSPSR_RNE BIT(2) +#define SSPSR_RFF BIT(3) +#define SSPSR_BSY BIT(4) +#define SSPCPSR 0x0010 + +#define SSPIIR 0x0014 +#define SSPIIR_RIS BIT(0) +#define SSPIIR_TIS BIT(1) +#define SSPIIR_RORIS BIT(2) +#define SSPICR SSPIIR + +/* timeout in milliseconds */ +#define SPI_TIMEOUT 5 +/* maximum depth of RX/TX FIFO */ +#define SPI_FIFO_SIZE 8 + +/** + * struct ep93xx_spi - EP93xx SPI controller structure + * @lock: spinlock that protects concurrent accesses to fields @running, + * @current_msg and @msg_queue + * @pdev: pointer to platform device + * @clk: clock for the controller + * @regs_base: pointer to ioremap()'d registers + * @irq: IRQ number used by the driver + * @min_rate: minimum clock rate (in Hz) supported by the controller + * @max_rate: maximum clock rate (in Hz) supported by the controller + * @running: is the queue running + * @wq: workqueue used by the driver + * @msg_work: work that is queued for the driver + * @wait: wait here until given transfer is completed + * @msg_queue: queue for the messages + * @current_msg: message that is currently processed (or %NULL if none) + * @tx: current byte in transfer to transmit + * @rx: current byte in transfer to receive + * @fifo_level: how full is FIFO (%0..%SPI_FIFO_SIZE - %1). Receiving one + * frame decreases this level and sending one frame increases it. + * + * This structure holds EP93xx SPI controller specific information. When + * @running is %true, driver accepts transfer requests from protocol drivers. + * @current_msg is used to hold pointer to the message that is currently + * processed. If @current_msg is %NULL, it means that no processing is going + * on. + * + * Most of the fields are only written once and they can be accessed without + * taking the @lock. Fields that are accessed concurrently are: @current_msg, + * @running, and @msg_queue. + */ +struct ep93xx_spi { + spinlock_t lock; + const struct platform_device *pdev; + struct clk *clk; + void __iomem *regs_base; + int irq; + unsigned long min_rate; + unsigned long max_rate; + bool running; + struct workqueue_struct *wq; + struct work_struct msg_work; + struct completion wait; + struct list_head msg_queue; + struct spi_message *current_msg; + size_t tx; + size_t rx; + size_t fifo_level; +}; + +/** + * struct ep93xx_spi_chip - SPI device hardware settings + * @spi: back pointer to the SPI device + * @rate: max rate in hz this chip supports + * @div_cpsr: cpsr (pre-scaler) divider + * @div_scr: scr divider + * @dss: bits per word (4 - 16 bits) + * @ops: private chip operations + * + * This structure is used to store hardware register specific settings for each + * SPI device. Settings are written to hardware by function + * ep93xx_spi_chip_setup(). 
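+ *
+ * @ops comes from @spi->controller_data (supplied by board support code)
+ * and is optional; when present its setup/cleanup/cs_control hooks are
+ * called by this driver.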
+ */ +struct ep93xx_spi_chip { + const struct spi_device *spi; + unsigned long rate; + u8 div_cpsr; + u8 div_scr; + u8 dss; + struct ep93xx_spi_chip_ops *ops; +}; + +/* converts bits per word to CR0.DSS value */ +#define bits_per_word_to_dss(bpw) ((bpw) - 1) + +static inline void +ep93xx_spi_write_u8(const struct ep93xx_spi *espi, u16 reg, u8 value) +{ + __raw_writeb(value, espi->regs_base + reg); +} + +static inline u8 +ep93xx_spi_read_u8(const struct ep93xx_spi *spi, u16 reg) +{ + return __raw_readb(spi->regs_base + reg); +} + +static inline void +ep93xx_spi_write_u16(const struct ep93xx_spi *espi, u16 reg, u16 value) +{ + __raw_writew(value, espi->regs_base + reg); +} + +static inline u16 +ep93xx_spi_read_u16(const struct ep93xx_spi *spi, u16 reg) +{ + return __raw_readw(spi->regs_base + reg); +} + +static int ep93xx_spi_enable(const struct ep93xx_spi *espi) +{ + u8 regval; + int err; + + err = clk_enable(espi->clk); + if (err) + return err; + + regval = ep93xx_spi_read_u8(espi, SSPCR1); + regval |= SSPCR1_SSE; + ep93xx_spi_write_u8(espi, SSPCR1, regval); + + return 0; +} + +static void ep93xx_spi_disable(const struct ep93xx_spi *espi) +{ + u8 regval; + + regval = ep93xx_spi_read_u8(espi, SSPCR1); + regval &= ~SSPCR1_SSE; + ep93xx_spi_write_u8(espi, SSPCR1, regval); + + clk_disable(espi->clk); +} + +static void ep93xx_spi_enable_interrupts(const struct ep93xx_spi *espi) +{ + u8 regval; + + regval = ep93xx_spi_read_u8(espi, SSPCR1); + regval |= (SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE); + ep93xx_spi_write_u8(espi, SSPCR1, regval); +} + +static void ep93xx_spi_disable_interrupts(const struct ep93xx_spi *espi) +{ + u8 regval; + + regval = ep93xx_spi_read_u8(espi, SSPCR1); + regval &= ~(SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE); + ep93xx_spi_write_u8(espi, SSPCR1, regval); +} + +/** + * ep93xx_spi_calc_divisors() - calculates SPI clock divisors + * @espi: ep93xx SPI controller struct + * @chip: divisors are calculated for this chip + * @rate: desired SPI output clock rate + * + * Function calculates cpsr (clock pre-scaler) and scr divisors based on + * given @rate and places them to @chip->div_cpsr and @chip->div_scr. If, + * for some reason, divisors cannot be calculated nothing is stored and + * %-EINVAL is returned. + */ +static int ep93xx_spi_calc_divisors(const struct ep93xx_spi *espi, + struct ep93xx_spi_chip *chip, + unsigned long rate) +{ + unsigned long spi_clk_rate = clk_get_rate(espi->clk); + int cpsr, scr; + + /* + * Make sure that max value is between values supported by the + * controller. Note that minimum value is already checked in + * ep93xx_spi_transfer(). + */ + rate = clamp(rate, espi->min_rate, espi->max_rate); + + /* + * Calculate divisors so that we can get speed according the + * following formula: + * rate = spi_clock_rate / (cpsr * (1 + scr)) + * + * cpsr must be even number and starts from 2, scr can be any number + * between 0 and 255. + */ + for (cpsr = 2; cpsr <= 254; cpsr += 2) { + for (scr = 0; scr <= 255; scr++) { + if ((spi_clk_rate / (cpsr * (scr + 1))) <= rate) { + chip->div_scr = (u8)scr; + chip->div_cpsr = (u8)cpsr; + return 0; + } + } + } + + return -EINVAL; +} + +static void ep93xx_spi_cs_control(struct spi_device *spi, bool control) +{ + struct ep93xx_spi_chip *chip = spi_get_ctldata(spi); + int value = (spi->mode & SPI_CS_HIGH) ? 
control : !control; + + if (chip->ops && chip->ops->cs_control) + chip->ops->cs_control(spi, value); +} + +/** + * ep93xx_spi_setup() - setup an SPI device + * @spi: SPI device to setup + * + * This function sets up SPI device mode, speed etc. Can be called multiple + * times for a single device. Returns %0 in case of success, negative error in + * case of failure. When this function returns success, the device is + * deselected. + */ +static int ep93xx_spi_setup(struct spi_device *spi) +{ + struct ep93xx_spi *espi = spi_master_get_devdata(spi->master); + struct ep93xx_spi_chip *chip; + + if (spi->bits_per_word < 4 || spi->bits_per_word > 16) { + dev_err(&espi->pdev->dev, "invalid bits per word %d\n", + spi->bits_per_word); + return -EINVAL; + } + + chip = spi_get_ctldata(spi); + if (!chip) { + dev_dbg(&espi->pdev->dev, "initial setup for %s\n", + spi->modalias); + + chip = kzalloc(sizeof(*chip), GFP_KERNEL); + if (!chip) + return -ENOMEM; + + chip->spi = spi; + chip->ops = spi->controller_data; + + if (chip->ops && chip->ops->setup) { + int ret = chip->ops->setup(spi); + if (ret) { + kfree(chip); + return ret; + } + } + + spi_set_ctldata(spi, chip); + } + + if (spi->max_speed_hz != chip->rate) { + int err; + + err = ep93xx_spi_calc_divisors(espi, chip, spi->max_speed_hz); + if (err != 0) { + spi_set_ctldata(spi, NULL); + kfree(chip); + return err; + } + chip->rate = spi->max_speed_hz; + } + + chip->dss = bits_per_word_to_dss(spi->bits_per_word); + + ep93xx_spi_cs_control(spi, false); + return 0; +} + +/** + * ep93xx_spi_transfer() - queue message to be transferred + * @spi: target SPI device + * @msg: message to be transferred + * + * This function is called by SPI device drivers when they are going to transfer + * a new message. It simply puts the message in the queue and schedules + * workqueue to perform the actual transfer later on. + * + * Returns %0 on success and negative error in case of failure. + */ +static int ep93xx_spi_transfer(struct spi_device *spi, struct spi_message *msg) +{ + struct ep93xx_spi *espi = spi_master_get_devdata(spi->master); + struct spi_transfer *t; + unsigned long flags; + + if (!msg || !msg->complete) + return -EINVAL; + + /* first validate each transfer */ + list_for_each_entry(t, &msg->transfers, transfer_list) { + if (t->bits_per_word) { + if (t->bits_per_word < 4 || t->bits_per_word > 16) + return -EINVAL; + } + if (t->speed_hz && t->speed_hz < espi->min_rate) + return -EINVAL; + } + + /* + * Now that we own the message, let's initialize it so that it is + * suitable for us. We use @msg->status to signal whether there was + * error in transfer and @msg->state is used to hold pointer to the + * current transfer (or %NULL if no active current transfer). + */ + msg->state = NULL; + msg->status = 0; + msg->actual_length = 0; + + spin_lock_irqsave(&espi->lock, flags); + if (!espi->running) { + spin_unlock_irqrestore(&espi->lock, flags); + return -ESHUTDOWN; + } + list_add_tail(&msg->queue, &espi->msg_queue); + queue_work(espi->wq, &espi->msg_work); + spin_unlock_irqrestore(&espi->lock, flags); + + return 0; +} + +/** + * ep93xx_spi_cleanup() - cleans up master controller specific state + * @spi: SPI device to cleanup + * + * This function releases master controller specific state for given @spi + * device. 
+ */ +static void ep93xx_spi_cleanup(struct spi_device *spi) +{ + struct ep93xx_spi_chip *chip; + + chip = spi_get_ctldata(spi); + if (chip) { + if (chip->ops && chip->ops->cleanup) + chip->ops->cleanup(spi); + spi_set_ctldata(spi, NULL); + kfree(chip); + } +} + +/** + * ep93xx_spi_chip_setup() - configures hardware according to given @chip + * @espi: ep93xx SPI controller struct + * @chip: chip specific settings + * + * This function sets up the actual hardware registers with settings given in + * @chip. Note that no validation is done so make sure that callers validate + * settings before calling this. + */ +static void ep93xx_spi_chip_setup(const struct ep93xx_spi *espi, + const struct ep93xx_spi_chip *chip) +{ + u16 cr0; + + cr0 = chip->div_scr << SSPCR0_SCR_SHIFT; + cr0 |= (chip->spi->mode & (SPI_CPHA|SPI_CPOL)) << SSPCR0_MODE_SHIFT; + cr0 |= chip->dss; + + dev_dbg(&espi->pdev->dev, "setup: mode %d, cpsr %d, scr %d, dss %d\n", + chip->spi->mode, chip->div_cpsr, chip->div_scr, chip->dss); + dev_dbg(&espi->pdev->dev, "setup: cr0 %#x", cr0); + + ep93xx_spi_write_u8(espi, SSPCPSR, chip->div_cpsr); + ep93xx_spi_write_u16(espi, SSPCR0, cr0); +} + +static inline int bits_per_word(const struct ep93xx_spi *espi) +{ + struct spi_message *msg = espi->current_msg; + struct spi_transfer *t = msg->state; + + return t->bits_per_word ? t->bits_per_word : msg->spi->bits_per_word; +} + +static void ep93xx_do_write(struct ep93xx_spi *espi, struct spi_transfer *t) +{ + if (bits_per_word(espi) > 8) { + u16 tx_val = 0; + + if (t->tx_buf) + tx_val = ((u16 *)t->tx_buf)[espi->tx]; + ep93xx_spi_write_u16(espi, SSPDR, tx_val); + espi->tx += sizeof(tx_val); + } else { + u8 tx_val = 0; + + if (t->tx_buf) + tx_val = ((u8 *)t->tx_buf)[espi->tx]; + ep93xx_spi_write_u8(espi, SSPDR, tx_val); + espi->tx += sizeof(tx_val); + } +} + +static void ep93xx_do_read(struct ep93xx_spi *espi, struct spi_transfer *t) +{ + if (bits_per_word(espi) > 8) { + u16 rx_val; + + rx_val = ep93xx_spi_read_u16(espi, SSPDR); + if (t->rx_buf) + ((u16 *)t->rx_buf)[espi->rx] = rx_val; + espi->rx += sizeof(rx_val); + } else { + u8 rx_val; + + rx_val = ep93xx_spi_read_u8(espi, SSPDR); + if (t->rx_buf) + ((u8 *)t->rx_buf)[espi->rx] = rx_val; + espi->rx += sizeof(rx_val); + } +} + +/** + * ep93xx_spi_read_write() - perform next RX/TX transfer + * @espi: ep93xx SPI controller struct + * + * This function transfers next bytes (or half-words) to/from RX/TX FIFOs. If + * called several times, the whole transfer will be completed. Returns + * %-EINPROGRESS when current transfer was not yet completed otherwise %0. + * + * When this function is finished, RX FIFO should be empty and TX FIFO should be + * full. + */ +static int ep93xx_spi_read_write(struct ep93xx_spi *espi) +{ + struct spi_message *msg = espi->current_msg; + struct spi_transfer *t = msg->state; + + /* read as long as RX FIFO has frames in it */ + while ((ep93xx_spi_read_u8(espi, SSPSR) & SSPSR_RNE)) { + ep93xx_do_read(espi, t); + espi->fifo_level--; + } + + /* write as long as TX FIFO has room */ + while (espi->fifo_level < SPI_FIFO_SIZE && espi->tx < t->len) { + ep93xx_do_write(espi, t); + espi->fifo_level++; + } + + if (espi->rx == t->len) { + msg->actual_length += t->len; + return 0; + } + + return -EINPROGRESS; +} + +/** + * ep93xx_spi_process_transfer() - processes one SPI transfer + * @espi: ep93xx SPI controller struct + * @msg: current message + * @t: transfer to process + * + * This function processes one SPI transfer given in @t. 
Function waits until + * transfer is complete (may sleep) and updates @msg->status based on whether + * transfer was successfully processed or not. + */ +static void ep93xx_spi_process_transfer(struct ep93xx_spi *espi, + struct spi_message *msg, + struct spi_transfer *t) +{ + struct ep93xx_spi_chip *chip = spi_get_ctldata(msg->spi); + + msg->state = t; + + /* + * Handle any transfer specific settings if needed. We use + * temporary chip settings here and restore original later when + * the transfer is finished. + */ + if (t->speed_hz || t->bits_per_word) { + struct ep93xx_spi_chip tmp_chip = *chip; + + if (t->speed_hz) { + int err; + + err = ep93xx_spi_calc_divisors(espi, &tmp_chip, + t->speed_hz); + if (err) { + dev_err(&espi->pdev->dev, + "failed to adjust speed\n"); + msg->status = err; + return; + } + } + + if (t->bits_per_word) + tmp_chip.dss = bits_per_word_to_dss(t->bits_per_word); + + /* + * Set up temporary new hw settings for this transfer. + */ + ep93xx_spi_chip_setup(espi, &tmp_chip); + } + + espi->rx = 0; + espi->tx = 0; + + /* + * Now everything is set up for the current transfer. We prime the TX + * FIFO, enable interrupts, and wait for the transfer to complete. + */ + if (ep93xx_spi_read_write(espi)) { + ep93xx_spi_enable_interrupts(espi); + wait_for_completion(&espi->wait); + } + + /* + * In case of error during transmit, we bail out from processing + * the message. + */ + if (msg->status) + return; + + /* + * After this transfer is finished, perform any possible + * post-transfer actions requested by the protocol driver. + */ + if (t->delay_usecs) { + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(usecs_to_jiffies(t->delay_usecs)); + } + if (t->cs_change) { + if (!list_is_last(&t->transfer_list, &msg->transfers)) { + /* + * In case protocol driver is asking us to drop the + * chipselect briefly, we let the scheduler to handle + * any "delay" here. + */ + ep93xx_spi_cs_control(msg->spi, false); + cond_resched(); + ep93xx_spi_cs_control(msg->spi, true); + } + } + + if (t->speed_hz || t->bits_per_word) + ep93xx_spi_chip_setup(espi, chip); +} + +/* + * ep93xx_spi_process_message() - process one SPI message + * @espi: ep93xx SPI controller struct + * @msg: message to process + * + * This function processes a single SPI message. We go through all transfers in + * the message and pass them to ep93xx_spi_process_transfer(). Chipselect is + * asserted during the whole message (unless per transfer cs_change is set). + * + * @msg->status contains %0 in case of success or negative error code in case of + * failure. + */ +static void ep93xx_spi_process_message(struct ep93xx_spi *espi, + struct spi_message *msg) +{ + unsigned long timeout; + struct spi_transfer *t; + int err; + + /* + * Enable the SPI controller and its clock. + */ + err = ep93xx_spi_enable(espi); + if (err) { + dev_err(&espi->pdev->dev, "failed to enable SPI controller\n"); + msg->status = err; + return; + } + + /* + * Just to be sure: flush any data from RX FIFO. + */ + timeout = jiffies + msecs_to_jiffies(SPI_TIMEOUT); + while (ep93xx_spi_read_u16(espi, SSPSR) & SSPSR_RNE) { + if (time_after(jiffies, timeout)) { + dev_warn(&espi->pdev->dev, + "timeout while flushing RX FIFO\n"); + msg->status = -ETIMEDOUT; + return; + } + ep93xx_spi_read_u16(espi, SSPDR); + } + + /* + * We explicitly handle FIFO level. This way we don't have to check TX + * FIFO status using %SSPSR_TNF bit which may cause RX FIFO overruns. 
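+	 *
+	 * espi->fifo_level counts frames written to the controller but not
+	 * yet read back; ep93xx_spi_read_write() only writes while it stays
+	 * below %SPI_FIFO_SIZE, so the RX FIFO can never be overfilled.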
+ */ + espi->fifo_level = 0; + + /* + * Update SPI controller registers according to spi device and assert + * the chipselect. + */ + ep93xx_spi_chip_setup(espi, spi_get_ctldata(msg->spi)); + ep93xx_spi_cs_control(msg->spi, true); + + list_for_each_entry(t, &msg->transfers, transfer_list) { + ep93xx_spi_process_transfer(espi, msg, t); + if (msg->status) + break; + } + + /* + * Now the whole message is transferred (or failed for some reason). We + * deselect the device and disable the SPI controller. + */ + ep93xx_spi_cs_control(msg->spi, false); + ep93xx_spi_disable(espi); +} + +#define work_to_espi(work) (container_of((work), struct ep93xx_spi, msg_work)) + +/** + * ep93xx_spi_work() - EP93xx SPI workqueue worker function + * @work: work struct + * + * Workqueue worker function. This function is called when there are new + * SPI messages to be processed. Message is taken out from the queue and then + * passed to ep93xx_spi_process_message(). + * + * After message is transferred, protocol driver is notified by calling + * @msg->complete(). In case of error, @msg->status is set to negative error + * number, otherwise it contains zero (and @msg->actual_length is updated). + */ +static void ep93xx_spi_work(struct work_struct *work) +{ + struct ep93xx_spi *espi = work_to_espi(work); + struct spi_message *msg; + + spin_lock_irq(&espi->lock); + if (!espi->running || espi->current_msg || + list_empty(&espi->msg_queue)) { + spin_unlock_irq(&espi->lock); + return; + } + msg = list_first_entry(&espi->msg_queue, struct spi_message, queue); + list_del_init(&msg->queue); + espi->current_msg = msg; + spin_unlock_irq(&espi->lock); + + ep93xx_spi_process_message(espi, msg); + + /* + * Update the current message and re-schedule ourselves if there are + * more messages in the queue. + */ + spin_lock_irq(&espi->lock); + espi->current_msg = NULL; + if (espi->running && !list_empty(&espi->msg_queue)) + queue_work(espi->wq, &espi->msg_work); + spin_unlock_irq(&espi->lock); + + /* notify the protocol driver that we are done with this message */ + msg->complete(msg->context); +} + +static irqreturn_t ep93xx_spi_interrupt(int irq, void *dev_id) +{ + struct ep93xx_spi *espi = dev_id; + u8 irq_status = ep93xx_spi_read_u8(espi, SSPIIR); + + /* + * If we got ROR (receive overrun) interrupt we know that something is + * wrong. Just abort the message. + */ + if (unlikely(irq_status & SSPIIR_RORIS)) { + /* clear the overrun interrupt */ + ep93xx_spi_write_u8(espi, SSPICR, 0); + dev_warn(&espi->pdev->dev, + "receive overrun, aborting the message\n"); + espi->current_msg->status = -EIO; + } else { + /* + * Interrupt is either RX (RIS) or TX (TIS). For both cases we + * simply execute next data transfer. + */ + if (ep93xx_spi_read_write(espi)) { + /* + * In normal case, there still is some processing left + * for current transfer. Let's wait for the next + * interrupt then. + */ + return IRQ_HANDLED; + } + } + + /* + * Current transfer is finished, either with error or with success. In + * any case we disable interrupts and notify the worker to handle + * any post-processing of the message. 
+ */ + ep93xx_spi_disable_interrupts(espi); + complete(&espi->wait); + return IRQ_HANDLED; +} + +static int __init ep93xx_spi_probe(struct platform_device *pdev) +{ + struct spi_master *master; + struct ep93xx_spi_info *info; + struct ep93xx_spi *espi; + struct resource *res; + int error; + + info = pdev->dev.platform_data; + + master = spi_alloc_master(&pdev->dev, sizeof(*espi)); + if (!master) { + dev_err(&pdev->dev, "failed to allocate spi master\n"); + return -ENOMEM; + } + + master->setup = ep93xx_spi_setup; + master->transfer = ep93xx_spi_transfer; + master->cleanup = ep93xx_spi_cleanup; + master->bus_num = pdev->id; + master->num_chipselect = info->num_chipselect; + master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; + + platform_set_drvdata(pdev, master); + + espi = spi_master_get_devdata(master); + + espi->clk = clk_get(&pdev->dev, NULL); + if (IS_ERR(espi->clk)) { + dev_err(&pdev->dev, "unable to get spi clock\n"); + error = PTR_ERR(espi->clk); + goto fail_release_master; + } + + spin_lock_init(&espi->lock); + init_completion(&espi->wait); + + /* + * Calculate maximum and minimum supported clock rates + * for the controller. + */ + espi->max_rate = clk_get_rate(espi->clk) / 2; + espi->min_rate = clk_get_rate(espi->clk) / (254 * 256); + espi->pdev = pdev; + + espi->irq = platform_get_irq(pdev, 0); + if (espi->irq < 0) { + error = -EBUSY; + dev_err(&pdev->dev, "failed to get irq resources\n"); + goto fail_put_clock; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(&pdev->dev, "unable to get iomem resource\n"); + error = -ENODEV; + goto fail_put_clock; + } + + res = request_mem_region(res->start, resource_size(res), pdev->name); + if (!res) { + dev_err(&pdev->dev, "unable to request iomem resources\n"); + error = -EBUSY; + goto fail_put_clock; + } + + espi->regs_base = ioremap(res->start, resource_size(res)); + if (!espi->regs_base) { + dev_err(&pdev->dev, "failed to map resources\n"); + error = -ENODEV; + goto fail_free_mem; + } + + error = request_irq(espi->irq, ep93xx_spi_interrupt, 0, + "ep93xx-spi", espi); + if (error) { + dev_err(&pdev->dev, "failed to request irq\n"); + goto fail_unmap_regs; + } + + espi->wq = create_singlethread_workqueue("ep93xx_spid"); + if (!espi->wq) { + dev_err(&pdev->dev, "unable to create workqueue\n"); + goto fail_free_irq; + } + INIT_WORK(&espi->msg_work, ep93xx_spi_work); + INIT_LIST_HEAD(&espi->msg_queue); + espi->running = true; + + /* make sure that the hardware is disabled */ + ep93xx_spi_write_u8(espi, SSPCR1, 0); + + error = spi_register_master(master); + if (error) { + dev_err(&pdev->dev, "failed to register SPI master\n"); + goto fail_free_queue; + } + + dev_info(&pdev->dev, "EP93xx SPI Controller at 0x%08lx irq %d\n", + (unsigned long)res->start, espi->irq); + + return 0; + +fail_free_queue: + destroy_workqueue(espi->wq); +fail_free_irq: + free_irq(espi->irq, espi); +fail_unmap_regs: + iounmap(espi->regs_base); +fail_free_mem: + release_mem_region(res->start, resource_size(res)); +fail_put_clock: + clk_put(espi->clk); +fail_release_master: + spi_master_put(master); + platform_set_drvdata(pdev, NULL); + + return error; +} + +static int __exit ep93xx_spi_remove(struct platform_device *pdev) +{ + struct spi_master *master = platform_get_drvdata(pdev); + struct ep93xx_spi *espi = spi_master_get_devdata(master); + struct resource *res; + + spin_lock_irq(&espi->lock); + espi->running = false; + spin_unlock_irq(&espi->lock); + + destroy_workqueue(espi->wq); + + /* + * Complete remaining messages with 
%-ESHUTDOWN status. + */ + spin_lock_irq(&espi->lock); + while (!list_empty(&espi->msg_queue)) { + struct spi_message *msg; + + msg = list_first_entry(&espi->msg_queue, + struct spi_message, queue); + list_del_init(&msg->queue); + msg->status = -ESHUTDOWN; + spin_unlock_irq(&espi->lock); + msg->complete(msg->context); + spin_lock_irq(&espi->lock); + } + spin_unlock_irq(&espi->lock); + + free_irq(espi->irq, espi); + iounmap(espi->regs_base); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + release_mem_region(res->start, resource_size(res)); + clk_put(espi->clk); + platform_set_drvdata(pdev, NULL); + + spi_unregister_master(master); + return 0; +} + +static struct platform_driver ep93xx_spi_driver = { + .driver = { + .name = "ep93xx-spi", + .owner = THIS_MODULE, + }, + .remove = __exit_p(ep93xx_spi_remove), +}; + +static int __init ep93xx_spi_init(void) +{ + return platform_driver_probe(&ep93xx_spi_driver, ep93xx_spi_probe); +} +module_init(ep93xx_spi_init); + +static void __exit ep93xx_spi_exit(void) +{ + platform_driver_unregister(&ep93xx_spi_driver); +} +module_exit(ep93xx_spi_exit); + +MODULE_DESCRIPTION("EP93xx SPI Controller driver"); +MODULE_AUTHOR("Mika Westerberg "); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:ep93xx-spi"); diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c new file mode 100644 index 0000000..54e499d --- /dev/null +++ b/drivers/spi/spi-fsl-espi.c @@ -0,0 +1,762 @@ +/* + * Freescale eSPI controller driver. + * + * Copyright 2010 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "spi-fsl-lib.h" + +/* eSPI Controller registers */ +struct fsl_espi_reg { + __be32 mode; /* 0x000 - eSPI mode register */ + __be32 event; /* 0x004 - eSPI event register */ + __be32 mask; /* 0x008 - eSPI mask register */ + __be32 command; /* 0x00c - eSPI command register */ + __be32 transmit; /* 0x010 - eSPI transmit FIFO access register*/ + __be32 receive; /* 0x014 - eSPI receive FIFO access register*/ + u8 res[8]; /* 0x018 - 0x01c reserved */ + __be32 csmode[4]; /* 0x020 - 0x02c eSPI cs mode register */ +}; + +struct fsl_espi_transfer { + const void *tx_buf; + void *rx_buf; + unsigned len; + unsigned n_tx; + unsigned n_rx; + unsigned actual_length; + int status; +}; + +/* eSPI Controller mode register definitions */ +#define SPMODE_ENABLE (1 << 31) +#define SPMODE_LOOP (1 << 30) +#define SPMODE_TXTHR(x) ((x) << 8) +#define SPMODE_RXTHR(x) ((x) << 0) + +/* eSPI Controller CS mode register definitions */ +#define CSMODE_CI_INACTIVEHIGH (1 << 31) +#define CSMODE_CP_BEGIN_EDGECLK (1 << 30) +#define CSMODE_REV (1 << 29) +#define CSMODE_DIV16 (1 << 28) +#define CSMODE_PM(x) ((x) << 24) +#define CSMODE_POL_1 (1 << 20) +#define CSMODE_LEN(x) ((x) << 16) +#define CSMODE_BEF(x) ((x) << 12) +#define CSMODE_AFT(x) ((x) << 8) +#define CSMODE_CG(x) ((x) << 3) + +/* Default mode/csmode for eSPI controller */ +#define SPMODE_INIT_VAL (SPMODE_TXTHR(4) | SPMODE_RXTHR(3)) +#define CSMODE_INIT_VAL (CSMODE_POL_1 | CSMODE_BEF(0) \ + | CSMODE_AFT(0) | CSMODE_CG(1)) + +/* SPIE register values */ +#define SPIE_NE 0x00000200 /* Not empty */ +#define SPIE_NF 0x00000100 /* Not full */ + +/* SPIM register values */ +#define SPIM_NE 0x00000200 /* Not empty */ +#define SPIM_NF 0x00000100 /* Not full */ +#define SPIE_RXCNT(reg) ((reg >> 24) & 0x3F) +#define SPIE_TXCNT(reg) ((reg >> 16) & 0x3F) + +/* SPCOM register values */ +#define SPCOM_CS(x) ((x) << 30) +#define SPCOM_TRANLEN(x) ((x) << 0) +#define SPCOM_TRANLEN_MAX 0xFFFF /* Max transaction length */ + +static void fsl_espi_change_mode(struct spi_device *spi) +{ + struct mpc8xxx_spi *mspi = spi_master_get_devdata(spi->master); + struct spi_mpc8xxx_cs *cs = spi->controller_state; + struct fsl_espi_reg *reg_base = mspi->reg_base; + __be32 __iomem *mode = ®_base->csmode[spi->chip_select]; + __be32 __iomem *espi_mode = ®_base->mode; + u32 tmp; + unsigned long flags; + + /* Turn off IRQs locally to minimize time that SPI is disabled. 
+	local_irq_save(flags);
+
+	/* Turn off SPI unit prior changing mode */
+	tmp = mpc8xxx_spi_read_reg(espi_mode);
+	mpc8xxx_spi_write_reg(espi_mode, tmp & ~SPMODE_ENABLE);
+	mpc8xxx_spi_write_reg(mode, cs->hw_mode);
+	mpc8xxx_spi_write_reg(espi_mode, tmp);
+
+	local_irq_restore(flags);
+}
+
+static u32 fsl_espi_tx_buf_lsb(struct mpc8xxx_spi *mpc8xxx_spi)
+{
+	u32 data;
+	u16 data_h;
+	u16 data_l;
+	const u32 *tx = mpc8xxx_spi->tx;
+
+	if (!tx)
+		return 0;
+
+	data = *tx++ << mpc8xxx_spi->tx_shift;
+	data_l = data & 0xffff;
+	data_h = (data >> 16) & 0xffff;
+	swab16s(&data_l);
+	swab16s(&data_h);
+	data = data_h | data_l;
+
+	mpc8xxx_spi->tx = tx;
+	return data;
+}
+
+static int fsl_espi_setup_transfer(struct spi_device *spi,
+			struct spi_transfer *t)
+{
+	struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
+	int bits_per_word = 0;
+	u8 pm;
+	u32 hz = 0;
+	struct spi_mpc8xxx_cs *cs = spi->controller_state;
+
+	if (t) {
+		bits_per_word = t->bits_per_word;
+		hz = t->speed_hz;
+	}
+
+	/* spi_transfer level calls that work per-word */
+	if (!bits_per_word)
+		bits_per_word = spi->bits_per_word;
+
+	/* Make sure its a bit width we support [4..16] */
+	if ((bits_per_word < 4) || (bits_per_word > 16))
+		return -EINVAL;
+
+	if (!hz)
+		hz = spi->max_speed_hz;
+
+	cs->rx_shift = 0;
+	cs->tx_shift = 0;
+	cs->get_rx = mpc8xxx_spi_rx_buf_u32;
+	cs->get_tx = mpc8xxx_spi_tx_buf_u32;
+	if (bits_per_word <= 8) {
+		cs->rx_shift = 8 - bits_per_word;
+	} else if (bits_per_word <= 16) {
+		cs->rx_shift = 16 - bits_per_word;
+		if (spi->mode & SPI_LSB_FIRST)
+			cs->get_tx = fsl_espi_tx_buf_lsb;
+	} else {
+		return -EINVAL;
+	}
+
+	mpc8xxx_spi->rx_shift = cs->rx_shift;
+	mpc8xxx_spi->tx_shift = cs->tx_shift;
+	mpc8xxx_spi->get_rx = cs->get_rx;
+	mpc8xxx_spi->get_tx = cs->get_tx;
+
+	bits_per_word = bits_per_word - 1;
+
+	/* mask out bits we are going to set */
+	cs->hw_mode &= ~(CSMODE_LEN(0xF) | CSMODE_DIV16 | CSMODE_PM(0xF));
+
+	cs->hw_mode |= CSMODE_LEN(bits_per_word);
+
+	if ((mpc8xxx_spi->spibrg / hz) > 64) {
+		cs->hw_mode |= CSMODE_DIV16;
+		pm = (mpc8xxx_spi->spibrg - 1) / (hz * 64) + 1;
+
+		WARN_ONCE(pm > 16, "%s: Requested speed is too low: %d Hz. 
" + "Will use %d Hz instead.\n", dev_name(&spi->dev), + hz, mpc8xxx_spi->spibrg / 1024); + if (pm > 16) + pm = 16; + } else { + pm = (mpc8xxx_spi->spibrg - 1) / (hz * 4) + 1; + } + if (pm) + pm--; + + cs->hw_mode |= CSMODE_PM(pm); + + fsl_espi_change_mode(spi); + return 0; +} + +static int fsl_espi_cpu_bufs(struct mpc8xxx_spi *mspi, struct spi_transfer *t, + unsigned int len) +{ + u32 word; + struct fsl_espi_reg *reg_base = mspi->reg_base; + + mspi->count = len; + + /* enable rx ints */ + mpc8xxx_spi_write_reg(®_base->mask, SPIM_NE); + + /* transmit word */ + word = mspi->get_tx(mspi); + mpc8xxx_spi_write_reg(®_base->transmit, word); + + return 0; +} + +static int fsl_espi_bufs(struct spi_device *spi, struct spi_transfer *t) +{ + struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master); + struct fsl_espi_reg *reg_base = mpc8xxx_spi->reg_base; + unsigned int len = t->len; + u8 bits_per_word; + int ret; + + bits_per_word = spi->bits_per_word; + if (t->bits_per_word) + bits_per_word = t->bits_per_word; + + mpc8xxx_spi->len = t->len; + len = roundup(len, 4) / 4; + + mpc8xxx_spi->tx = t->tx_buf; + mpc8xxx_spi->rx = t->rx_buf; + + INIT_COMPLETION(mpc8xxx_spi->done); + + /* Set SPCOM[CS] and SPCOM[TRANLEN] field */ + if ((t->len - 1) > SPCOM_TRANLEN_MAX) { + dev_err(mpc8xxx_spi->dev, "Transaction length (%d)" + " beyond the SPCOM[TRANLEN] field\n", t->len); + return -EINVAL; + } + mpc8xxx_spi_write_reg(®_base->command, + (SPCOM_CS(spi->chip_select) | SPCOM_TRANLEN(t->len - 1))); + + ret = fsl_espi_cpu_bufs(mpc8xxx_spi, t, len); + if (ret) + return ret; + + wait_for_completion(&mpc8xxx_spi->done); + + /* disable rx ints */ + mpc8xxx_spi_write_reg(®_base->mask, 0); + + return mpc8xxx_spi->count; +} + +static inline void fsl_espi_addr2cmd(unsigned int addr, u8 *cmd) +{ + if (cmd) { + cmd[1] = (u8)(addr >> 16); + cmd[2] = (u8)(addr >> 8); + cmd[3] = (u8)(addr >> 0); + } +} + +static inline unsigned int fsl_espi_cmd2addr(u8 *cmd) +{ + if (cmd) + return cmd[1] << 16 | cmd[2] << 8 | cmd[3] << 0; + + return 0; +} + +static void fsl_espi_do_trans(struct spi_message *m, + struct fsl_espi_transfer *tr) +{ + struct spi_device *spi = m->spi; + struct mpc8xxx_spi *mspi = spi_master_get_devdata(spi->master); + struct fsl_espi_transfer *espi_trans = tr; + struct spi_message message; + struct spi_transfer *t, *first, trans; + int status = 0; + + spi_message_init(&message); + memset(&trans, 0, sizeof(trans)); + + first = list_first_entry(&m->transfers, struct spi_transfer, + transfer_list); + list_for_each_entry(t, &m->transfers, transfer_list) { + if ((first->bits_per_word != t->bits_per_word) || + (first->speed_hz != t->speed_hz)) { + espi_trans->status = -EINVAL; + dev_err(mspi->dev, "bits_per_word/speed_hz should be" + " same for the same SPI transfer\n"); + return; + } + + trans.speed_hz = t->speed_hz; + trans.bits_per_word = t->bits_per_word; + trans.delay_usecs = max(first->delay_usecs, t->delay_usecs); + } + + trans.len = espi_trans->len; + trans.tx_buf = espi_trans->tx_buf; + trans.rx_buf = espi_trans->rx_buf; + spi_message_add_tail(&trans, &message); + + list_for_each_entry(t, &message.transfers, transfer_list) { + if (t->bits_per_word || t->speed_hz) { + status = -EINVAL; + + status = fsl_espi_setup_transfer(spi, t); + if (status < 0) + break; + } + + if (t->len) + status = fsl_espi_bufs(spi, t); + + if (status) { + status = -EMSGSIZE; + break; + } + + if (t->delay_usecs) + udelay(t->delay_usecs); + } + + espi_trans->status = status; + fsl_espi_setup_transfer(spi, NULL); +} + +static void 
fsl_espi_cmd_trans(struct spi_message *m, + struct fsl_espi_transfer *trans, u8 *rx_buff) +{ + struct spi_transfer *t; + u8 *local_buf; + int i = 0; + struct fsl_espi_transfer *espi_trans = trans; + + local_buf = kzalloc(SPCOM_TRANLEN_MAX, GFP_KERNEL); + if (!local_buf) { + espi_trans->status = -ENOMEM; + return; + } + + list_for_each_entry(t, &m->transfers, transfer_list) { + if (t->tx_buf) { + memcpy(local_buf + i, t->tx_buf, t->len); + i += t->len; + } + } + + espi_trans->tx_buf = local_buf; + espi_trans->rx_buf = local_buf + espi_trans->n_tx; + fsl_espi_do_trans(m, espi_trans); + + espi_trans->actual_length = espi_trans->len; + kfree(local_buf); +} + +static void fsl_espi_rw_trans(struct spi_message *m, + struct fsl_espi_transfer *trans, u8 *rx_buff) +{ + struct fsl_espi_transfer *espi_trans = trans; + unsigned int n_tx = espi_trans->n_tx; + unsigned int n_rx = espi_trans->n_rx; + struct spi_transfer *t; + u8 *local_buf; + u8 *rx_buf = rx_buff; + unsigned int trans_len; + unsigned int addr; + int i, pos, loop; + + local_buf = kzalloc(SPCOM_TRANLEN_MAX, GFP_KERNEL); + if (!local_buf) { + espi_trans->status = -ENOMEM; + return; + } + + for (pos = 0, loop = 0; pos < n_rx; pos += trans_len, loop++) { + trans_len = n_rx - pos; + if (trans_len > SPCOM_TRANLEN_MAX - n_tx) + trans_len = SPCOM_TRANLEN_MAX - n_tx; + + i = 0; + list_for_each_entry(t, &m->transfers, transfer_list) { + if (t->tx_buf) { + memcpy(local_buf + i, t->tx_buf, t->len); + i += t->len; + } + } + + if (pos > 0) { + addr = fsl_espi_cmd2addr(local_buf); + addr += pos; + fsl_espi_addr2cmd(addr, local_buf); + } + + espi_trans->n_tx = n_tx; + espi_trans->n_rx = trans_len; + espi_trans->len = trans_len + n_tx; + espi_trans->tx_buf = local_buf; + espi_trans->rx_buf = local_buf + n_tx; + fsl_espi_do_trans(m, espi_trans); + + memcpy(rx_buf + pos, espi_trans->rx_buf + n_tx, trans_len); + + if (loop > 0) + espi_trans->actual_length += espi_trans->len - n_tx; + else + espi_trans->actual_length += espi_trans->len; + } + + kfree(local_buf); +} + +static void fsl_espi_do_one_msg(struct spi_message *m) +{ + struct spi_transfer *t; + u8 *rx_buf = NULL; + unsigned int n_tx = 0; + unsigned int n_rx = 0; + struct fsl_espi_transfer espi_trans; + + list_for_each_entry(t, &m->transfers, transfer_list) { + if (t->tx_buf) + n_tx += t->len; + if (t->rx_buf) { + n_rx += t->len; + rx_buf = t->rx_buf; + } + } + + espi_trans.n_tx = n_tx; + espi_trans.n_rx = n_rx; + espi_trans.len = n_tx + n_rx; + espi_trans.actual_length = 0; + espi_trans.status = 0; + + if (!rx_buf) + fsl_espi_cmd_trans(m, &espi_trans, NULL); + else + fsl_espi_rw_trans(m, &espi_trans, rx_buf); + + m->actual_length = espi_trans.actual_length; + m->status = espi_trans.status; + m->complete(m->context); +} + +static int fsl_espi_setup(struct spi_device *spi) +{ + struct mpc8xxx_spi *mpc8xxx_spi; + struct fsl_espi_reg *reg_base; + int retval; + u32 hw_mode; + u32 loop_mode; + struct spi_mpc8xxx_cs *cs = spi->controller_state; + + if (!spi->max_speed_hz) + return -EINVAL; + + if (!cs) { + cs = kzalloc(sizeof *cs, GFP_KERNEL); + if (!cs) + return -ENOMEM; + spi->controller_state = cs; + } + + mpc8xxx_spi = spi_master_get_devdata(spi->master); + reg_base = mpc8xxx_spi->reg_base; + + hw_mode = cs->hw_mode; /* Save original settings */ + cs->hw_mode = mpc8xxx_spi_read_reg( + ®_base->csmode[spi->chip_select]); + /* mask out bits we are going to set */ + cs->hw_mode &= ~(CSMODE_CP_BEGIN_EDGECLK | CSMODE_CI_INACTIVEHIGH + | CSMODE_REV); + + if (spi->mode & SPI_CPHA) + cs->hw_mode |= 
CSMODE_CP_BEGIN_EDGECLK; + if (spi->mode & SPI_CPOL) + cs->hw_mode |= CSMODE_CI_INACTIVEHIGH; + if (!(spi->mode & SPI_LSB_FIRST)) + cs->hw_mode |= CSMODE_REV; + + /* Handle the loop mode */ + loop_mode = mpc8xxx_spi_read_reg(®_base->mode); + loop_mode &= ~SPMODE_LOOP; + if (spi->mode & SPI_LOOP) + loop_mode |= SPMODE_LOOP; + mpc8xxx_spi_write_reg(®_base->mode, loop_mode); + + retval = fsl_espi_setup_transfer(spi, NULL); + if (retval < 0) { + cs->hw_mode = hw_mode; /* Restore settings */ + return retval; + } + return 0; +} + +void fsl_espi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events) +{ + struct fsl_espi_reg *reg_base = mspi->reg_base; + + /* We need handle RX first */ + if (events & SPIE_NE) { + u32 rx_data, tmp; + u8 rx_data_8; + + /* Spin until RX is done */ + while (SPIE_RXCNT(events) < min(4, mspi->len)) { + cpu_relax(); + events = mpc8xxx_spi_read_reg(®_base->event); + } + + if (mspi->len >= 4) { + rx_data = mpc8xxx_spi_read_reg(®_base->receive); + } else { + tmp = mspi->len; + rx_data = 0; + while (tmp--) { + rx_data_8 = in_8((u8 *)®_base->receive); + rx_data |= (rx_data_8 << (tmp * 8)); + } + + rx_data <<= (4 - mspi->len) * 8; + } + + mspi->len -= 4; + + if (mspi->rx) + mspi->get_rx(rx_data, mspi); + } + + if (!(events & SPIE_NF)) { + int ret; + + /* spin until TX is done */ + ret = spin_event_timeout(((events = mpc8xxx_spi_read_reg( + ®_base->event)) & SPIE_NF) == 0, 1000, 0); + if (!ret) { + dev_err(mspi->dev, "tired waiting for SPIE_NF\n"); + return; + } + } + + /* Clear the events */ + mpc8xxx_spi_write_reg(®_base->event, events); + + mspi->count -= 1; + if (mspi->count) { + u32 word = mspi->get_tx(mspi); + + mpc8xxx_spi_write_reg(®_base->transmit, word); + } else { + complete(&mspi->done); + } +} + +static irqreturn_t fsl_espi_irq(s32 irq, void *context_data) +{ + struct mpc8xxx_spi *mspi = context_data; + struct fsl_espi_reg *reg_base = mspi->reg_base; + irqreturn_t ret = IRQ_NONE; + u32 events; + + /* Get interrupt events(tx/rx) */ + events = mpc8xxx_spi_read_reg(®_base->event); + if (events) + ret = IRQ_HANDLED; + + dev_vdbg(mspi->dev, "%s: events %x\n", __func__, events); + + fsl_espi_cpu_irq(mspi, events); + + return ret; +} + +static void fsl_espi_remove(struct mpc8xxx_spi *mspi) +{ + iounmap(mspi->reg_base); +} + +static struct spi_master * __devinit fsl_espi_probe(struct device *dev, + struct resource *mem, unsigned int irq) +{ + struct fsl_spi_platform_data *pdata = dev->platform_data; + struct spi_master *master; + struct mpc8xxx_spi *mpc8xxx_spi; + struct fsl_espi_reg *reg_base; + u32 regval; + int i, ret = 0; + + master = spi_alloc_master(dev, sizeof(struct mpc8xxx_spi)); + if (!master) { + ret = -ENOMEM; + goto err; + } + + dev_set_drvdata(dev, master); + + ret = mpc8xxx_spi_probe(dev, mem, irq); + if (ret) + goto err_probe; + + master->setup = fsl_espi_setup; + + mpc8xxx_spi = spi_master_get_devdata(master); + mpc8xxx_spi->spi_do_one_msg = fsl_espi_do_one_msg; + mpc8xxx_spi->spi_remove = fsl_espi_remove; + + mpc8xxx_spi->reg_base = ioremap(mem->start, resource_size(mem)); + if (!mpc8xxx_spi->reg_base) { + ret = -ENOMEM; + goto err_probe; + } + + reg_base = mpc8xxx_spi->reg_base; + + /* Register for SPI Interrupt */ + ret = request_irq(mpc8xxx_spi->irq, fsl_espi_irq, + 0, "fsl_espi", mpc8xxx_spi); + if (ret) + goto free_irq; + + if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) { + mpc8xxx_spi->rx_shift = 16; + mpc8xxx_spi->tx_shift = 24; + } + + /* SPI controller initializations */ + mpc8xxx_spi_write_reg(®_base->mode, 0); + mpc8xxx_spi_write_reg(®_base->mask, 0); + 
mpc8xxx_spi_write_reg(®_base->command, 0); + mpc8xxx_spi_write_reg(®_base->event, 0xffffffff); + + /* Init eSPI CS mode register */ + for (i = 0; i < pdata->max_chipselect; i++) + mpc8xxx_spi_write_reg(®_base->csmode[i], CSMODE_INIT_VAL); + + /* Enable SPI interface */ + regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE; + + mpc8xxx_spi_write_reg(®_base->mode, regval); + + ret = spi_register_master(master); + if (ret < 0) + goto unreg_master; + + dev_info(dev, "at 0x%p (irq = %d)\n", reg_base, mpc8xxx_spi->irq); + + return master; + +unreg_master: + free_irq(mpc8xxx_spi->irq, mpc8xxx_spi); +free_irq: + iounmap(mpc8xxx_spi->reg_base); +err_probe: + spi_master_put(master); +err: + return ERR_PTR(ret); +} + +static int of_fsl_espi_get_chipselects(struct device *dev) +{ + struct device_node *np = dev->of_node; + struct fsl_spi_platform_data *pdata = dev->platform_data; + const u32 *prop; + int len; + + prop = of_get_property(np, "fsl,espi-num-chipselects", &len); + if (!prop || len < sizeof(*prop)) { + dev_err(dev, "No 'fsl,espi-num-chipselects' property\n"); + return -EINVAL; + } + + pdata->max_chipselect = *prop; + pdata->cs_control = NULL; + + return 0; +} + +static int __devinit of_fsl_espi_probe(struct platform_device *ofdev) +{ + struct device *dev = &ofdev->dev; + struct device_node *np = ofdev->dev.of_node; + struct spi_master *master; + struct resource mem; + struct resource irq; + int ret = -ENOMEM; + + ret = of_mpc8xxx_spi_probe(ofdev); + if (ret) + return ret; + + ret = of_fsl_espi_get_chipselects(dev); + if (ret) + goto err; + + ret = of_address_to_resource(np, 0, &mem); + if (ret) + goto err; + + ret = of_irq_to_resource(np, 0, &irq); + if (!ret) { + ret = -EINVAL; + goto err; + } + + master = fsl_espi_probe(dev, &mem, irq.start); + if (IS_ERR(master)) { + ret = PTR_ERR(master); + goto err; + } + + return 0; + +err: + return ret; +} + +static int __devexit of_fsl_espi_remove(struct platform_device *dev) +{ + return mpc8xxx_spi_remove(&dev->dev); +} + +static const struct of_device_id of_fsl_espi_match[] = { + { .compatible = "fsl,mpc8536-espi" }, + {} +}; +MODULE_DEVICE_TABLE(of, of_fsl_espi_match); + +static struct platform_driver fsl_espi_driver = { + .driver = { + .name = "fsl_espi", + .owner = THIS_MODULE, + .of_match_table = of_fsl_espi_match, + }, + .probe = of_fsl_espi_probe, + .remove = __devexit_p(of_fsl_espi_remove), +}; + +static int __init fsl_espi_init(void) +{ + return platform_driver_register(&fsl_espi_driver); +} +module_init(fsl_espi_init); + +static void __exit fsl_espi_exit(void) +{ + platform_driver_unregister(&fsl_espi_driver); +} +module_exit(fsl_espi_exit); + +MODULE_AUTHOR("Mingkai Hu"); +MODULE_DESCRIPTION("Enhanced Freescale SPI Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi-fsl-lib.c b/drivers/spi/spi-fsl-lib.c new file mode 100644 index 0000000..2674fad --- /dev/null +++ b/drivers/spi/spi-fsl-lib.c @@ -0,0 +1,236 @@ +/* + * Freescale SPI/eSPI controller driver library. + * + * Maintainer: Kumar Gala + * + * Copyright (C) 2006 Polycom, Inc. + * + * CPM SPI and QE buffer descriptors mode support: + * Copyright (c) 2009 MontaVista Software, Inc. + * Author: Anton Vorontsov + * + * Copyright 2010 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include + +#include "spi-fsl-lib.h" + +#define MPC8XXX_SPI_RX_BUF(type) \ +void mpc8xxx_spi_rx_buf_##type(u32 data, struct mpc8xxx_spi *mpc8xxx_spi) \ +{ \ + type *rx = mpc8xxx_spi->rx; \ + *rx++ = (type)(data >> mpc8xxx_spi->rx_shift); \ + mpc8xxx_spi->rx = rx; \ +} + +#define MPC8XXX_SPI_TX_BUF(type) \ +u32 mpc8xxx_spi_tx_buf_##type(struct mpc8xxx_spi *mpc8xxx_spi) \ +{ \ + u32 data; \ + const type *tx = mpc8xxx_spi->tx; \ + if (!tx) \ + return 0; \ + data = *tx++ << mpc8xxx_spi->tx_shift; \ + mpc8xxx_spi->tx = tx; \ + return data; \ +} + +MPC8XXX_SPI_RX_BUF(u8) +MPC8XXX_SPI_RX_BUF(u16) +MPC8XXX_SPI_RX_BUF(u32) +MPC8XXX_SPI_TX_BUF(u8) +MPC8XXX_SPI_TX_BUF(u16) +MPC8XXX_SPI_TX_BUF(u32) + +struct mpc8xxx_spi_probe_info *to_of_pinfo(struct fsl_spi_platform_data *pdata) +{ + return container_of(pdata, struct mpc8xxx_spi_probe_info, pdata); +} + +void mpc8xxx_spi_work(struct work_struct *work) +{ + struct mpc8xxx_spi *mpc8xxx_spi = container_of(work, struct mpc8xxx_spi, + work); + + spin_lock_irq(&mpc8xxx_spi->lock); + while (!list_empty(&mpc8xxx_spi->queue)) { + struct spi_message *m = container_of(mpc8xxx_spi->queue.next, + struct spi_message, queue); + + list_del_init(&m->queue); + spin_unlock_irq(&mpc8xxx_spi->lock); + + if (mpc8xxx_spi->spi_do_one_msg) + mpc8xxx_spi->spi_do_one_msg(m); + + spin_lock_irq(&mpc8xxx_spi->lock); + } + spin_unlock_irq(&mpc8xxx_spi->lock); +} + +int mpc8xxx_spi_transfer(struct spi_device *spi, + struct spi_message *m) +{ + struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master); + unsigned long flags; + + m->actual_length = 0; + m->status = -EINPROGRESS; + + spin_lock_irqsave(&mpc8xxx_spi->lock, flags); + list_add_tail(&m->queue, &mpc8xxx_spi->queue); + queue_work(mpc8xxx_spi->workqueue, &mpc8xxx_spi->work); + spin_unlock_irqrestore(&mpc8xxx_spi->lock, flags); + + return 0; +} + +void mpc8xxx_spi_cleanup(struct spi_device *spi) +{ + kfree(spi->controller_state); +} + +const char *mpc8xxx_spi_strmode(unsigned int flags) +{ + if (flags & SPI_QE_CPU_MODE) { + return "QE CPU"; + } else if (flags & SPI_CPM_MODE) { + if (flags & SPI_QE) + return "QE"; + else if (flags & SPI_CPM2) + return "CPM2"; + else + return "CPM1"; + } + return "CPU"; +} + +int mpc8xxx_spi_probe(struct device *dev, struct resource *mem, + unsigned int irq) +{ + struct fsl_spi_platform_data *pdata = dev->platform_data; + struct spi_master *master; + struct mpc8xxx_spi *mpc8xxx_spi; + int ret = 0; + + master = dev_get_drvdata(dev); + + /* the spi->mode bits understood by this driver: */ + master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH + | SPI_LSB_FIRST | SPI_LOOP; + + master->transfer = mpc8xxx_spi_transfer; + master->cleanup = mpc8xxx_spi_cleanup; + master->dev.of_node = dev->of_node; + + mpc8xxx_spi = spi_master_get_devdata(master); + mpc8xxx_spi->dev = dev; + mpc8xxx_spi->get_rx = mpc8xxx_spi_rx_buf_u8; + mpc8xxx_spi->get_tx = mpc8xxx_spi_tx_buf_u8; + mpc8xxx_spi->flags = pdata->flags; + mpc8xxx_spi->spibrg = pdata->sysclk; + mpc8xxx_spi->irq = irq; + + mpc8xxx_spi->rx_shift = 0; + mpc8xxx_spi->tx_shift = 0; + + init_completion(&mpc8xxx_spi->done); + + master->bus_num = pdata->bus_num; + master->num_chipselect = pdata->max_chipselect; + + spin_lock_init(&mpc8xxx_spi->lock); + init_completion(&mpc8xxx_spi->done); + INIT_WORK(&mpc8xxx_spi->work, mpc8xxx_spi_work); + INIT_LIST_HEAD(&mpc8xxx_spi->queue); + + mpc8xxx_spi->workqueue = create_singlethread_workqueue( + dev_name(master->dev.parent)); + if 
(mpc8xxx_spi->workqueue == NULL) { + ret = -EBUSY; + goto err; + } + + return 0; + +err: + return ret; +} + +int __devexit mpc8xxx_spi_remove(struct device *dev) +{ + struct mpc8xxx_spi *mpc8xxx_spi; + struct spi_master *master; + + master = dev_get_drvdata(dev); + mpc8xxx_spi = spi_master_get_devdata(master); + + flush_workqueue(mpc8xxx_spi->workqueue); + destroy_workqueue(mpc8xxx_spi->workqueue); + spi_unregister_master(master); + + free_irq(mpc8xxx_spi->irq, mpc8xxx_spi); + + if (mpc8xxx_spi->spi_remove) + mpc8xxx_spi->spi_remove(mpc8xxx_spi); + + return 0; +} + +int __devinit of_mpc8xxx_spi_probe(struct platform_device *ofdev) +{ + struct device *dev = &ofdev->dev; + struct device_node *np = ofdev->dev.of_node; + struct mpc8xxx_spi_probe_info *pinfo; + struct fsl_spi_platform_data *pdata; + const void *prop; + int ret = -ENOMEM; + + pinfo = kzalloc(sizeof(*pinfo), GFP_KERNEL); + if (!pinfo) + return -ENOMEM; + + pdata = &pinfo->pdata; + dev->platform_data = pdata; + + /* Allocate bus num dynamically. */ + pdata->bus_num = -1; + + /* SPI controller is either clocked from QE or SoC clock. */ + pdata->sysclk = get_brgfreq(); + if (pdata->sysclk == -1) { + pdata->sysclk = fsl_get_sys_freq(); + if (pdata->sysclk == -1) { + ret = -ENODEV; + goto err; + } + } + + prop = of_get_property(np, "mode", NULL); + if (prop && !strcmp(prop, "cpu-qe")) + pdata->flags = SPI_QE_CPU_MODE; + else if (prop && !strcmp(prop, "qe")) + pdata->flags = SPI_CPM_MODE | SPI_QE; + else if (of_device_is_compatible(np, "fsl,cpm2-spi")) + pdata->flags = SPI_CPM_MODE | SPI_CPM2; + else if (of_device_is_compatible(np, "fsl,cpm1-spi")) + pdata->flags = SPI_CPM_MODE | SPI_CPM1; + + return 0; + +err: + kfree(pinfo); + return ret; +} diff --git a/drivers/spi/spi-fsl-lib.h b/drivers/spi/spi-fsl-lib.h new file mode 100644 index 0000000..cbe881b --- /dev/null +++ b/drivers/spi/spi-fsl-lib.h @@ -0,0 +1,123 @@ +/* + * Freescale SPI/eSPI controller driver library. + * + * Maintainer: Kumar Gala + * + * Copyright 2010 Freescale Semiconductor, Inc. + * Copyright (C) 2006 Polycom, Inc. + * + * CPM SPI and QE buffer descriptors mode support: + * Copyright (c) 2009 MontaVista Software, Inc. + * Author: Anton Vorontsov + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ +#ifndef __SPI_FSL_LIB_H__ +#define __SPI_FSL_LIB_H__ + +#include + +/* SPI/eSPI Controller driver's private data. 
*/ +struct mpc8xxx_spi { + struct device *dev; + void *reg_base; + + /* rx & tx bufs from the spi_transfer */ + const void *tx; + void *rx; +#ifdef CONFIG_SPI_FSL_ESPI + int len; +#endif + + int subblock; + struct spi_pram __iomem *pram; + struct cpm_buf_desc __iomem *tx_bd; + struct cpm_buf_desc __iomem *rx_bd; + + struct spi_transfer *xfer_in_progress; + + /* dma addresses for CPM transfers */ + dma_addr_t tx_dma; + dma_addr_t rx_dma; + bool map_tx_dma; + bool map_rx_dma; + + dma_addr_t dma_dummy_tx; + dma_addr_t dma_dummy_rx; + + /* functions to deal with different sized buffers */ + void (*get_rx) (u32 rx_data, struct mpc8xxx_spi *); + u32(*get_tx) (struct mpc8xxx_spi *); + + /* hooks for different controller driver */ + void (*spi_do_one_msg) (struct spi_message *m); + void (*spi_remove) (struct mpc8xxx_spi *mspi); + + unsigned int count; + unsigned int irq; + + unsigned nsecs; /* (clock cycle time)/2 */ + + u32 spibrg; /* SPIBRG input clock */ + u32 rx_shift; /* RX data reg shift when in qe mode */ + u32 tx_shift; /* TX data reg shift when in qe mode */ + + unsigned int flags; + + struct workqueue_struct *workqueue; + struct work_struct work; + + struct list_head queue; + spinlock_t lock; + + struct completion done; +}; + +struct spi_mpc8xxx_cs { + /* functions to deal with different sized buffers */ + void (*get_rx) (u32 rx_data, struct mpc8xxx_spi *); + u32 (*get_tx) (struct mpc8xxx_spi *); + u32 rx_shift; /* RX data reg shift when in qe mode */ + u32 tx_shift; /* TX data reg shift when in qe mode */ + u32 hw_mode; /* Holds HW mode register settings */ +}; + +static inline void mpc8xxx_spi_write_reg(__be32 __iomem *reg, u32 val) +{ + out_be32(reg, val); +} + +static inline u32 mpc8xxx_spi_read_reg(__be32 __iomem *reg) +{ + return in_be32(reg); +} + +struct mpc8xxx_spi_probe_info { + struct fsl_spi_platform_data pdata; + int *gpios; + bool *alow_flags; +}; + +extern u32 mpc8xxx_spi_tx_buf_u8(struct mpc8xxx_spi *mpc8xxx_spi); +extern u32 mpc8xxx_spi_tx_buf_u16(struct mpc8xxx_spi *mpc8xxx_spi); +extern u32 mpc8xxx_spi_tx_buf_u32(struct mpc8xxx_spi *mpc8xxx_spi); +extern void mpc8xxx_spi_rx_buf_u8(u32 data, struct mpc8xxx_spi *mpc8xxx_spi); +extern void mpc8xxx_spi_rx_buf_u16(u32 data, struct mpc8xxx_spi *mpc8xxx_spi); +extern void mpc8xxx_spi_rx_buf_u32(u32 data, struct mpc8xxx_spi *mpc8xxx_spi); + +extern struct mpc8xxx_spi_probe_info *to_of_pinfo( + struct fsl_spi_platform_data *pdata); +extern int mpc8xxx_spi_bufs(struct mpc8xxx_spi *mspi, + struct spi_transfer *t, unsigned int len); +extern int mpc8xxx_spi_transfer(struct spi_device *spi, struct spi_message *m); +extern void mpc8xxx_spi_cleanup(struct spi_device *spi); +extern const char *mpc8xxx_spi_strmode(unsigned int flags); +extern int mpc8xxx_spi_probe(struct device *dev, struct resource *mem, + unsigned int irq); +extern int mpc8xxx_spi_remove(struct device *dev); +extern int of_mpc8xxx_spi_probe(struct platform_device *ofdev); + +#endif /* __SPI_FSL_LIB_H__ */ diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c new file mode 100644 index 0000000..e013117 --- /dev/null +++ b/drivers/spi/spi-fsl-spi.c @@ -0,0 +1,1192 @@ +/* + * Freescale SPI controller driver. + * + * Maintainer: Kumar Gala + * + * Copyright (C) 2006 Polycom, Inc. + * Copyright 2010 Freescale Semiconductor, Inc. + * + * CPM SPI and QE buffer descriptors mode support: + * Copyright (c) 2009 MontaVista Software, Inc. 
+ * Author: Anton Vorontsov + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "spi-fsl-lib.h" + +/* CPM1 and CPM2 are mutually exclusive. */ +#ifdef CONFIG_CPM1 +#include +#define CPM_SPI_CMD mk_cr_cmd(CPM_CR_CH_SPI, 0) +#else +#include +#define CPM_SPI_CMD mk_cr_cmd(CPM_CR_SPI_PAGE, CPM_CR_SPI_SBLOCK, 0, 0) +#endif + +/* SPI Controller registers */ +struct fsl_spi_reg { + u8 res1[0x20]; + __be32 mode; + __be32 event; + __be32 mask; + __be32 command; + __be32 transmit; + __be32 receive; +}; + +/* SPI Controller mode register definitions */ +#define SPMODE_LOOP (1 << 30) +#define SPMODE_CI_INACTIVEHIGH (1 << 29) +#define SPMODE_CP_BEGIN_EDGECLK (1 << 28) +#define SPMODE_DIV16 (1 << 27) +#define SPMODE_REV (1 << 26) +#define SPMODE_MS (1 << 25) +#define SPMODE_ENABLE (1 << 24) +#define SPMODE_LEN(x) ((x) << 20) +#define SPMODE_PM(x) ((x) << 16) +#define SPMODE_OP (1 << 14) +#define SPMODE_CG(x) ((x) << 7) + +/* + * Default for SPI Mode: + * SPI MODE 0 (inactive low, phase middle, MSB, 8-bit length, slow clk + */ +#define SPMODE_INIT_VAL (SPMODE_CI_INACTIVEHIGH | SPMODE_DIV16 | SPMODE_REV | \ + SPMODE_MS | SPMODE_LEN(7) | SPMODE_PM(0xf)) + +/* SPIE register values */ +#define SPIE_NE 0x00000200 /* Not empty */ +#define SPIE_NF 0x00000100 /* Not full */ + +/* SPIM register values */ +#define SPIM_NE 0x00000200 /* Not empty */ +#define SPIM_NF 0x00000100 /* Not full */ + +#define SPIE_TXB 0x00000200 /* Last char is written to tx fifo */ +#define SPIE_RXB 0x00000100 /* Last char is written to rx buf */ + +/* SPCOM register values */ +#define SPCOM_STR (1 << 23) /* Start transmit */ + +#define SPI_PRAM_SIZE 0x100 +#define SPI_MRBLR ((unsigned int)PAGE_SIZE) + +static void *fsl_dummy_rx; +static DEFINE_MUTEX(fsl_dummy_rx_lock); +static int fsl_dummy_rx_refcnt; + +static void fsl_spi_change_mode(struct spi_device *spi) +{ + struct mpc8xxx_spi *mspi = spi_master_get_devdata(spi->master); + struct spi_mpc8xxx_cs *cs = spi->controller_state; + struct fsl_spi_reg *reg_base = mspi->reg_base; + __be32 __iomem *mode = ®_base->mode; + unsigned long flags; + + if (cs->hw_mode == mpc8xxx_spi_read_reg(mode)) + return; + + /* Turn off IRQs locally to minimize time that SPI is disabled. */ + local_irq_save(flags); + + /* Turn off SPI unit prior changing mode */ + mpc8xxx_spi_write_reg(mode, cs->hw_mode & ~SPMODE_ENABLE); + + /* When in CPM mode, we need to reinit tx and rx. 
*/ + if (mspi->flags & SPI_CPM_MODE) { + if (mspi->flags & SPI_QE) { + qe_issue_cmd(QE_INIT_TX_RX, mspi->subblock, + QE_CR_PROTOCOL_UNSPECIFIED, 0); + } else { + cpm_command(CPM_SPI_CMD, CPM_CR_INIT_TRX); + if (mspi->flags & SPI_CPM1) { + out_be16(&mspi->pram->rbptr, + in_be16(&mspi->pram->rbase)); + out_be16(&mspi->pram->tbptr, + in_be16(&mspi->pram->tbase)); + } + } + } + mpc8xxx_spi_write_reg(mode, cs->hw_mode); + local_irq_restore(flags); +} + +static void fsl_spi_chipselect(struct spi_device *spi, int value) +{ + struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master); + struct fsl_spi_platform_data *pdata = spi->dev.parent->platform_data; + bool pol = spi->mode & SPI_CS_HIGH; + struct spi_mpc8xxx_cs *cs = spi->controller_state; + + if (value == BITBANG_CS_INACTIVE) { + if (pdata->cs_control) + pdata->cs_control(spi, !pol); + } + + if (value == BITBANG_CS_ACTIVE) { + mpc8xxx_spi->rx_shift = cs->rx_shift; + mpc8xxx_spi->tx_shift = cs->tx_shift; + mpc8xxx_spi->get_rx = cs->get_rx; + mpc8xxx_spi->get_tx = cs->get_tx; + + fsl_spi_change_mode(spi); + + if (pdata->cs_control) + pdata->cs_control(spi, pol); + } +} + +static int mspi_apply_cpu_mode_quirks(struct spi_mpc8xxx_cs *cs, + struct spi_device *spi, + struct mpc8xxx_spi *mpc8xxx_spi, + int bits_per_word) +{ + cs->rx_shift = 0; + cs->tx_shift = 0; + if (bits_per_word <= 8) { + cs->get_rx = mpc8xxx_spi_rx_buf_u8; + cs->get_tx = mpc8xxx_spi_tx_buf_u8; + if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) { + cs->rx_shift = 16; + cs->tx_shift = 24; + } + } else if (bits_per_word <= 16) { + cs->get_rx = mpc8xxx_spi_rx_buf_u16; + cs->get_tx = mpc8xxx_spi_tx_buf_u16; + if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) { + cs->rx_shift = 16; + cs->tx_shift = 16; + } + } else if (bits_per_word <= 32) { + cs->get_rx = mpc8xxx_spi_rx_buf_u32; + cs->get_tx = mpc8xxx_spi_tx_buf_u32; + } else + return -EINVAL; + + if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE && + spi->mode & SPI_LSB_FIRST) { + cs->tx_shift = 0; + if (bits_per_word <= 8) + cs->rx_shift = 8; + else + cs->rx_shift = 0; + } + mpc8xxx_spi->rx_shift = cs->rx_shift; + mpc8xxx_spi->tx_shift = cs->tx_shift; + mpc8xxx_spi->get_rx = cs->get_rx; + mpc8xxx_spi->get_tx = cs->get_tx; + + return bits_per_word; +} + +static int mspi_apply_qe_mode_quirks(struct spi_mpc8xxx_cs *cs, + struct spi_device *spi, + int bits_per_word) +{ + /* QE uses Little Endian for words > 8 + * so transform all words > 8 into 8 bits + * Unfortnatly that doesn't work for LSB so + * reject these for now */ + /* Note: 32 bits word, LSB works iff + * tfcr/rfcr is set to CPMFCR_GBL */ + if (spi->mode & SPI_LSB_FIRST && + bits_per_word > 8) + return -EINVAL; + if (bits_per_word > 8) + return 8; /* pretend its 8 bits */ + return bits_per_word; +} + +static int fsl_spi_setup_transfer(struct spi_device *spi, + struct spi_transfer *t) +{ + struct mpc8xxx_spi *mpc8xxx_spi; + int bits_per_word = 0; + u8 pm; + u32 hz = 0; + struct spi_mpc8xxx_cs *cs = spi->controller_state; + + mpc8xxx_spi = spi_master_get_devdata(spi->master); + + if (t) { + bits_per_word = t->bits_per_word; + hz = t->speed_hz; + } + + /* spi_transfer level calls that work per-word */ + if (!bits_per_word) + bits_per_word = spi->bits_per_word; + + /* Make sure its a bit width we support [4..16, 32] */ + if ((bits_per_word < 4) + || ((bits_per_word > 16) && (bits_per_word != 32))) + return -EINVAL; + + if (!hz) + hz = spi->max_speed_hz; + + if (!(mpc8xxx_spi->flags & SPI_CPM_MODE)) + bits_per_word = mspi_apply_cpu_mode_quirks(cs, spi, + mpc8xxx_spi, + bits_per_word); + else 
if (mpc8xxx_spi->flags & SPI_QE) + bits_per_word = mspi_apply_qe_mode_quirks(cs, spi, + bits_per_word); + + if (bits_per_word < 0) + return bits_per_word; + + if (bits_per_word == 32) + bits_per_word = 0; + else + bits_per_word = bits_per_word - 1; + + /* mask out bits we are going to set */ + cs->hw_mode &= ~(SPMODE_LEN(0xF) | SPMODE_DIV16 + | SPMODE_PM(0xF)); + + cs->hw_mode |= SPMODE_LEN(bits_per_word); + + if ((mpc8xxx_spi->spibrg / hz) > 64) { + cs->hw_mode |= SPMODE_DIV16; + pm = (mpc8xxx_spi->spibrg - 1) / (hz * 64) + 1; + + WARN_ONCE(pm > 16, "%s: Requested speed is too low: %d Hz. " + "Will use %d Hz instead.\n", dev_name(&spi->dev), + hz, mpc8xxx_spi->spibrg / 1024); + if (pm > 16) + pm = 16; + } else { + pm = (mpc8xxx_spi->spibrg - 1) / (hz * 4) + 1; + } + if (pm) + pm--; + + cs->hw_mode |= SPMODE_PM(pm); + + fsl_spi_change_mode(spi); + return 0; +} + +static void fsl_spi_cpm_bufs_start(struct mpc8xxx_spi *mspi) +{ + struct cpm_buf_desc __iomem *tx_bd = mspi->tx_bd; + struct cpm_buf_desc __iomem *rx_bd = mspi->rx_bd; + unsigned int xfer_len = min(mspi->count, SPI_MRBLR); + unsigned int xfer_ofs; + struct fsl_spi_reg *reg_base = mspi->reg_base; + + xfer_ofs = mspi->xfer_in_progress->len - mspi->count; + + if (mspi->rx_dma == mspi->dma_dummy_rx) + out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma); + else + out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma + xfer_ofs); + out_be16(&rx_bd->cbd_datlen, 0); + out_be16(&rx_bd->cbd_sc, BD_SC_EMPTY | BD_SC_INTRPT | BD_SC_WRAP); + + if (mspi->tx_dma == mspi->dma_dummy_tx) + out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma); + else + out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma + xfer_ofs); + out_be16(&tx_bd->cbd_datlen, xfer_len); + out_be16(&tx_bd->cbd_sc, BD_SC_READY | BD_SC_INTRPT | BD_SC_WRAP | + BD_SC_LAST); + + /* start transfer */ + mpc8xxx_spi_write_reg(®_base->command, SPCOM_STR); +} + +static int fsl_spi_cpm_bufs(struct mpc8xxx_spi *mspi, + struct spi_transfer *t, bool is_dma_mapped) +{ + struct device *dev = mspi->dev; + struct fsl_spi_reg *reg_base = mspi->reg_base; + + if (is_dma_mapped) { + mspi->map_tx_dma = 0; + mspi->map_rx_dma = 0; + } else { + mspi->map_tx_dma = 1; + mspi->map_rx_dma = 1; + } + + if (!t->tx_buf) { + mspi->tx_dma = mspi->dma_dummy_tx; + mspi->map_tx_dma = 0; + } + + if (!t->rx_buf) { + mspi->rx_dma = mspi->dma_dummy_rx; + mspi->map_rx_dma = 0; + } + + if (mspi->map_tx_dma) { + void *nonconst_tx = (void *)mspi->tx; /* shut up gcc */ + + mspi->tx_dma = dma_map_single(dev, nonconst_tx, t->len, + DMA_TO_DEVICE); + if (dma_mapping_error(dev, mspi->tx_dma)) { + dev_err(dev, "unable to map tx dma\n"); + return -ENOMEM; + } + } else if (t->tx_buf) { + mspi->tx_dma = t->tx_dma; + } + + if (mspi->map_rx_dma) { + mspi->rx_dma = dma_map_single(dev, mspi->rx, t->len, + DMA_FROM_DEVICE); + if (dma_mapping_error(dev, mspi->rx_dma)) { + dev_err(dev, "unable to map rx dma\n"); + goto err_rx_dma; + } + } else if (t->rx_buf) { + mspi->rx_dma = t->rx_dma; + } + + /* enable rx ints */ + mpc8xxx_spi_write_reg(®_base->mask, SPIE_RXB); + + mspi->xfer_in_progress = t; + mspi->count = t->len; + + /* start CPM transfers */ + fsl_spi_cpm_bufs_start(mspi); + + return 0; + +err_rx_dma: + if (mspi->map_tx_dma) + dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE); + return -ENOMEM; +} + +static void fsl_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi) +{ + struct device *dev = mspi->dev; + struct spi_transfer *t = mspi->xfer_in_progress; + + if (mspi->map_tx_dma) + dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE); + if (mspi->map_rx_dma) + 
dma_unmap_single(dev, mspi->rx_dma, t->len, DMA_FROM_DEVICE); + mspi->xfer_in_progress = NULL; +} + +static int fsl_spi_cpu_bufs(struct mpc8xxx_spi *mspi, + struct spi_transfer *t, unsigned int len) +{ + u32 word; + struct fsl_spi_reg *reg_base = mspi->reg_base; + + mspi->count = len; + + /* enable rx ints */ + mpc8xxx_spi_write_reg(®_base->mask, SPIM_NE); + + /* transmit word */ + word = mspi->get_tx(mspi); + mpc8xxx_spi_write_reg(®_base->transmit, word); + + return 0; +} + +static int fsl_spi_bufs(struct spi_device *spi, struct spi_transfer *t, + bool is_dma_mapped) +{ + struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master); + struct fsl_spi_reg *reg_base; + unsigned int len = t->len; + u8 bits_per_word; + int ret; + + reg_base = mpc8xxx_spi->reg_base; + bits_per_word = spi->bits_per_word; + if (t->bits_per_word) + bits_per_word = t->bits_per_word; + + if (bits_per_word > 8) { + /* invalid length? */ + if (len & 1) + return -EINVAL; + len /= 2; + } + if (bits_per_word > 16) { + /* invalid length? */ + if (len & 1) + return -EINVAL; + len /= 2; + } + + mpc8xxx_spi->tx = t->tx_buf; + mpc8xxx_spi->rx = t->rx_buf; + + INIT_COMPLETION(mpc8xxx_spi->done); + + if (mpc8xxx_spi->flags & SPI_CPM_MODE) + ret = fsl_spi_cpm_bufs(mpc8xxx_spi, t, is_dma_mapped); + else + ret = fsl_spi_cpu_bufs(mpc8xxx_spi, t, len); + if (ret) + return ret; + + wait_for_completion(&mpc8xxx_spi->done); + + /* disable rx ints */ + mpc8xxx_spi_write_reg(®_base->mask, 0); + + if (mpc8xxx_spi->flags & SPI_CPM_MODE) + fsl_spi_cpm_bufs_complete(mpc8xxx_spi); + + return mpc8xxx_spi->count; +} + +static void fsl_spi_do_one_msg(struct spi_message *m) +{ + struct spi_device *spi = m->spi; + struct spi_transfer *t; + unsigned int cs_change; + const int nsecs = 50; + int status; + + cs_change = 1; + status = 0; + list_for_each_entry(t, &m->transfers, transfer_list) { + if (t->bits_per_word || t->speed_hz) { + /* Don't allow changes if CS is active */ + status = -EINVAL; + + if (cs_change) + status = fsl_spi_setup_transfer(spi, t); + if (status < 0) + break; + } + + if (cs_change) { + fsl_spi_chipselect(spi, BITBANG_CS_ACTIVE); + ndelay(nsecs); + } + cs_change = t->cs_change; + if (t->len) + status = fsl_spi_bufs(spi, t, m->is_dma_mapped); + if (status) { + status = -EMSGSIZE; + break; + } + m->actual_length += t->len; + + if (t->delay_usecs) + udelay(t->delay_usecs); + + if (cs_change) { + ndelay(nsecs); + fsl_spi_chipselect(spi, BITBANG_CS_INACTIVE); + ndelay(nsecs); + } + } + + m->status = status; + m->complete(m->context); + + if (status || !cs_change) { + ndelay(nsecs); + fsl_spi_chipselect(spi, BITBANG_CS_INACTIVE); + } + + fsl_spi_setup_transfer(spi, NULL); +} + +static int fsl_spi_setup(struct spi_device *spi) +{ + struct mpc8xxx_spi *mpc8xxx_spi; + struct fsl_spi_reg *reg_base; + int retval; + u32 hw_mode; + struct spi_mpc8xxx_cs *cs = spi->controller_state; + + if (!spi->max_speed_hz) + return -EINVAL; + + if (!cs) { + cs = kzalloc(sizeof *cs, GFP_KERNEL); + if (!cs) + return -ENOMEM; + spi->controller_state = cs; + } + mpc8xxx_spi = spi_master_get_devdata(spi->master); + + reg_base = mpc8xxx_spi->reg_base; + + hw_mode = cs->hw_mode; /* Save original settings */ + cs->hw_mode = mpc8xxx_spi_read_reg(®_base->mode); + /* mask out bits we are going to set */ + cs->hw_mode &= ~(SPMODE_CP_BEGIN_EDGECLK | SPMODE_CI_INACTIVEHIGH + | SPMODE_REV | SPMODE_LOOP); + + if (spi->mode & SPI_CPHA) + cs->hw_mode |= SPMODE_CP_BEGIN_EDGECLK; + if (spi->mode & SPI_CPOL) + cs->hw_mode |= SPMODE_CI_INACTIVEHIGH; + if (!(spi->mode 
& SPI_LSB_FIRST)) + cs->hw_mode |= SPMODE_REV; + if (spi->mode & SPI_LOOP) + cs->hw_mode |= SPMODE_LOOP; + + retval = fsl_spi_setup_transfer(spi, NULL); + if (retval < 0) { + cs->hw_mode = hw_mode; /* Restore settings */ + return retval; + } + return 0; +} + +static void fsl_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events) +{ + u16 len; + struct fsl_spi_reg *reg_base = mspi->reg_base; + + dev_dbg(mspi->dev, "%s: bd datlen %d, count %d\n", __func__, + in_be16(&mspi->rx_bd->cbd_datlen), mspi->count); + + len = in_be16(&mspi->rx_bd->cbd_datlen); + if (len > mspi->count) { + WARN_ON(1); + len = mspi->count; + } + + /* Clear the events */ + mpc8xxx_spi_write_reg(®_base->event, events); + + mspi->count -= len; + if (mspi->count) + fsl_spi_cpm_bufs_start(mspi); + else + complete(&mspi->done); +} + +static void fsl_spi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events) +{ + struct fsl_spi_reg *reg_base = mspi->reg_base; + + /* We need handle RX first */ + if (events & SPIE_NE) { + u32 rx_data = mpc8xxx_spi_read_reg(®_base->receive); + + if (mspi->rx) + mspi->get_rx(rx_data, mspi); + } + + if ((events & SPIE_NF) == 0) + /* spin until TX is done */ + while (((events = + mpc8xxx_spi_read_reg(®_base->event)) & + SPIE_NF) == 0) + cpu_relax(); + + /* Clear the events */ + mpc8xxx_spi_write_reg(®_base->event, events); + + mspi->count -= 1; + if (mspi->count) { + u32 word = mspi->get_tx(mspi); + + mpc8xxx_spi_write_reg(®_base->transmit, word); + } else { + complete(&mspi->done); + } +} + +static irqreturn_t fsl_spi_irq(s32 irq, void *context_data) +{ + struct mpc8xxx_spi *mspi = context_data; + irqreturn_t ret = IRQ_NONE; + u32 events; + struct fsl_spi_reg *reg_base = mspi->reg_base; + + /* Get interrupt events(tx/rx) */ + events = mpc8xxx_spi_read_reg(®_base->event); + if (events) + ret = IRQ_HANDLED; + + dev_dbg(mspi->dev, "%s: events %x\n", __func__, events); + + if (mspi->flags & SPI_CPM_MODE) + fsl_spi_cpm_irq(mspi, events); + else + fsl_spi_cpu_irq(mspi, events); + + return ret; +} + +static void *fsl_spi_alloc_dummy_rx(void) +{ + mutex_lock(&fsl_dummy_rx_lock); + + if (!fsl_dummy_rx) + fsl_dummy_rx = kmalloc(SPI_MRBLR, GFP_KERNEL); + if (fsl_dummy_rx) + fsl_dummy_rx_refcnt++; + + mutex_unlock(&fsl_dummy_rx_lock); + + return fsl_dummy_rx; +} + +static void fsl_spi_free_dummy_rx(void) +{ + mutex_lock(&fsl_dummy_rx_lock); + + switch (fsl_dummy_rx_refcnt) { + case 0: + WARN_ON(1); + break; + case 1: + kfree(fsl_dummy_rx); + fsl_dummy_rx = NULL; + /* fall through */ + default: + fsl_dummy_rx_refcnt--; + break; + } + + mutex_unlock(&fsl_dummy_rx_lock); +} + +static unsigned long fsl_spi_cpm_get_pram(struct mpc8xxx_spi *mspi) +{ + struct device *dev = mspi->dev; + struct device_node *np = dev->of_node; + const u32 *iprop; + int size; + unsigned long spi_base_ofs; + unsigned long pram_ofs = -ENOMEM; + + /* Can't use of_address_to_resource(), QE muram isn't at 0. */ + iprop = of_get_property(np, "reg", &size); + + /* QE with a fixed pram location? */ + if (mspi->flags & SPI_QE && iprop && size == sizeof(*iprop) * 4) + return cpm_muram_alloc_fixed(iprop[2], SPI_PRAM_SIZE); + + /* QE but with a dynamic pram location? */ + if (mspi->flags & SPI_QE) { + pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64); + qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, mspi->subblock, + QE_CR_PROTOCOL_UNSPECIFIED, pram_ofs); + return pram_ofs; + } + + /* CPM1 and CPM2 pram must be at a fixed addr. 
*/ + if (!iprop || size != sizeof(*iprop) * 4) + return -ENOMEM; + + spi_base_ofs = cpm_muram_alloc_fixed(iprop[2], 2); + if (IS_ERR_VALUE(spi_base_ofs)) + return -ENOMEM; + + if (mspi->flags & SPI_CPM2) { + pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64); + if (!IS_ERR_VALUE(pram_ofs)) { + u16 __iomem *spi_base = cpm_muram_addr(spi_base_ofs); + + out_be16(spi_base, pram_ofs); + } + } else { + struct spi_pram __iomem *pram = cpm_muram_addr(spi_base_ofs); + u16 rpbase = in_be16(&pram->rpbase); + + /* Microcode relocation patch applied? */ + if (rpbase) + pram_ofs = rpbase; + else + return spi_base_ofs; + } + + cpm_muram_free(spi_base_ofs); + return pram_ofs; +} + +static int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi) +{ + struct device *dev = mspi->dev; + struct device_node *np = dev->of_node; + const u32 *iprop; + int size; + unsigned long pram_ofs; + unsigned long bds_ofs; + + if (!(mspi->flags & SPI_CPM_MODE)) + return 0; + + if (!fsl_spi_alloc_dummy_rx()) + return -ENOMEM; + + if (mspi->flags & SPI_QE) { + iprop = of_get_property(np, "cell-index", &size); + if (iprop && size == sizeof(*iprop)) + mspi->subblock = *iprop; + + switch (mspi->subblock) { + default: + dev_warn(dev, "cell-index unspecified, assuming SPI1"); + /* fall through */ + case 0: + mspi->subblock = QE_CR_SUBBLOCK_SPI1; + break; + case 1: + mspi->subblock = QE_CR_SUBBLOCK_SPI2; + break; + } + } + + pram_ofs = fsl_spi_cpm_get_pram(mspi); + if (IS_ERR_VALUE(pram_ofs)) { + dev_err(dev, "can't allocate spi parameter ram\n"); + goto err_pram; + } + + bds_ofs = cpm_muram_alloc(sizeof(*mspi->tx_bd) + + sizeof(*mspi->rx_bd), 8); + if (IS_ERR_VALUE(bds_ofs)) { + dev_err(dev, "can't allocate bds\n"); + goto err_bds; + } + + mspi->dma_dummy_tx = dma_map_single(dev, empty_zero_page, PAGE_SIZE, + DMA_TO_DEVICE); + if (dma_mapping_error(dev, mspi->dma_dummy_tx)) { + dev_err(dev, "unable to map dummy tx buffer\n"); + goto err_dummy_tx; + } + + mspi->dma_dummy_rx = dma_map_single(dev, fsl_dummy_rx, SPI_MRBLR, + DMA_FROM_DEVICE); + if (dma_mapping_error(dev, mspi->dma_dummy_rx)) { + dev_err(dev, "unable to map dummy rx buffer\n"); + goto err_dummy_rx; + } + + mspi->pram = cpm_muram_addr(pram_ofs); + + mspi->tx_bd = cpm_muram_addr(bds_ofs); + mspi->rx_bd = cpm_muram_addr(bds_ofs + sizeof(*mspi->tx_bd)); + + /* Initialize parameter ram. 
*/ + out_be16(&mspi->pram->tbase, cpm_muram_offset(mspi->tx_bd)); + out_be16(&mspi->pram->rbase, cpm_muram_offset(mspi->rx_bd)); + out_8(&mspi->pram->tfcr, CPMFCR_EB | CPMFCR_GBL); + out_8(&mspi->pram->rfcr, CPMFCR_EB | CPMFCR_GBL); + out_be16(&mspi->pram->mrblr, SPI_MRBLR); + out_be32(&mspi->pram->rstate, 0); + out_be32(&mspi->pram->rdp, 0); + out_be16(&mspi->pram->rbptr, 0); + out_be16(&mspi->pram->rbc, 0); + out_be32(&mspi->pram->rxtmp, 0); + out_be32(&mspi->pram->tstate, 0); + out_be32(&mspi->pram->tdp, 0); + out_be16(&mspi->pram->tbptr, 0); + out_be16(&mspi->pram->tbc, 0); + out_be32(&mspi->pram->txtmp, 0); + + return 0; + +err_dummy_rx: + dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE); +err_dummy_tx: + cpm_muram_free(bds_ofs); +err_bds: + cpm_muram_free(pram_ofs); +err_pram: + fsl_spi_free_dummy_rx(); + return -ENOMEM; +} + +static void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi) +{ + struct device *dev = mspi->dev; + + dma_unmap_single(dev, mspi->dma_dummy_rx, SPI_MRBLR, DMA_FROM_DEVICE); + dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE); + cpm_muram_free(cpm_muram_offset(mspi->tx_bd)); + cpm_muram_free(cpm_muram_offset(mspi->pram)); + fsl_spi_free_dummy_rx(); +} + +static void fsl_spi_remove(struct mpc8xxx_spi *mspi) +{ + iounmap(mspi->reg_base); + fsl_spi_cpm_free(mspi); +} + +static struct spi_master * __devinit fsl_spi_probe(struct device *dev, + struct resource *mem, unsigned int irq) +{ + struct fsl_spi_platform_data *pdata = dev->platform_data; + struct spi_master *master; + struct mpc8xxx_spi *mpc8xxx_spi; + struct fsl_spi_reg *reg_base; + u32 regval; + int ret = 0; + + master = spi_alloc_master(dev, sizeof(struct mpc8xxx_spi)); + if (master == NULL) { + ret = -ENOMEM; + goto err; + } + + dev_set_drvdata(dev, master); + + ret = mpc8xxx_spi_probe(dev, mem, irq); + if (ret) + goto err_probe; + + master->setup = fsl_spi_setup; + + mpc8xxx_spi = spi_master_get_devdata(master); + mpc8xxx_spi->spi_do_one_msg = fsl_spi_do_one_msg; + mpc8xxx_spi->spi_remove = fsl_spi_remove; + + + ret = fsl_spi_cpm_init(mpc8xxx_spi); + if (ret) + goto err_cpm_init; + + if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) { + mpc8xxx_spi->rx_shift = 16; + mpc8xxx_spi->tx_shift = 24; + } + + mpc8xxx_spi->reg_base = ioremap(mem->start, resource_size(mem)); + if (mpc8xxx_spi->reg_base == NULL) { + ret = -ENOMEM; + goto err_ioremap; + } + + /* Register for SPI Interrupt */ + ret = request_irq(mpc8xxx_spi->irq, fsl_spi_irq, + 0, "fsl_spi", mpc8xxx_spi); + + if (ret != 0) + goto free_irq; + + reg_base = mpc8xxx_spi->reg_base; + + /* SPI controller initializations */ + mpc8xxx_spi_write_reg(®_base->mode, 0); + mpc8xxx_spi_write_reg(®_base->mask, 0); + mpc8xxx_spi_write_reg(®_base->command, 0); + mpc8xxx_spi_write_reg(®_base->event, 0xffffffff); + + /* Enable SPI interface */ + regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE; + if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) + regval |= SPMODE_OP; + + mpc8xxx_spi_write_reg(®_base->mode, regval); + + ret = spi_register_master(master); + if (ret < 0) + goto unreg_master; + + dev_info(dev, "at 0x%p (irq = %d), %s mode\n", reg_base, + mpc8xxx_spi->irq, mpc8xxx_spi_strmode(mpc8xxx_spi->flags)); + + return master; + +unreg_master: + free_irq(mpc8xxx_spi->irq, mpc8xxx_spi); +free_irq: + iounmap(mpc8xxx_spi->reg_base); +err_ioremap: + fsl_spi_cpm_free(mpc8xxx_spi); +err_cpm_init: +err_probe: + spi_master_put(master); +err: + return ERR_PTR(ret); +} + +static void fsl_spi_cs_control(struct spi_device *spi, bool on) +{ + struct 
device *dev = spi->dev.parent; + struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(dev->platform_data); + u16 cs = spi->chip_select; + int gpio = pinfo->gpios[cs]; + bool alow = pinfo->alow_flags[cs]; + + gpio_set_value(gpio, on ^ alow); +} + +static int of_fsl_spi_get_chipselects(struct device *dev) +{ + struct device_node *np = dev->of_node; + struct fsl_spi_platform_data *pdata = dev->platform_data; + struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata); + unsigned int ngpios; + int i = 0; + int ret; + + ngpios = of_gpio_count(np); + if (!ngpios) { + /* + * SPI w/o chip-select line. One SPI device is still permitted + * though. + */ + pdata->max_chipselect = 1; + return 0; + } + + pinfo->gpios = kmalloc(ngpios * sizeof(*pinfo->gpios), GFP_KERNEL); + if (!pinfo->gpios) + return -ENOMEM; + memset(pinfo->gpios, -1, ngpios * sizeof(*pinfo->gpios)); + + pinfo->alow_flags = kzalloc(ngpios * sizeof(*pinfo->alow_flags), + GFP_KERNEL); + if (!pinfo->alow_flags) { + ret = -ENOMEM; + goto err_alloc_flags; + } + + for (; i < ngpios; i++) { + int gpio; + enum of_gpio_flags flags; + + gpio = of_get_gpio_flags(np, i, &flags); + if (!gpio_is_valid(gpio)) { + dev_err(dev, "invalid gpio #%d: %d\n", i, gpio); + ret = gpio; + goto err_loop; + } + + ret = gpio_request(gpio, dev_name(dev)); + if (ret) { + dev_err(dev, "can't request gpio #%d: %d\n", i, ret); + goto err_loop; + } + + pinfo->gpios[i] = gpio; + pinfo->alow_flags[i] = flags & OF_GPIO_ACTIVE_LOW; + + ret = gpio_direction_output(pinfo->gpios[i], + pinfo->alow_flags[i]); + if (ret) { + dev_err(dev, "can't set output direction for gpio " + "#%d: %d\n", i, ret); + goto err_loop; + } + } + + pdata->max_chipselect = ngpios; + pdata->cs_control = fsl_spi_cs_control; + + return 0; + +err_loop: + while (i >= 0) { + if (gpio_is_valid(pinfo->gpios[i])) + gpio_free(pinfo->gpios[i]); + i--; + } + + kfree(pinfo->alow_flags); + pinfo->alow_flags = NULL; +err_alloc_flags: + kfree(pinfo->gpios); + pinfo->gpios = NULL; + return ret; +} + +static int of_fsl_spi_free_chipselects(struct device *dev) +{ + struct fsl_spi_platform_data *pdata = dev->platform_data; + struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata); + int i; + + if (!pinfo->gpios) + return 0; + + for (i = 0; i < pdata->max_chipselect; i++) { + if (gpio_is_valid(pinfo->gpios[i])) + gpio_free(pinfo->gpios[i]); + } + + kfree(pinfo->gpios); + kfree(pinfo->alow_flags); + return 0; +} + +static int __devinit of_fsl_spi_probe(struct platform_device *ofdev) +{ + struct device *dev = &ofdev->dev; + struct device_node *np = ofdev->dev.of_node; + struct spi_master *master; + struct resource mem; + struct resource irq; + int ret = -ENOMEM; + + ret = of_mpc8xxx_spi_probe(ofdev); + if (ret) + return ret; + + ret = of_fsl_spi_get_chipselects(dev); + if (ret) + goto err; + + ret = of_address_to_resource(np, 0, &mem); + if (ret) + goto err; + + ret = of_irq_to_resource(np, 0, &irq); + if (!ret) { + ret = -EINVAL; + goto err; + } + + master = fsl_spi_probe(dev, &mem, irq.start); + if (IS_ERR(master)) { + ret = PTR_ERR(master); + goto err; + } + + return 0; + +err: + of_fsl_spi_free_chipselects(dev); + return ret; +} + +static int __devexit of_fsl_spi_remove(struct platform_device *ofdev) +{ + int ret; + + ret = mpc8xxx_spi_remove(&ofdev->dev); + if (ret) + return ret; + of_fsl_spi_free_chipselects(&ofdev->dev); + return 0; +} + +static const struct of_device_id of_fsl_spi_match[] = { + { .compatible = "fsl,spi" }, + {} +}; +MODULE_DEVICE_TABLE(of, of_fsl_spi_match); + +static struct platform_driver 
of_fsl_spi_driver = { + .driver = { + .name = "fsl_spi", + .owner = THIS_MODULE, + .of_match_table = of_fsl_spi_match, + }, + .probe = of_fsl_spi_probe, + .remove = __devexit_p(of_fsl_spi_remove), +}; + +#ifdef CONFIG_MPC832x_RDB +/* + * XXX XXX XXX + * This is "legacy" platform driver, was used by the MPC8323E-RDB boards + * only. The driver should go away soon, since newer MPC8323E-RDB's device + * tree can work with OpenFirmware driver. But for now we support old trees + * as well. + */ +static int __devinit plat_mpc8xxx_spi_probe(struct platform_device *pdev) +{ + struct resource *mem; + int irq; + struct spi_master *master; + + if (!pdev->dev.platform_data) + return -EINVAL; + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!mem) + return -EINVAL; + + irq = platform_get_irq(pdev, 0); + if (irq <= 0) + return -EINVAL; + + master = fsl_spi_probe(&pdev->dev, mem, irq); + if (IS_ERR(master)) + return PTR_ERR(master); + return 0; +} + +static int __devexit plat_mpc8xxx_spi_remove(struct platform_device *pdev) +{ + return mpc8xxx_spi_remove(&pdev->dev); +} + +MODULE_ALIAS("platform:mpc8xxx_spi"); +static struct platform_driver mpc8xxx_spi_driver = { + .probe = plat_mpc8xxx_spi_probe, + .remove = __devexit_p(plat_mpc8xxx_spi_remove), + .driver = { + .name = "mpc8xxx_spi", + .owner = THIS_MODULE, + }, +}; + +static bool legacy_driver_failed; + +static void __init legacy_driver_register(void) +{ + legacy_driver_failed = platform_driver_register(&mpc8xxx_spi_driver); +} + +static void __exit legacy_driver_unregister(void) +{ + if (legacy_driver_failed) + return; + platform_driver_unregister(&mpc8xxx_spi_driver); +} +#else +static void __init legacy_driver_register(void) {} +static void __exit legacy_driver_unregister(void) {} +#endif /* CONFIG_MPC832x_RDB */ + +static int __init fsl_spi_init(void) +{ + legacy_driver_register(); + return platform_driver_register(&of_fsl_spi_driver); +} +module_init(fsl_spi_init); + +static void __exit fsl_spi_exit(void) +{ + platform_driver_unregister(&of_fsl_spi_driver); + legacy_driver_unregister(); +} +module_exit(fsl_spi_exit); + +MODULE_AUTHOR("Kumar Gala"); +MODULE_DESCRIPTION("Simple Freescale SPI Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c new file mode 100644 index 0000000..0e88ab7 --- /dev/null +++ b/drivers/spi/spi-gpio.c @@ -0,0 +1,429 @@ +/* + * SPI master driver using generic bitbanged GPIO + * + * Copyright (C) 2006,2008 David Brownell + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ +#include +#include +#include +#include + +#include +#include +#include + + +/* + * This bitbanging SPI master driver should help make systems usable + * when a native hardware SPI engine is not available, perhaps because + * its driver isn't yet working or because the I/O pins it requires + * are used for other purposes. 
+ * + * platform_device->driver_data ... points to spi_gpio + * + * spi->controller_state ... reserved for bitbang framework code + * spi->controller_data ... holds chipselect GPIO + * + * spi->master->dev.driver_data ... points to spi_gpio->bitbang + */ + +struct spi_gpio { + struct spi_bitbang bitbang; + struct spi_gpio_platform_data pdata; + struct platform_device *pdev; +}; + +/*----------------------------------------------------------------------*/ + +/* + * Because the overhead of going through four GPIO procedure calls + * per transferred bit can make performance a problem, this code + * is set up so that you can use it in either of two ways: + * + * - The slow generic way: set up platform_data to hold the GPIO + * numbers used for MISO/MOSI/SCK, and issue procedure calls for + * each of them. This driver can handle several such busses. + * + * - The quicker inlined way: only helps with platform GPIO code + * that inlines operations for constant GPIOs. This can give + * you tight (fast!) inner loops, but each such bus needs a + * new driver. You'll define a new C file, with Makefile and + * Kconfig support; the C code can be a total of six lines: + * + * #define DRIVER_NAME "myboard_spi2" + * #define SPI_MISO_GPIO 119 + * #define SPI_MOSI_GPIO 120 + * #define SPI_SCK_GPIO 121 + * #define SPI_N_CHIPSEL 4 + * #include "spi-gpio.c" + */ + +#ifndef DRIVER_NAME +#define DRIVER_NAME "spi_gpio" + +#define GENERIC_BITBANG /* vs tight inlines */ + +/* all functions referencing these symbols must define pdata */ +#define SPI_MISO_GPIO ((pdata)->miso) +#define SPI_MOSI_GPIO ((pdata)->mosi) +#define SPI_SCK_GPIO ((pdata)->sck) + +#define SPI_N_CHIPSEL ((pdata)->num_chipselect) + +#endif + +/*----------------------------------------------------------------------*/ + +static inline const struct spi_gpio_platform_data * __pure +spi_to_pdata(const struct spi_device *spi) +{ + const struct spi_bitbang *bang; + const struct spi_gpio *spi_gpio; + + bang = spi_master_get_devdata(spi->master); + spi_gpio = container_of(bang, struct spi_gpio, bitbang); + return &spi_gpio->pdata; +} + +/* this is #defined to avoid unused-variable warnings when inlining */ +#define pdata spi_to_pdata(spi) + +static inline void setsck(const struct spi_device *spi, int is_on) +{ + gpio_set_value(SPI_SCK_GPIO, is_on); +} + +static inline void setmosi(const struct spi_device *spi, int is_on) +{ + gpio_set_value(SPI_MOSI_GPIO, is_on); +} + +static inline int getmiso(const struct spi_device *spi) +{ + return !!gpio_get_value(SPI_MISO_GPIO); +} + +#undef pdata + +/* + * NOTE: this clocks "as fast as we can". It "should" be a function of the + * requested device clock. Software overhead means we usually have trouble + * reaching even one Mbit/sec (except when we can inline bitops), so for now + * we'll just assume we never need additional per-bit slowdowns. + */ +#define spidelay(nsecs) do {} while (0) + +#include "spi-bitbang-txrx.h" + +/* + * These functions can leverage inline expansion of GPIO calls to shrink + * costs for a txrx bit, often by factors of around ten (by instruction + * count). That is particularly visible for larger word sizes, but helps + * even with default 8-bit words. + * + * REVISIT overheads calling these functions for each word also have + * significant performance costs. Having txrx_bufs() calls that inline + * the txrx_word() logic would help performance, e.g. on larger blocks + * used with flash storage or MMC/SD. 
There should also be ways to make + * GCC be less stupid about reloading registers inside the I/O loops, + * even without inlined GPIO calls; __attribute__((hot)) on GCC 4.3? + */ + +static u32 spi_gpio_txrx_word_mode0(struct spi_device *spi, + unsigned nsecs, u32 word, u8 bits) +{ + return bitbang_txrx_be_cpha0(spi, nsecs, 0, 0, word, bits); +} + +static u32 spi_gpio_txrx_word_mode1(struct spi_device *spi, + unsigned nsecs, u32 word, u8 bits) +{ + return bitbang_txrx_be_cpha1(spi, nsecs, 0, 0, word, bits); +} + +static u32 spi_gpio_txrx_word_mode2(struct spi_device *spi, + unsigned nsecs, u32 word, u8 bits) +{ + return bitbang_txrx_be_cpha0(spi, nsecs, 1, 0, word, bits); +} + +static u32 spi_gpio_txrx_word_mode3(struct spi_device *spi, + unsigned nsecs, u32 word, u8 bits) +{ + return bitbang_txrx_be_cpha1(spi, nsecs, 1, 0, word, bits); +} + +/* + * These functions do not call setmosi or getmiso if respective flag + * (SPI_MASTER_NO_RX or SPI_MASTER_NO_TX) is set, so they are safe to + * call when such pin is not present or defined in the controller. + * A separate set of callbacks is defined to get highest possible + * speed in the generic case (when both MISO and MOSI lines are + * available), as optimiser will remove the checks when argument is + * constant. + */ + +static u32 spi_gpio_spec_txrx_word_mode0(struct spi_device *spi, + unsigned nsecs, u32 word, u8 bits) +{ + unsigned flags = spi->master->flags; + return bitbang_txrx_be_cpha0(spi, nsecs, 0, flags, word, bits); +} + +static u32 spi_gpio_spec_txrx_word_mode1(struct spi_device *spi, + unsigned nsecs, u32 word, u8 bits) +{ + unsigned flags = spi->master->flags; + return bitbang_txrx_be_cpha1(spi, nsecs, 0, flags, word, bits); +} + +static u32 spi_gpio_spec_txrx_word_mode2(struct spi_device *spi, + unsigned nsecs, u32 word, u8 bits) +{ + unsigned flags = spi->master->flags; + return bitbang_txrx_be_cpha0(spi, nsecs, 1, flags, word, bits); +} + +static u32 spi_gpio_spec_txrx_word_mode3(struct spi_device *spi, + unsigned nsecs, u32 word, u8 bits) +{ + unsigned flags = spi->master->flags; + return bitbang_txrx_be_cpha1(spi, nsecs, 1, flags, word, bits); +} + +/*----------------------------------------------------------------------*/ + +static void spi_gpio_chipselect(struct spi_device *spi, int is_active) +{ + unsigned long cs = (unsigned long) spi->controller_data; + + /* set initial clock polarity */ + if (is_active) + setsck(spi, spi->mode & SPI_CPOL); + + if (cs != SPI_GPIO_NO_CHIPSELECT) { + /* SPI is normally active-low */ + gpio_set_value(cs, (spi->mode & SPI_CS_HIGH) ? 
is_active : !is_active); + } +} + +static int spi_gpio_setup(struct spi_device *spi) +{ + unsigned long cs = (unsigned long) spi->controller_data; + int status = 0; + + if (spi->bits_per_word > 32) + return -EINVAL; + + if (!spi->controller_state) { + if (cs != SPI_GPIO_NO_CHIPSELECT) { + status = gpio_request(cs, dev_name(&spi->dev)); + if (status) + return status; + status = gpio_direction_output(cs, spi->mode & SPI_CS_HIGH); + } + } + if (!status) + status = spi_bitbang_setup(spi); + if (status) { + if (!spi->controller_state && cs != SPI_GPIO_NO_CHIPSELECT) + gpio_free(cs); + } + return status; +} + +static void spi_gpio_cleanup(struct spi_device *spi) +{ + unsigned long cs = (unsigned long) spi->controller_data; + + if (cs != SPI_GPIO_NO_CHIPSELECT) + gpio_free(cs); + spi_bitbang_cleanup(spi); +} + +static int __init spi_gpio_alloc(unsigned pin, const char *label, bool is_in) +{ + int value; + + value = gpio_request(pin, label); + if (value == 0) { + if (is_in) + value = gpio_direction_input(pin); + else + value = gpio_direction_output(pin, 0); + } + return value; +} + +static int __init +spi_gpio_request(struct spi_gpio_platform_data *pdata, const char *label, + u16 *res_flags) +{ + int value; + + /* NOTE: SPI_*_GPIO symbols may reference "pdata" */ + + if (SPI_MOSI_GPIO != SPI_GPIO_NO_MOSI) { + value = spi_gpio_alloc(SPI_MOSI_GPIO, label, false); + if (value) + goto done; + } else { + /* HW configuration without MOSI pin */ + *res_flags |= SPI_MASTER_NO_TX; + } + + if (SPI_MISO_GPIO != SPI_GPIO_NO_MISO) { + value = spi_gpio_alloc(SPI_MISO_GPIO, label, true); + if (value) + goto free_mosi; + } else { + /* HW configuration without MISO pin */ + *res_flags |= SPI_MASTER_NO_RX; + } + + value = spi_gpio_alloc(SPI_SCK_GPIO, label, false); + if (value) + goto free_miso; + + goto done; + +free_miso: + if (SPI_MISO_GPIO != SPI_GPIO_NO_MISO) + gpio_free(SPI_MISO_GPIO); +free_mosi: + if (SPI_MOSI_GPIO != SPI_GPIO_NO_MOSI) + gpio_free(SPI_MOSI_GPIO); +done: + return value; +} + +static int __init spi_gpio_probe(struct platform_device *pdev) +{ + int status; + struct spi_master *master; + struct spi_gpio *spi_gpio; + struct spi_gpio_platform_data *pdata; + u16 master_flags = 0; + + pdata = pdev->dev.platform_data; +#ifdef GENERIC_BITBANG + if (!pdata || !pdata->num_chipselect) + return -ENODEV; +#endif + + status = spi_gpio_request(pdata, dev_name(&pdev->dev), &master_flags); + if (status < 0) + return status; + + master = spi_alloc_master(&pdev->dev, sizeof *spi_gpio); + if (!master) { + status = -ENOMEM; + goto gpio_free; + } + spi_gpio = spi_master_get_devdata(master); + platform_set_drvdata(pdev, spi_gpio); + + spi_gpio->pdev = pdev; + if (pdata) + spi_gpio->pdata = *pdata; + + master->flags = master_flags; + master->bus_num = pdev->id; + master->num_chipselect = SPI_N_CHIPSEL; + master->setup = spi_gpio_setup; + master->cleanup = spi_gpio_cleanup; + + spi_gpio->bitbang.master = spi_master_get(master); + spi_gpio->bitbang.chipselect = spi_gpio_chipselect; + + if ((master_flags & (SPI_MASTER_NO_TX | SPI_MASTER_NO_RX)) == 0) { + spi_gpio->bitbang.txrx_word[SPI_MODE_0] = spi_gpio_txrx_word_mode0; + spi_gpio->bitbang.txrx_word[SPI_MODE_1] = spi_gpio_txrx_word_mode1; + spi_gpio->bitbang.txrx_word[SPI_MODE_2] = spi_gpio_txrx_word_mode2; + spi_gpio->bitbang.txrx_word[SPI_MODE_3] = spi_gpio_txrx_word_mode3; + } else { + spi_gpio->bitbang.txrx_word[SPI_MODE_0] = spi_gpio_spec_txrx_word_mode0; + spi_gpio->bitbang.txrx_word[SPI_MODE_1] = spi_gpio_spec_txrx_word_mode1; + 
spi_gpio->bitbang.txrx_word[SPI_MODE_2] = spi_gpio_spec_txrx_word_mode2; + spi_gpio->bitbang.txrx_word[SPI_MODE_3] = spi_gpio_spec_txrx_word_mode3; + } + spi_gpio->bitbang.setup_transfer = spi_bitbang_setup_transfer; + spi_gpio->bitbang.flags = SPI_CS_HIGH; + + status = spi_bitbang_start(&spi_gpio->bitbang); + if (status < 0) { + spi_master_put(spi_gpio->bitbang.master); +gpio_free: + if (SPI_MISO_GPIO != SPI_GPIO_NO_MISO) + gpio_free(SPI_MISO_GPIO); + if (SPI_MOSI_GPIO != SPI_GPIO_NO_MOSI) + gpio_free(SPI_MOSI_GPIO); + gpio_free(SPI_SCK_GPIO); + spi_master_put(master); + } + + return status; +} + +static int __exit spi_gpio_remove(struct platform_device *pdev) +{ + struct spi_gpio *spi_gpio; + struct spi_gpio_platform_data *pdata; + int status; + + spi_gpio = platform_get_drvdata(pdev); + pdata = pdev->dev.platform_data; + + /* stop() unregisters child devices too */ + status = spi_bitbang_stop(&spi_gpio->bitbang); + spi_master_put(spi_gpio->bitbang.master); + + platform_set_drvdata(pdev, NULL); + + if (SPI_MISO_GPIO != SPI_GPIO_NO_MISO) + gpio_free(SPI_MISO_GPIO); + if (SPI_MOSI_GPIO != SPI_GPIO_NO_MOSI) + gpio_free(SPI_MOSI_GPIO); + gpio_free(SPI_SCK_GPIO); + + return status; +} + +MODULE_ALIAS("platform:" DRIVER_NAME); + +static struct platform_driver spi_gpio_driver = { + .driver.name = DRIVER_NAME, + .driver.owner = THIS_MODULE, + .remove = __exit_p(spi_gpio_remove), +}; + +static int __init spi_gpio_init(void) +{ + return platform_driver_probe(&spi_gpio_driver, spi_gpio_probe); +} +module_init(spi_gpio_init); + +static void __exit spi_gpio_exit(void) +{ + platform_driver_unregister(&spi_gpio_driver); +} +module_exit(spi_gpio_exit); + + +MODULE_DESCRIPTION("SPI master driver using generic bitbanged GPIO "); +MODULE_AUTHOR("David Brownell"); +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c new file mode 100644 index 0000000..69d6dba --- /dev/null +++ b/drivers/spi/spi-imx.c @@ -0,0 +1,944 @@ +/* + * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved. + * Copyright (C) 2008 Juergen Beisert + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the + * Free Software Foundation + * 51 Franklin Street, Fifth Floor + * Boston, MA 02110-1301, USA. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define DRIVER_NAME "spi_imx" + +#define MXC_CSPIRXDATA 0x00 +#define MXC_CSPITXDATA 0x04 +#define MXC_CSPICTRL 0x08 +#define MXC_CSPIINT 0x0c +#define MXC_RESET 0x1c + +#define MX3_CSPISTAT 0x14 +#define MX3_CSPISTAT_RR (1 << 3) + +/* generic defines to abstract from the different register layouts */ +#define MXC_INT_RR (1 << 0) /* Receive data ready interrupt */ +#define MXC_INT_TE (1 << 1) /* Transmit FIFO empty interrupt */ + +struct spi_imx_config { + unsigned int speed_hz; + unsigned int bpw; + unsigned int mode; + u8 cs; +}; + +enum spi_imx_devtype { + SPI_IMX_VER_IMX1, + SPI_IMX_VER_0_0, + SPI_IMX_VER_0_4, + SPI_IMX_VER_0_5, + SPI_IMX_VER_0_7, + SPI_IMX_VER_2_3, +}; + +struct spi_imx_data; + +struct spi_imx_devtype_data { + void (*intctrl)(struct spi_imx_data *, int); + int (*config)(struct spi_imx_data *, struct spi_imx_config *); + void (*trigger)(struct spi_imx_data *); + int (*rx_available)(struct spi_imx_data *); + void (*reset)(struct spi_imx_data *); + unsigned int fifosize; +}; + +struct spi_imx_data { + struct spi_bitbang bitbang; + + struct completion xfer_done; + void *base; + int irq; + struct clk *clk; + unsigned long spi_clk; + int *chipselect; + + unsigned int count; + void (*tx)(struct spi_imx_data *); + void (*rx)(struct spi_imx_data *); + void *rx_buf; + const void *tx_buf; + unsigned int txfifo; /* number of words pushed in tx FIFO */ + + struct spi_imx_devtype_data devtype_data; +}; + +#define MXC_SPI_BUF_RX(type) \ +static void spi_imx_buf_rx_##type(struct spi_imx_data *spi_imx) \ +{ \ + unsigned int val = readl(spi_imx->base + MXC_CSPIRXDATA); \ + \ + if (spi_imx->rx_buf) { \ + *(type *)spi_imx->rx_buf = val; \ + spi_imx->rx_buf += sizeof(type); \ + } \ +} + +#define MXC_SPI_BUF_TX(type) \ +static void spi_imx_buf_tx_##type(struct spi_imx_data *spi_imx) \ +{ \ + type val = 0; \ + \ + if (spi_imx->tx_buf) { \ + val = *(type *)spi_imx->tx_buf; \ + spi_imx->tx_buf += sizeof(type); \ + } \ + \ + spi_imx->count -= sizeof(type); \ + \ + writel(val, spi_imx->base + MXC_CSPITXDATA); \ +} + +MXC_SPI_BUF_RX(u8) +MXC_SPI_BUF_TX(u8) +MXC_SPI_BUF_RX(u16) +MXC_SPI_BUF_TX(u16) +MXC_SPI_BUF_RX(u32) +MXC_SPI_BUF_TX(u32) + +/* First entry is reserved, second entry is valid only if SDHC_SPIEN is set + * (which is currently not the case in this driver) + */ +static int mxc_clkdivs[] = {0, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192, + 256, 384, 512, 768, 1024}; + +/* MX21, MX27 */ +static unsigned int spi_imx_clkdiv_1(unsigned int fin, + unsigned int fspi) +{ + int i, max; + + if (cpu_is_mx21()) + max = 18; + else + max = 16; + + for (i = 2; i < max; i++) + if (fspi * mxc_clkdivs[i] >= fin) + return i; + + return max; +} + +/* MX1, MX31, MX35, MX51 CSPI */ +static unsigned int spi_imx_clkdiv_2(unsigned int fin, + unsigned int fspi) +{ + int i, div = 4; + + for (i = 0; i < 7; i++) { + if (fspi * div >= fin) + return i; + div <<= 1; + } + + return 7; +} + +#define SPI_IMX2_3_CTRL 0x08 +#define SPI_IMX2_3_CTRL_ENABLE (1 << 0) +#define SPI_IMX2_3_CTRL_XCH (1 << 2) +#define SPI_IMX2_3_CTRL_MODE_MASK (0xf << 4) +#define SPI_IMX2_3_CTRL_POSTDIV_OFFSET 8 +#define SPI_IMX2_3_CTRL_PREDIV_OFFSET 12 +#define SPI_IMX2_3_CTRL_CS(cs) ((cs) << 18) +#define SPI_IMX2_3_CTRL_BL_OFFSET 20 + +#define SPI_IMX2_3_CONFIG 0x0c +#define SPI_IMX2_3_CONFIG_SCLKPHA(cs) (1 << ((cs) + 0)) +#define SPI_IMX2_3_CONFIG_SCLKPOL(cs) 
(1 << ((cs) + 4)) +#define SPI_IMX2_3_CONFIG_SBBCTRL(cs) (1 << ((cs) + 8)) +#define SPI_IMX2_3_CONFIG_SSBPOL(cs) (1 << ((cs) + 12)) + +#define SPI_IMX2_3_INT 0x10 +#define SPI_IMX2_3_INT_TEEN (1 << 0) +#define SPI_IMX2_3_INT_RREN (1 << 3) + +#define SPI_IMX2_3_STAT 0x18 +#define SPI_IMX2_3_STAT_RR (1 << 3) + +/* MX51 eCSPI */ +static unsigned int spi_imx2_3_clkdiv(unsigned int fin, unsigned int fspi) +{ + /* + * there are two 4-bit dividers, the pre-divider divides by + * $pre, the post-divider by 2^$post + */ + unsigned int pre, post; + + if (unlikely(fspi > fin)) + return 0; + + post = fls(fin) - fls(fspi); + if (fin > fspi << post) + post++; + + /* now we have: (fin <= fspi << post) with post being minimal */ + + post = max(4U, post) - 4; + if (unlikely(post > 0xf)) { + pr_err("%s: cannot set clock freq: %u (base freq: %u)\n", + __func__, fspi, fin); + return 0xff; + } + + pre = DIV_ROUND_UP(fin, fspi << post) - 1; + + pr_debug("%s: fin: %u, fspi: %u, post: %u, pre: %u\n", + __func__, fin, fspi, post, pre); + return (pre << SPI_IMX2_3_CTRL_PREDIV_OFFSET) | + (post << SPI_IMX2_3_CTRL_POSTDIV_OFFSET); +} + +static void __maybe_unused spi_imx2_3_intctrl(struct spi_imx_data *spi_imx, int enable) +{ + unsigned val = 0; + + if (enable & MXC_INT_TE) + val |= SPI_IMX2_3_INT_TEEN; + + if (enable & MXC_INT_RR) + val |= SPI_IMX2_3_INT_RREN; + + writel(val, spi_imx->base + SPI_IMX2_3_INT); +} + +static void __maybe_unused spi_imx2_3_trigger(struct spi_imx_data *spi_imx) +{ + u32 reg; + + reg = readl(spi_imx->base + SPI_IMX2_3_CTRL); + reg |= SPI_IMX2_3_CTRL_XCH; + writel(reg, spi_imx->base + SPI_IMX2_3_CTRL); +} + +static int __maybe_unused spi_imx2_3_config(struct spi_imx_data *spi_imx, + struct spi_imx_config *config) +{ + u32 ctrl = SPI_IMX2_3_CTRL_ENABLE, cfg = 0; + + /* + * The hardware seems to have a race condition when changing modes. The + * current assumption is that the selection of the channel arrives + * earlier in the hardware than the mode bits when they are written at + * the same time. + * So set master mode for all channels as we do not support slave mode. 
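To make the divider search in spi_imx2_3_clkdiv() above concrete, the following standalone sketch (userspace, not kernel code) replays its arithmetic for an assumed 96 MHz reference clock and a requested 1 MHz SPI clock. fls() and DIV_ROUND_UP() are re-implemented locally, the register packing and the 0xf overflow check are left out, and the printed frequency assumes the usual divide-by-(pre + 1) * 2^post behaviour that the rounding is aiming for.

#include <stdio.h>

/* 1-based position of the highest set bit, mimicking the kernel's fls() */
static unsigned int fls_like(unsigned int x)
{
	unsigned int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned int fin = 96000000;	/* assumed reference clock (spi_clk) */
	unsigned int fspi = 1000000;	/* requested SPI clock */
	unsigned int post, pre;

	post = fls_like(fin) - fls_like(fspi);		/* 27 - 20 = 7 */
	if (fin > (fspi << post))			/* 96 MHz > 128 MHz? no */
		post++;
	post = (post > 4 ? post : 4) - 4;		/* max(4, 7) - 4 = 3 */
	pre = (fin + (fspi << post) - 1) / (fspi << post) - 1;	/* 12 - 1 = 11 */

	printf("pre=%u post=%u -> %u Hz\n", pre, post,
	       fin / ((pre + 1) << post));		/* prints 1000000 Hz */
	return 0;
}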
+ */ + ctrl |= SPI_IMX2_3_CTRL_MODE_MASK; + + /* set clock speed */ + ctrl |= spi_imx2_3_clkdiv(spi_imx->spi_clk, config->speed_hz); + + /* set chip select to use */ + ctrl |= SPI_IMX2_3_CTRL_CS(config->cs); + + ctrl |= (config->bpw - 1) << SPI_IMX2_3_CTRL_BL_OFFSET; + + cfg |= SPI_IMX2_3_CONFIG_SBBCTRL(config->cs); + + if (config->mode & SPI_CPHA) + cfg |= SPI_IMX2_3_CONFIG_SCLKPHA(config->cs); + + if (config->mode & SPI_CPOL) + cfg |= SPI_IMX2_3_CONFIG_SCLKPOL(config->cs); + + if (config->mode & SPI_CS_HIGH) + cfg |= SPI_IMX2_3_CONFIG_SSBPOL(config->cs); + + writel(ctrl, spi_imx->base + SPI_IMX2_3_CTRL); + writel(cfg, spi_imx->base + SPI_IMX2_3_CONFIG); + + return 0; +} + +static int __maybe_unused spi_imx2_3_rx_available(struct spi_imx_data *spi_imx) +{ + return readl(spi_imx->base + SPI_IMX2_3_STAT) & SPI_IMX2_3_STAT_RR; +} + +static void __maybe_unused spi_imx2_3_reset(struct spi_imx_data *spi_imx) +{ + /* drain receive buffer */ + while (spi_imx2_3_rx_available(spi_imx)) + readl(spi_imx->base + MXC_CSPIRXDATA); +} + +#define MX31_INTREG_TEEN (1 << 0) +#define MX31_INTREG_RREN (1 << 3) + +#define MX31_CSPICTRL_ENABLE (1 << 0) +#define MX31_CSPICTRL_MASTER (1 << 1) +#define MX31_CSPICTRL_XCH (1 << 2) +#define MX31_CSPICTRL_POL (1 << 4) +#define MX31_CSPICTRL_PHA (1 << 5) +#define MX31_CSPICTRL_SSCTL (1 << 6) +#define MX31_CSPICTRL_SSPOL (1 << 7) +#define MX31_CSPICTRL_BC_SHIFT 8 +#define MX35_CSPICTRL_BL_SHIFT 20 +#define MX31_CSPICTRL_CS_SHIFT 24 +#define MX35_CSPICTRL_CS_SHIFT 12 +#define MX31_CSPICTRL_DR_SHIFT 16 + +#define MX31_CSPISTATUS 0x14 +#define MX31_STATUS_RR (1 << 3) + +/* These functions also work for the i.MX35, but be aware that + * the i.MX35 has a slightly different register layout for bits + * we do not use here. + */ +static void __maybe_unused mx31_intctrl(struct spi_imx_data *spi_imx, int enable) +{ + unsigned int val = 0; + + if (enable & MXC_INT_TE) + val |= MX31_INTREG_TEEN; + if (enable & MXC_INT_RR) + val |= MX31_INTREG_RREN; + + writel(val, spi_imx->base + MXC_CSPIINT); +} + +static void __maybe_unused mx31_trigger(struct spi_imx_data *spi_imx) +{ + unsigned int reg; + + reg = readl(spi_imx->base + MXC_CSPICTRL); + reg |= MX31_CSPICTRL_XCH; + writel(reg, spi_imx->base + MXC_CSPICTRL); +} + +static int __maybe_unused spi_imx0_4_config(struct spi_imx_data *spi_imx, + struct spi_imx_config *config) +{ + unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_MASTER; + int cs = spi_imx->chipselect[config->cs]; + + reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, config->speed_hz) << + MX31_CSPICTRL_DR_SHIFT; + + reg |= (config->bpw - 1) << MX31_CSPICTRL_BC_SHIFT; + + if (config->mode & SPI_CPHA) + reg |= MX31_CSPICTRL_PHA; + if (config->mode & SPI_CPOL) + reg |= MX31_CSPICTRL_POL; + if (config->mode & SPI_CS_HIGH) + reg |= MX31_CSPICTRL_SSPOL; + if (cs < 0) + reg |= (cs + 32) << MX31_CSPICTRL_CS_SHIFT; + + writel(reg, spi_imx->base + MXC_CSPICTRL); + + return 0; +} + +static int __maybe_unused spi_imx0_7_config(struct spi_imx_data *spi_imx, + struct spi_imx_config *config) +{ + unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_MASTER; + int cs = spi_imx->chipselect[config->cs]; + + reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, config->speed_hz) << + MX31_CSPICTRL_DR_SHIFT; + + reg |= (config->bpw - 1) << MX35_CSPICTRL_BL_SHIFT; + reg |= MX31_CSPICTRL_SSCTL; + + if (config->mode & SPI_CPHA) + reg |= MX31_CSPICTRL_PHA; + if (config->mode & SPI_CPOL) + reg |= MX31_CSPICTRL_POL; + if (config->mode & SPI_CS_HIGH) + reg |= MX31_CSPICTRL_SSPOL; + if (cs < 0) + reg |= (cs + 
32) << MX35_CSPICTRL_CS_SHIFT; + + writel(reg, spi_imx->base + MXC_CSPICTRL); + + return 0; +} + +static int __maybe_unused mx31_rx_available(struct spi_imx_data *spi_imx) +{ + return readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR; +} + +static void __maybe_unused spi_imx0_4_reset(struct spi_imx_data *spi_imx) +{ + /* drain receive buffer */ + while (readl(spi_imx->base + MX3_CSPISTAT) & MX3_CSPISTAT_RR) + readl(spi_imx->base + MXC_CSPIRXDATA); +} + +#define MX27_INTREG_RR (1 << 4) +#define MX27_INTREG_TEEN (1 << 9) +#define MX27_INTREG_RREN (1 << 13) + +#define MX27_CSPICTRL_POL (1 << 5) +#define MX27_CSPICTRL_PHA (1 << 6) +#define MX27_CSPICTRL_SSPOL (1 << 8) +#define MX27_CSPICTRL_XCH (1 << 9) +#define MX27_CSPICTRL_ENABLE (1 << 10) +#define MX27_CSPICTRL_MASTER (1 << 11) +#define MX27_CSPICTRL_DR_SHIFT 14 +#define MX27_CSPICTRL_CS_SHIFT 19 + +static void __maybe_unused mx27_intctrl(struct spi_imx_data *spi_imx, int enable) +{ + unsigned int val = 0; + + if (enable & MXC_INT_TE) + val |= MX27_INTREG_TEEN; + if (enable & MXC_INT_RR) + val |= MX27_INTREG_RREN; + + writel(val, spi_imx->base + MXC_CSPIINT); +} + +static void __maybe_unused mx27_trigger(struct spi_imx_data *spi_imx) +{ + unsigned int reg; + + reg = readl(spi_imx->base + MXC_CSPICTRL); + reg |= MX27_CSPICTRL_XCH; + writel(reg, spi_imx->base + MXC_CSPICTRL); +} + +static int __maybe_unused mx27_config(struct spi_imx_data *spi_imx, + struct spi_imx_config *config) +{ + unsigned int reg = MX27_CSPICTRL_ENABLE | MX27_CSPICTRL_MASTER; + int cs = spi_imx->chipselect[config->cs]; + + reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, config->speed_hz) << + MX27_CSPICTRL_DR_SHIFT; + reg |= config->bpw - 1; + + if (config->mode & SPI_CPHA) + reg |= MX27_CSPICTRL_PHA; + if (config->mode & SPI_CPOL) + reg |= MX27_CSPICTRL_POL; + if (config->mode & SPI_CS_HIGH) + reg |= MX27_CSPICTRL_SSPOL; + if (cs < 0) + reg |= (cs + 32) << MX27_CSPICTRL_CS_SHIFT; + + writel(reg, spi_imx->base + MXC_CSPICTRL); + + return 0; +} + +static int __maybe_unused mx27_rx_available(struct spi_imx_data *spi_imx) +{ + return readl(spi_imx->base + MXC_CSPIINT) & MX27_INTREG_RR; +} + +static void __maybe_unused spi_imx0_0_reset(struct spi_imx_data *spi_imx) +{ + writel(1, spi_imx->base + MXC_RESET); +} + +#define MX1_INTREG_RR (1 << 3) +#define MX1_INTREG_TEEN (1 << 8) +#define MX1_INTREG_RREN (1 << 11) + +#define MX1_CSPICTRL_POL (1 << 4) +#define MX1_CSPICTRL_PHA (1 << 5) +#define MX1_CSPICTRL_XCH (1 << 8) +#define MX1_CSPICTRL_ENABLE (1 << 9) +#define MX1_CSPICTRL_MASTER (1 << 10) +#define MX1_CSPICTRL_DR_SHIFT 13 + +static void __maybe_unused mx1_intctrl(struct spi_imx_data *spi_imx, int enable) +{ + unsigned int val = 0; + + if (enable & MXC_INT_TE) + val |= MX1_INTREG_TEEN; + if (enable & MXC_INT_RR) + val |= MX1_INTREG_RREN; + + writel(val, spi_imx->base + MXC_CSPIINT); +} + +static void __maybe_unused mx1_trigger(struct spi_imx_data *spi_imx) +{ + unsigned int reg; + + reg = readl(spi_imx->base + MXC_CSPICTRL); + reg |= MX1_CSPICTRL_XCH; + writel(reg, spi_imx->base + MXC_CSPICTRL); +} + +static int __maybe_unused mx1_config(struct spi_imx_data *spi_imx, + struct spi_imx_config *config) +{ + unsigned int reg = MX1_CSPICTRL_ENABLE | MX1_CSPICTRL_MASTER; + + reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, config->speed_hz) << + MX1_CSPICTRL_DR_SHIFT; + reg |= config->bpw - 1; + + if (config->mode & SPI_CPHA) + reg |= MX1_CSPICTRL_PHA; + if (config->mode & SPI_CPOL) + reg |= MX1_CSPICTRL_POL; + + writel(reg, spi_imx->base + MXC_CSPICTRL); + + return 0; +} + +static 
int __maybe_unused mx1_rx_available(struct spi_imx_data *spi_imx) +{ + return readl(spi_imx->base + MXC_CSPIINT) & MX1_INTREG_RR; +} + +static void __maybe_unused mx1_reset(struct spi_imx_data *spi_imx) +{ + writel(1, spi_imx->base + MXC_RESET); +} + +/* + * These version numbers are taken from the Freescale driver. Unfortunately it + * doesn't support i.MX1, so this entry doesn't match the scheme. :-( + */ +static struct spi_imx_devtype_data spi_imx_devtype_data[] __devinitdata = { +#ifdef CONFIG_SPI_IMX_VER_IMX1 + [SPI_IMX_VER_IMX1] = { + .intctrl = mx1_intctrl, + .config = mx1_config, + .trigger = mx1_trigger, + .rx_available = mx1_rx_available, + .reset = mx1_reset, + .fifosize = 8, + }, +#endif +#ifdef CONFIG_SPI_IMX_VER_0_0 + [SPI_IMX_VER_0_0] = { + .intctrl = mx27_intctrl, + .config = mx27_config, + .trigger = mx27_trigger, + .rx_available = mx27_rx_available, + .reset = spi_imx0_0_reset, + .fifosize = 8, + }, +#endif +#ifdef CONFIG_SPI_IMX_VER_0_4 + [SPI_IMX_VER_0_4] = { + .intctrl = mx31_intctrl, + .config = spi_imx0_4_config, + .trigger = mx31_trigger, + .rx_available = mx31_rx_available, + .reset = spi_imx0_4_reset, + .fifosize = 8, + }, +#endif +#ifdef CONFIG_SPI_IMX_VER_0_7 + [SPI_IMX_VER_0_7] = { + .intctrl = mx31_intctrl, + .config = spi_imx0_7_config, + .trigger = mx31_trigger, + .rx_available = mx31_rx_available, + .reset = spi_imx0_4_reset, + .fifosize = 8, + }, +#endif +#ifdef CONFIG_SPI_IMX_VER_2_3 + [SPI_IMX_VER_2_3] = { + .intctrl = spi_imx2_3_intctrl, + .config = spi_imx2_3_config, + .trigger = spi_imx2_3_trigger, + .rx_available = spi_imx2_3_rx_available, + .reset = spi_imx2_3_reset, + .fifosize = 64, + }, +#endif +}; + +static void spi_imx_chipselect(struct spi_device *spi, int is_active) +{ + struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master); + int gpio = spi_imx->chipselect[spi->chip_select]; + int active = is_active != BITBANG_CS_INACTIVE; + int dev_is_lowactive = !(spi->mode & SPI_CS_HIGH); + + if (gpio < 0) + return; + + gpio_set_value(gpio, dev_is_lowactive ^ active); +} + +static void spi_imx_push(struct spi_imx_data *spi_imx) +{ + while (spi_imx->txfifo < spi_imx->devtype_data.fifosize) { + if (!spi_imx->count) + break; + spi_imx->tx(spi_imx); + spi_imx->txfifo++; + } + + spi_imx->devtype_data.trigger(spi_imx); +} + +static irqreturn_t spi_imx_isr(int irq, void *dev_id) +{ + struct spi_imx_data *spi_imx = dev_id; + + while (spi_imx->devtype_data.rx_available(spi_imx)) { + spi_imx->rx(spi_imx); + spi_imx->txfifo--; + } + + if (spi_imx->count) { + spi_imx_push(spi_imx); + return IRQ_HANDLED; + } + + if (spi_imx->txfifo) { + /* No data left to push, but still waiting for rx data, + * enable receive data available interrupt. + */ + spi_imx->devtype_data.intctrl( + spi_imx, MXC_INT_RR); + return IRQ_HANDLED; + } + + spi_imx->devtype_data.intctrl(spi_imx, 0); + complete(&spi_imx->xfer_done); + + return IRQ_HANDLED; +} + +static int spi_imx_setupxfer(struct spi_device *spi, + struct spi_transfer *t) +{ + struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master); + struct spi_imx_config config; + + config.bpw = t ? t->bits_per_word : spi->bits_per_word; + config.speed_hz = t ? 
t->speed_hz : spi->max_speed_hz; + config.mode = spi->mode; + config.cs = spi->chip_select; + + if (!config.speed_hz) + config.speed_hz = spi->max_speed_hz; + if (!config.bpw) + config.bpw = spi->bits_per_word; + if (!config.speed_hz) + config.speed_hz = spi->max_speed_hz; + + /* Initialize the functions for transfer */ + if (config.bpw <= 8) { + spi_imx->rx = spi_imx_buf_rx_u8; + spi_imx->tx = spi_imx_buf_tx_u8; + } else if (config.bpw <= 16) { + spi_imx->rx = spi_imx_buf_rx_u16; + spi_imx->tx = spi_imx_buf_tx_u16; + } else if (config.bpw <= 32) { + spi_imx->rx = spi_imx_buf_rx_u32; + spi_imx->tx = spi_imx_buf_tx_u32; + } else + BUG(); + + spi_imx->devtype_data.config(spi_imx, &config); + + return 0; +} + +static int spi_imx_transfer(struct spi_device *spi, + struct spi_transfer *transfer) +{ + struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master); + + spi_imx->tx_buf = transfer->tx_buf; + spi_imx->rx_buf = transfer->rx_buf; + spi_imx->count = transfer->len; + spi_imx->txfifo = 0; + + init_completion(&spi_imx->xfer_done); + + spi_imx_push(spi_imx); + + spi_imx->devtype_data.intctrl(spi_imx, MXC_INT_TE); + + wait_for_completion(&spi_imx->xfer_done); + + return transfer->len; +} + +static int spi_imx_setup(struct spi_device *spi) +{ + struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master); + int gpio = spi_imx->chipselect[spi->chip_select]; + + dev_dbg(&spi->dev, "%s: mode %d, %u bpw, %d hz\n", __func__, + spi->mode, spi->bits_per_word, spi->max_speed_hz); + + if (gpio >= 0) + gpio_direction_output(gpio, spi->mode & SPI_CS_HIGH ? 0 : 1); + + spi_imx_chipselect(spi, BITBANG_CS_INACTIVE); + + return 0; +} + +static void spi_imx_cleanup(struct spi_device *spi) +{ +} + +static struct platform_device_id spi_imx_devtype[] = { + { + .name = "imx1-cspi", + .driver_data = SPI_IMX_VER_IMX1, + }, { + .name = "imx21-cspi", + .driver_data = SPI_IMX_VER_0_0, + }, { + .name = "imx25-cspi", + .driver_data = SPI_IMX_VER_0_7, + }, { + .name = "imx27-cspi", + .driver_data = SPI_IMX_VER_0_0, + }, { + .name = "imx31-cspi", + .driver_data = SPI_IMX_VER_0_4, + }, { + .name = "imx35-cspi", + .driver_data = SPI_IMX_VER_0_7, + }, { + .name = "imx51-cspi", + .driver_data = SPI_IMX_VER_0_7, + }, { + .name = "imx51-ecspi", + .driver_data = SPI_IMX_VER_2_3, + }, { + .name = "imx53-cspi", + .driver_data = SPI_IMX_VER_0_7, + }, { + .name = "imx53-ecspi", + .driver_data = SPI_IMX_VER_2_3, + }, { + /* sentinel */ + } +}; + +static int __devinit spi_imx_probe(struct platform_device *pdev) +{ + struct spi_imx_master *mxc_platform_info; + struct spi_master *master; + struct spi_imx_data *spi_imx; + struct resource *res; + int i, ret; + + mxc_platform_info = dev_get_platdata(&pdev->dev); + if (!mxc_platform_info) { + dev_err(&pdev->dev, "can't get the platform data\n"); + return -EINVAL; + } + + master = spi_alloc_master(&pdev->dev, sizeof(struct spi_imx_data)); + if (!master) + return -ENOMEM; + + platform_set_drvdata(pdev, master); + + master->bus_num = pdev->id; + master->num_chipselect = mxc_platform_info->num_chipselect; + + spi_imx = spi_master_get_devdata(master); + spi_imx->bitbang.master = spi_master_get(master); + spi_imx->chipselect = mxc_platform_info->chipselect; + + for (i = 0; i < master->num_chipselect; i++) { + if (spi_imx->chipselect[i] < 0) + continue; + ret = gpio_request(spi_imx->chipselect[i], DRIVER_NAME); + if (ret) { + while (i > 0) { + i--; + if (spi_imx->chipselect[i] >= 0) + gpio_free(spi_imx->chipselect[i]); + } + dev_err(&pdev->dev, "can't get cs gpios\n"); + goto 
out_master_put; + } + } + + spi_imx->bitbang.chipselect = spi_imx_chipselect; + spi_imx->bitbang.setup_transfer = spi_imx_setupxfer; + spi_imx->bitbang.txrx_bufs = spi_imx_transfer; + spi_imx->bitbang.master->setup = spi_imx_setup; + spi_imx->bitbang.master->cleanup = spi_imx_cleanup; + spi_imx->bitbang.master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; + + init_completion(&spi_imx->xfer_done); + + spi_imx->devtype_data = + spi_imx_devtype_data[pdev->id_entry->driver_data]; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(&pdev->dev, "can't get platform resource\n"); + ret = -ENOMEM; + goto out_gpio_free; + } + + if (!request_mem_region(res->start, resource_size(res), pdev->name)) { + dev_err(&pdev->dev, "request_mem_region failed\n"); + ret = -EBUSY; + goto out_gpio_free; + } + + spi_imx->base = ioremap(res->start, resource_size(res)); + if (!spi_imx->base) { + ret = -EINVAL; + goto out_release_mem; + } + + spi_imx->irq = platform_get_irq(pdev, 0); + if (spi_imx->irq < 0) { + ret = -EINVAL; + goto out_iounmap; + } + + ret = request_irq(spi_imx->irq, spi_imx_isr, 0, DRIVER_NAME, spi_imx); + if (ret) { + dev_err(&pdev->dev, "can't get irq%d: %d\n", spi_imx->irq, ret); + goto out_iounmap; + } + + spi_imx->clk = clk_get(&pdev->dev, NULL); + if (IS_ERR(spi_imx->clk)) { + dev_err(&pdev->dev, "unable to get clock\n"); + ret = PTR_ERR(spi_imx->clk); + goto out_free_irq; + } + + clk_enable(spi_imx->clk); + spi_imx->spi_clk = clk_get_rate(spi_imx->clk); + + spi_imx->devtype_data.reset(spi_imx); + + spi_imx->devtype_data.intctrl(spi_imx, 0); + + ret = spi_bitbang_start(&spi_imx->bitbang); + if (ret) { + dev_err(&pdev->dev, "bitbang start failed with %d\n", ret); + goto out_clk_put; + } + + dev_info(&pdev->dev, "probed\n"); + + return ret; + +out_clk_put: + clk_disable(spi_imx->clk); + clk_put(spi_imx->clk); +out_free_irq: + free_irq(spi_imx->irq, spi_imx); +out_iounmap: + iounmap(spi_imx->base); +out_release_mem: + release_mem_region(res->start, resource_size(res)); +out_gpio_free: + for (i = 0; i < master->num_chipselect; i++) + if (spi_imx->chipselect[i] >= 0) + gpio_free(spi_imx->chipselect[i]); +out_master_put: + spi_master_put(master); + kfree(master); + platform_set_drvdata(pdev, NULL); + return ret; +} + +static int __devexit spi_imx_remove(struct platform_device *pdev) +{ + struct spi_master *master = platform_get_drvdata(pdev); + struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + struct spi_imx_data *spi_imx = spi_master_get_devdata(master); + int i; + + spi_bitbang_stop(&spi_imx->bitbang); + + writel(0, spi_imx->base + MXC_CSPICTRL); + clk_disable(spi_imx->clk); + clk_put(spi_imx->clk); + free_irq(spi_imx->irq, spi_imx); + iounmap(spi_imx->base); + + for (i = 0; i < master->num_chipselect; i++) + if (spi_imx->chipselect[i] >= 0) + gpio_free(spi_imx->chipselect[i]); + + spi_master_put(master); + + release_mem_region(res->start, resource_size(res)); + + platform_set_drvdata(pdev, NULL); + + return 0; +} + +static struct platform_driver spi_imx_driver = { + .driver = { + .name = DRIVER_NAME, + .owner = THIS_MODULE, + }, + .id_table = spi_imx_devtype, + .probe = spi_imx_probe, + .remove = __devexit_p(spi_imx_remove), +}; + +static int __init spi_imx_init(void) +{ + return platform_driver_register(&spi_imx_driver); +} + +static void __exit spi_imx_exit(void) +{ + platform_driver_unregister(&spi_imx_driver); +} + +module_init(spi_imx_init); +module_exit(spi_imx_exit); + +MODULE_DESCRIPTION("SPI Master Controller driver"); 
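For orientation, board code on the non-DT platforms this driver targets might supply the platform data it requires along these lines. This is a sketch only: the GPIO number, rates and "spidev" slave are invented, the <mach/spi.h> header name is an assumption, and the platform device itself (with its MMIO and IRQ resources) is normally created by the SoC support code rather than spelled out by the board. The chipselect array follows the convention visible in the config helpers above: non-negative entries are GPIOs the driver requests and drives itself, negative entries select the controller's own chip selects via the (cs + 32) arithmetic.

#include <linux/spi/spi.h>
#include <mach/spi.h>		/* assumed home of struct spi_imx_master */

static int myboard_cspi_cs[] = { -32, 42 };	/* internal CS0, then GPIO 42 */

static struct spi_imx_master myboard_cspi_pdata = {
	.chipselect	= myboard_cspi_cs,
	.num_chipselect	= 2,
};

static struct spi_board_info myboard_cspi_slaves[] __initdata = {
	{
		.modalias	= "spidev",	/* example slave only */
		.max_speed_hz	= 8000000,
		.bus_num	= 0,		/* pdev->id of the controller */
		.chip_select	= 1,		/* second entry -> GPIO 42 */
		.mode		= SPI_MODE_0,
	},
};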
+MODULE_AUTHOR("Sascha Hauer, Pengutronix"); +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi-lm70llp.c b/drivers/spi/spi-lm70llp.c new file mode 100644 index 0000000..933eb9d --- /dev/null +++ b/drivers/spi/spi-lm70llp.c @@ -0,0 +1,351 @@ +/* + * Driver for LM70EVAL-LLP board for the LM70 sensor + * + * Copyright (C) 2006 Kaiwan N Billimoria + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + + +#include +#include + + +/* + * The LM70 communicates with a host processor using a 3-wire variant of + * the SPI/Microwire bus interface. This driver specifically supports an + * NS LM70 LLP Evaluation Board, interfacing to a PC using its parallel + * port to bitbang an SPI-parport bridge. Accordingly, this is an SPI + * master controller driver. The hwmon/lm70 driver is a "SPI protocol + * driver", layered on top of this one and usable without the lm70llp. + * + * Datasheet and Schematic: + * The LM70 is a temperature sensor chip from National Semiconductor; its + * datasheet is available at http://www.national.com/pf/LM/LM70.html + * The schematic for this particular board (the LM70EVAL-LLP) is + * available (on page 4) here: + * http://www.national.com/appinfo/tempsensors/files/LM70LLPEVALmanual.pdf + * + * Also see Documentation/spi/spi-lm70llp. The SPI<->parport code here is + * (heavily) based on spi-butterfly by David Brownell. + * + * The LM70 LLP connects to the PC parallel port in the following manner: + * + * Parallel LM70 LLP + * Port Direction JP2 Header + * ----------- --------- ------------ + * D0 2 - - + * D1 3 --> V+ 5 + * D2 4 --> V+ 5 + * D3 5 --> V+ 5 + * D4 6 --> V+ 5 + * D5 7 --> nCS 8 + * D6 8 --> SCLK 3 + * D7 9 --> SI/O 5 + * GND 25 - GND 7 + * Select 13 <-- SI/O 1 + * + * Note that parport pin 13 actually gets inverted by the transistor + * arrangement which lets either the parport or the LM70 drive the + * SI/SO signal (see the schematic for details). + */ + +#define DRVNAME "spi-lm70llp" + +#define lm70_INIT 0xBE +#define SIO 0x10 +#define nCS 0x20 +#define SCLK 0x40 + +/*-------------------------------------------------------------------------*/ + +struct spi_lm70llp { + struct spi_bitbang bitbang; + struct parport *port; + struct pardevice *pd; + struct spi_device *spidev_lm70; + struct spi_board_info info; + //struct device *dev; +}; + +/* REVISIT : ugly global ; provides "exclusive open" facility */ +static struct spi_lm70llp *lm70llp; + + +/*-------------------------------------------------------------------*/ + +static inline struct spi_lm70llp *spidev_to_pp(struct spi_device *spi) +{ + return spi->controller_data; +} + +/*---------------------- LM70 LLP eval board-specific inlines follow */ + +/* NOTE: we don't actually need to reread the output values, since they'll + * still be what we wrote before. 
Plus, going through parport builds in + * a ~1ms/operation delay; these SPI transfers could easily be faster. + */ + +static inline void deassertCS(struct spi_lm70llp *pp) +{ + u8 data = parport_read_data(pp->port); + + data &= ~0x80; /* pull D7/SI-out low while de-asserted */ + parport_write_data(pp->port, data | nCS); +} + +static inline void assertCS(struct spi_lm70llp *pp) +{ + u8 data = parport_read_data(pp->port); + + data |= 0x80; /* pull D7/SI-out high so lm70 drives SO-in */ + parport_write_data(pp->port, data & ~nCS); +} + +static inline void clkHigh(struct spi_lm70llp *pp) +{ + u8 data = parport_read_data(pp->port); + parport_write_data(pp->port, data | SCLK); +} + +static inline void clkLow(struct spi_lm70llp *pp) +{ + u8 data = parport_read_data(pp->port); + parport_write_data(pp->port, data & ~SCLK); +} + +/*------------------------- SPI-LM70-specific inlines ----------------------*/ + +static inline void spidelay(unsigned d) +{ + udelay(d); +} + +static inline void setsck(struct spi_device *s, int is_on) +{ + struct spi_lm70llp *pp = spidev_to_pp(s); + + if (is_on) + clkHigh(pp); + else + clkLow(pp); +} + +static inline void setmosi(struct spi_device *s, int is_on) +{ + /* FIXME update D7 ... this way we can put the chip + * into shutdown mode and read the manufacturer ID, + * but we can't put it back into operational mode. + */ +} + +/* + * getmiso: + * Why do we return 0 when the SIO line is high and vice-versa? + * The fact is, the lm70 eval board from NS (which this driver drives), + * is wired in just such a way : when the lm70's SIO goes high, a transistor + * switches it to low reflecting this on the parport (pin 13), and vice-versa. + */ +static inline int getmiso(struct spi_device *s) +{ + struct spi_lm70llp *pp = spidev_to_pp(s); + return ((SIO == (parport_read_status(pp->port) & SIO)) ? 0 : 1 ); +} +/*--------------------------------------------------------------------*/ + +#include "spi-bitbang-txrx.h" + +static void lm70_chipselect(struct spi_device *spi, int value) +{ + struct spi_lm70llp *pp = spidev_to_pp(spi); + + if (value) + assertCS(pp); + else + deassertCS(pp); +} + +/* + * Our actual bitbanger routine. + */ +static u32 lm70_txrx(struct spi_device *spi, unsigned nsecs, u32 word, u8 bits) +{ + return bitbang_txrx_be_cpha0(spi, nsecs, 0, 0, word, bits); +} + +static void spi_lm70llp_attach(struct parport *p) +{ + struct pardevice *pd; + struct spi_lm70llp *pp; + struct spi_master *master; + int status; + + if (lm70llp) { + printk(KERN_WARNING + "%s: spi_lm70llp instance already loaded. Aborting.\n", + DRVNAME); + return; + } + + /* TODO: this just _assumes_ a lm70 is there ... no probe; + * the lm70 driver could verify it, reading the manf ID. + */ + + master = spi_alloc_master(p->physport->dev, sizeof *pp); + if (!master) { + status = -ENOMEM; + goto out_fail; + } + pp = spi_master_get_devdata(master); + + master->bus_num = -1; /* dynamic alloc of a bus number */ + master->num_chipselect = 1; + + /* + * SPI and bitbang hookup. + */ + pp->bitbang.master = spi_master_get(master); + pp->bitbang.chipselect = lm70_chipselect; + pp->bitbang.txrx_word[SPI_MODE_0] = lm70_txrx; + pp->bitbang.flags = SPI_3WIRE; + + /* + * Parport hookup + */ + pp->port = p; + pd = parport_register_device(p, DRVNAME, + NULL, NULL, NULL, + PARPORT_FLAG_EXCL, pp); + if (!pd) { + status = -ENOMEM; + goto out_free_master; + } + pp->pd = pd; + + status = parport_claim(pd); + if (status < 0) + goto out_parport_unreg; + + /* + * Start SPI ... 
+ */ + status = spi_bitbang_start(&pp->bitbang); + if (status < 0) { + printk(KERN_WARNING + "%s: spi_bitbang_start failed with status %d\n", + DRVNAME, status); + goto out_off_and_release; + } + + /* + * The modalias name MUST match the device_driver name + * for the bus glue code to match and subsequently bind them. + * We are binding to the generic drivers/hwmon/lm70.c device + * driver. + */ + strcpy(pp->info.modalias, "lm70"); + pp->info.max_speed_hz = 6 * 1000 * 1000; + pp->info.chip_select = 0; + pp->info.mode = SPI_3WIRE | SPI_MODE_0; + + /* power up the chip, and let the LM70 control SI/SO */ + parport_write_data(pp->port, lm70_INIT); + + /* Enable access to our primary data structure via + * the board info's (void *)controller_data. + */ + pp->info.controller_data = pp; + pp->spidev_lm70 = spi_new_device(pp->bitbang.master, &pp->info); + if (pp->spidev_lm70) + dev_dbg(&pp->spidev_lm70->dev, "spidev_lm70 at %s\n", + dev_name(&pp->spidev_lm70->dev)); + else { + printk(KERN_WARNING "%s: spi_new_device failed\n", DRVNAME); + status = -ENODEV; + goto out_bitbang_stop; + } + pp->spidev_lm70->bits_per_word = 8; + + lm70llp = pp; + return; + +out_bitbang_stop: + spi_bitbang_stop(&pp->bitbang); +out_off_and_release: + /* power down */ + parport_write_data(pp->port, 0); + mdelay(10); + parport_release(pp->pd); +out_parport_unreg: + parport_unregister_device(pd); +out_free_master: + (void) spi_master_put(master); +out_fail: + pr_info("%s: spi_lm70llp probe fail, status %d\n", DRVNAME, status); +} + +static void spi_lm70llp_detach(struct parport *p) +{ + struct spi_lm70llp *pp; + + if (!lm70llp || lm70llp->port != p) + return; + + pp = lm70llp; + spi_bitbang_stop(&pp->bitbang); + + /* power down */ + parport_write_data(pp->port, 0); + + parport_release(pp->pd); + parport_unregister_device(pp->pd); + + (void) spi_master_put(pp->bitbang.master); + + lm70llp = NULL; +} + + +static struct parport_driver spi_lm70llp_drv = { + .name = DRVNAME, + .attach = spi_lm70llp_attach, + .detach = spi_lm70llp_detach, +}; + +static int __init init_spi_lm70llp(void) +{ + return parport_register_driver(&spi_lm70llp_drv); +} +module_init(init_spi_lm70llp); + +static void __exit cleanup_spi_lm70llp(void) +{ + parport_unregister_driver(&spi_lm70llp_drv); +} +module_exit(cleanup_spi_lm70llp); + +MODULE_AUTHOR("Kaiwan N Billimoria "); +MODULE_DESCRIPTION( + "Parport adapter for the National Semiconductor LM70 LLP eval board"); +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi-mpc512x-psc.c b/drivers/spi/spi-mpc512x-psc.c new file mode 100644 index 0000000..6a5b423 --- /dev/null +++ b/drivers/spi/spi-mpc512x-psc.c @@ -0,0 +1,577 @@ +/* + * MPC512x PSC in SPI mode driver. + * + * Copyright (C) 2007,2008 Freescale Semiconductor Inc. + * Original port from 52xx driver: + * Hongjun Chen + * + * Fork of mpc52xx_psc_spi.c: + * Copyright (C) 2006 TOPTICA Photonics AG., Dragos Carp + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
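Both spi-gpio.c and spi-lm70llp.c above define setsck()/setmosi()/getmiso()/spidelay() and then pull the actual shift loops in from spi-bitbang-txrx.h, which is not part of this patch. As a rough sketch (an illustration, not the header's literal code), its mode-0, MSB-first word transfer behaves about like this: MOSI is set up while SCK is idle, the slave samples on the rising edge, and MISO is read back before SCK falls again.

static u32 example_txrx_mode0(struct spi_device *spi, unsigned nsecs,
			      u32 word, u8 bits)
{
	for (word <<= (32 - bits); bits; bits--) {
		setmosi(spi, word & (1 << 31));	/* present the next bit */
		spidelay(nsecs);		/* setup time, SCK still idle */

		setsck(spi, 1);			/* rising edge: slave samples */
		spidelay(nsecs);

		word <<= 1;
		word |= getmiso(spi);		/* sample the slave's output */
		setsck(spi, 0);			/* falling edge */
	}
	return word;				/* received bits, right-aligned */
}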
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct mpc512x_psc_spi { + void (*cs_control)(struct spi_device *spi, bool on); + u32 sysclk; + + /* driver internal data */ + struct mpc52xx_psc __iomem *psc; + struct mpc512x_psc_fifo __iomem *fifo; + unsigned int irq; + u8 bits_per_word; + u8 busy; + u32 mclk; + u8 eofbyte; + + struct workqueue_struct *workqueue; + struct work_struct work; + + struct list_head queue; + spinlock_t lock; /* Message queue lock */ + + struct completion done; +}; + +/* controller state */ +struct mpc512x_psc_spi_cs { + int bits_per_word; + int speed_hz; +}; + +/* set clock freq, clock ramp, bits per work + * if t is NULL then reset the values to the default values + */ +static int mpc512x_psc_spi_transfer_setup(struct spi_device *spi, + struct spi_transfer *t) +{ + struct mpc512x_psc_spi_cs *cs = spi->controller_state; + + cs->speed_hz = (t && t->speed_hz) + ? t->speed_hz : spi->max_speed_hz; + cs->bits_per_word = (t && t->bits_per_word) + ? t->bits_per_word : spi->bits_per_word; + cs->bits_per_word = ((cs->bits_per_word + 7) / 8) * 8; + return 0; +} + +static void mpc512x_psc_spi_activate_cs(struct spi_device *spi) +{ + struct mpc512x_psc_spi_cs *cs = spi->controller_state; + struct mpc512x_psc_spi *mps = spi_master_get_devdata(spi->master); + struct mpc52xx_psc __iomem *psc = mps->psc; + u32 sicr; + u32 ccr; + u16 bclkdiv; + + sicr = in_be32(&psc->sicr); + + /* Set clock phase and polarity */ + if (spi->mode & SPI_CPHA) + sicr |= 0x00001000; + else + sicr &= ~0x00001000; + + if (spi->mode & SPI_CPOL) + sicr |= 0x00002000; + else + sicr &= ~0x00002000; + + if (spi->mode & SPI_LSB_FIRST) + sicr |= 0x10000000; + else + sicr &= ~0x10000000; + out_be32(&psc->sicr, sicr); + + ccr = in_be32(&psc->ccr); + ccr &= 0xFF000000; + if (cs->speed_hz) + bclkdiv = (mps->mclk / cs->speed_hz) - 1; + else + bclkdiv = (mps->mclk / 1000000) - 1; /* default 1MHz */ + + ccr |= (((bclkdiv & 0xff) << 16) | (((bclkdiv >> 8) & 0xff) << 8)); + out_be32(&psc->ccr, ccr); + mps->bits_per_word = cs->bits_per_word; + + if (mps->cs_control) + mps->cs_control(spi, (spi->mode & SPI_CS_HIGH) ? 1 : 0); +} + +static void mpc512x_psc_spi_deactivate_cs(struct spi_device *spi) +{ + struct mpc512x_psc_spi *mps = spi_master_get_devdata(spi->master); + + if (mps->cs_control) + mps->cs_control(spi, (spi->mode & SPI_CS_HIGH) ? 0 : 1); + +} + +/* extract and scale size field in txsz or rxsz */ +#define MPC512x_PSC_FIFO_SZ(sz) ((sz & 0x7ff) << 2); + +#define EOFBYTE 1 + +static int mpc512x_psc_spi_transfer_rxtx(struct spi_device *spi, + struct spi_transfer *t) +{ + struct mpc512x_psc_spi *mps = spi_master_get_devdata(spi->master); + struct mpc52xx_psc __iomem *psc = mps->psc; + struct mpc512x_psc_fifo __iomem *fifo = mps->fifo; + size_t len = t->len; + u8 *tx_buf = (u8 *)t->tx_buf; + u8 *rx_buf = (u8 *)t->rx_buf; + + if (!tx_buf && !rx_buf && t->len) + return -EINVAL; + + /* Zero MR2 */ + in_8(&psc->mode); + out_8(&psc->mode, 0x0); + + while (len) { + int count; + int i; + u8 data; + size_t fifosz; + int rxcount; + + /* + * The number of bytes that can be sent at a time + * depends on the fifo size. + */ + fifosz = MPC512x_PSC_FIFO_SZ(in_be32(&fifo->txsz)); + count = min(fifosz, len); + + for (i = count; i > 0; i--) { + data = tx_buf ? 
*tx_buf++ : 0; + if (len == EOFBYTE) + setbits32(&fifo->txcmd, MPC512x_PSC_FIFO_EOF); + out_8(&fifo->txdata_8, data); + len--; + } + + INIT_COMPLETION(mps->done); + + /* interrupt on tx fifo empty */ + out_be32(&fifo->txisr, MPC512x_PSC_FIFO_EMPTY); + out_be32(&fifo->tximr, MPC512x_PSC_FIFO_EMPTY); + + /* enable transmiter/receiver */ + out_8(&psc->command, + MPC52xx_PSC_TX_ENABLE | MPC52xx_PSC_RX_ENABLE); + + wait_for_completion(&mps->done); + + mdelay(1); + + /* rx fifo should have count bytes in it */ + rxcount = in_be32(&fifo->rxcnt); + if (rxcount != count) + mdelay(1); + + rxcount = in_be32(&fifo->rxcnt); + if (rxcount != count) { + dev_warn(&spi->dev, "expected %d bytes in rx fifo " + "but got %d\n", count, rxcount); + } + + rxcount = min(rxcount, count); + for (i = rxcount; i > 0; i--) { + data = in_8(&fifo->rxdata_8); + if (rx_buf) + *rx_buf++ = data; + } + while (in_be32(&fifo->rxcnt)) { + in_8(&fifo->rxdata_8); + } + + out_8(&psc->command, + MPC52xx_PSC_TX_DISABLE | MPC52xx_PSC_RX_DISABLE); + } + /* disable transmiter/receiver and fifo interrupt */ + out_8(&psc->command, MPC52xx_PSC_TX_DISABLE | MPC52xx_PSC_RX_DISABLE); + out_be32(&fifo->tximr, 0); + return 0; +} + +static void mpc512x_psc_spi_work(struct work_struct *work) +{ + struct mpc512x_psc_spi *mps = container_of(work, + struct mpc512x_psc_spi, + work); + + spin_lock_irq(&mps->lock); + mps->busy = 1; + while (!list_empty(&mps->queue)) { + struct spi_message *m; + struct spi_device *spi; + struct spi_transfer *t = NULL; + unsigned cs_change; + int status; + + m = container_of(mps->queue.next, struct spi_message, queue); + list_del_init(&m->queue); + spin_unlock_irq(&mps->lock); + + spi = m->spi; + cs_change = 1; + status = 0; + list_for_each_entry(t, &m->transfers, transfer_list) { + if (t->bits_per_word || t->speed_hz) { + status = mpc512x_psc_spi_transfer_setup(spi, t); + if (status < 0) + break; + } + + if (cs_change) + mpc512x_psc_spi_activate_cs(spi); + cs_change = t->cs_change; + + status = mpc512x_psc_spi_transfer_rxtx(spi, t); + if (status) + break; + m->actual_length += t->len; + + if (t->delay_usecs) + udelay(t->delay_usecs); + + if (cs_change) + mpc512x_psc_spi_deactivate_cs(spi); + } + + m->status = status; + m->complete(m->context); + + if (status || !cs_change) + mpc512x_psc_spi_deactivate_cs(spi); + + mpc512x_psc_spi_transfer_setup(spi, NULL); + + spin_lock_irq(&mps->lock); + } + mps->busy = 0; + spin_unlock_irq(&mps->lock); +} + +static int mpc512x_psc_spi_setup(struct spi_device *spi) +{ + struct mpc512x_psc_spi *mps = spi_master_get_devdata(spi->master); + struct mpc512x_psc_spi_cs *cs = spi->controller_state; + unsigned long flags; + + if (spi->bits_per_word % 8) + return -EINVAL; + + if (!cs) { + cs = kzalloc(sizeof *cs, GFP_KERNEL); + if (!cs) + return -ENOMEM; + spi->controller_state = cs; + } + + cs->bits_per_word = spi->bits_per_word; + cs->speed_hz = spi->max_speed_hz; + + spin_lock_irqsave(&mps->lock, flags); + if (!mps->busy) + mpc512x_psc_spi_deactivate_cs(spi); + spin_unlock_irqrestore(&mps->lock, flags); + + return 0; +} + +static int mpc512x_psc_spi_transfer(struct spi_device *spi, + struct spi_message *m) +{ + struct mpc512x_psc_spi *mps = spi_master_get_devdata(spi->master); + unsigned long flags; + + m->actual_length = 0; + m->status = -EINPROGRESS; + + spin_lock_irqsave(&mps->lock, flags); + list_add_tail(&m->queue, &mps->queue); + queue_work(mps->workqueue, &mps->work); + spin_unlock_irqrestore(&mps->lock, flags); + + return 0; +} + +static void mpc512x_psc_spi_cleanup(struct spi_device 
*spi) +{ + kfree(spi->controller_state); +} + +static int mpc512x_psc_spi_port_config(struct spi_master *master, + struct mpc512x_psc_spi *mps) +{ + struct mpc52xx_psc __iomem *psc = mps->psc; + struct mpc512x_psc_fifo __iomem *fifo = mps->fifo; + struct clk *spiclk; + int ret = 0; + char name[32]; + u32 sicr; + u32 ccr; + u16 bclkdiv; + + sprintf(name, "psc%d_mclk", master->bus_num); + spiclk = clk_get(&master->dev, name); + clk_enable(spiclk); + mps->mclk = clk_get_rate(spiclk); + clk_put(spiclk); + + /* Reset the PSC into a known state */ + out_8(&psc->command, MPC52xx_PSC_RST_RX); + out_8(&psc->command, MPC52xx_PSC_RST_TX); + out_8(&psc->command, MPC52xx_PSC_TX_DISABLE | MPC52xx_PSC_RX_DISABLE); + + /* Disable psc interrupts all useful interrupts are in fifo */ + out_be16(&psc->isr_imr.imr, 0); + + /* Disable fifo interrupts, will be enabled later */ + out_be32(&fifo->tximr, 0); + out_be32(&fifo->rximr, 0); + + /* Setup fifo slice address and size */ + /*out_be32(&fifo->txsz, 0x0fe00004);*/ + /*out_be32(&fifo->rxsz, 0x0ff00004);*/ + + sicr = 0x01000000 | /* SIM = 0001 -- 8 bit */ + 0x00800000 | /* GenClk = 1 -- internal clk */ + 0x00008000 | /* SPI = 1 */ + 0x00004000 | /* MSTR = 1 -- SPI master */ + 0x00000800; /* UseEOF = 1 -- SS low until EOF */ + + out_be32(&psc->sicr, sicr); + + ccr = in_be32(&psc->ccr); + ccr &= 0xFF000000; + bclkdiv = (mps->mclk / 1000000) - 1; /* default 1MHz */ + ccr |= (((bclkdiv & 0xff) << 16) | (((bclkdiv >> 8) & 0xff) << 8)); + out_be32(&psc->ccr, ccr); + + /* Set 2ms DTL delay */ + out_8(&psc->ctur, 0x00); + out_8(&psc->ctlr, 0x82); + + /* we don't use the alarms */ + out_be32(&fifo->rxalarm, 0xfff); + out_be32(&fifo->txalarm, 0); + + /* Enable FIFO slices for Rx/Tx */ + out_be32(&fifo->rxcmd, + MPC512x_PSC_FIFO_ENABLE_SLICE | MPC512x_PSC_FIFO_ENABLE_DMA); + out_be32(&fifo->txcmd, + MPC512x_PSC_FIFO_ENABLE_SLICE | MPC512x_PSC_FIFO_ENABLE_DMA); + + mps->bits_per_word = 8; + + return ret; +} + +static irqreturn_t mpc512x_psc_spi_isr(int irq, void *dev_id) +{ + struct mpc512x_psc_spi *mps = (struct mpc512x_psc_spi *)dev_id; + struct mpc512x_psc_fifo __iomem *fifo = mps->fifo; + + /* clear interrupt and wake up the work queue */ + if (in_be32(&fifo->txisr) & + in_be32(&fifo->tximr) & MPC512x_PSC_FIFO_EMPTY) { + out_be32(&fifo->txisr, MPC512x_PSC_FIFO_EMPTY); + out_be32(&fifo->tximr, 0); + complete(&mps->done); + return IRQ_HANDLED; + } + return IRQ_NONE; +} + +/* bus_num is used only for the case dev->platform_data == NULL */ +static int __devinit mpc512x_psc_spi_do_probe(struct device *dev, u32 regaddr, + u32 size, unsigned int irq, + s16 bus_num) +{ + struct fsl_spi_platform_data *pdata = dev->platform_data; + struct mpc512x_psc_spi *mps; + struct spi_master *master; + int ret; + void *tempp; + + master = spi_alloc_master(dev, sizeof *mps); + if (master == NULL) + return -ENOMEM; + + dev_set_drvdata(dev, master); + mps = spi_master_get_devdata(master); + mps->irq = irq; + + if (pdata == NULL) { + dev_err(dev, "probe called without platform data, no " + "cs_control function will be called\n"); + mps->cs_control = NULL; + mps->sysclk = 0; + master->bus_num = bus_num; + master->num_chipselect = 255; + } else { + mps->cs_control = pdata->cs_control; + mps->sysclk = pdata->sysclk; + master->bus_num = pdata->bus_num; + master->num_chipselect = pdata->max_chipselect; + } + + master->setup = mpc512x_psc_spi_setup; + master->transfer = mpc512x_psc_spi_transfer; + master->cleanup = mpc512x_psc_spi_cleanup; + master->dev.of_node = dev->of_node; + + tempp = 
ioremap(regaddr, size); + if (!tempp) { + dev_err(dev, "could not ioremap I/O port range\n"); + ret = -EFAULT; + goto free_master; + } + mps->psc = tempp; + mps->fifo = + (struct mpc512x_psc_fifo *)(tempp + sizeof(struct mpc52xx_psc)); + + ret = request_irq(mps->irq, mpc512x_psc_spi_isr, IRQF_SHARED, + "mpc512x-psc-spi", mps); + if (ret) + goto free_master; + + ret = mpc512x_psc_spi_port_config(master, mps); + if (ret < 0) + goto free_irq; + + spin_lock_init(&mps->lock); + init_completion(&mps->done); + INIT_WORK(&mps->work, mpc512x_psc_spi_work); + INIT_LIST_HEAD(&mps->queue); + + mps->workqueue = + create_singlethread_workqueue(dev_name(master->dev.parent)); + if (mps->workqueue == NULL) { + ret = -EBUSY; + goto free_irq; + } + + ret = spi_register_master(master); + if (ret < 0) + goto unreg_master; + + return ret; + +unreg_master: + destroy_workqueue(mps->workqueue); +free_irq: + free_irq(mps->irq, mps); +free_master: + if (mps->psc) + iounmap(mps->psc); + spi_master_put(master); + + return ret; +} + +static int __devexit mpc512x_psc_spi_do_remove(struct device *dev) +{ + struct spi_master *master = dev_get_drvdata(dev); + struct mpc512x_psc_spi *mps = spi_master_get_devdata(master); + + flush_workqueue(mps->workqueue); + destroy_workqueue(mps->workqueue); + spi_unregister_master(master); + free_irq(mps->irq, mps); + if (mps->psc) + iounmap(mps->psc); + + return 0; +} + +static int __devinit mpc512x_psc_spi_of_probe(struct platform_device *op) +{ + const u32 *regaddr_p; + u64 regaddr64, size64; + s16 id = -1; + + regaddr_p = of_get_address(op->dev.of_node, 0, &size64, NULL); + if (!regaddr_p) { + dev_err(&op->dev, "Invalid PSC address\n"); + return -EINVAL; + } + regaddr64 = of_translate_address(op->dev.of_node, regaddr_p); + + /* get PSC id (0..11, used by port_config) */ + if (op->dev.platform_data == NULL) { + const u32 *psc_nump; + + psc_nump = of_get_property(op->dev.of_node, "cell-index", NULL); + if (!psc_nump || *psc_nump > 11) { + dev_err(&op->dev, "mpc512x_psc_spi: Device node %s " + "has invalid cell-index property\n", + op->dev.of_node->full_name); + return -EINVAL; + } + id = *psc_nump; + } + + return mpc512x_psc_spi_do_probe(&op->dev, (u32) regaddr64, (u32) size64, + irq_of_parse_and_map(op->dev.of_node, 0), id); +} + +static int __devexit mpc512x_psc_spi_of_remove(struct platform_device *op) +{ + return mpc512x_psc_spi_do_remove(&op->dev); +} + +static struct of_device_id mpc512x_psc_spi_of_match[] = { + { .compatible = "fsl,mpc5121-psc-spi", }, + {}, +}; + +MODULE_DEVICE_TABLE(of, mpc512x_psc_spi_of_match); + +static struct platform_driver mpc512x_psc_spi_of_driver = { + .probe = mpc512x_psc_spi_of_probe, + .remove = __devexit_p(mpc512x_psc_spi_of_remove), + .driver = { + .name = "mpc512x-psc-spi", + .owner = THIS_MODULE, + .of_match_table = mpc512x_psc_spi_of_match, + }, +}; + +static int __init mpc512x_psc_spi_init(void) +{ + return platform_driver_register(&mpc512x_psc_spi_of_driver); +} +module_init(mpc512x_psc_spi_init); + +static void __exit mpc512x_psc_spi_exit(void) +{ + platform_driver_unregister(&mpc512x_psc_spi_of_driver); +} +module_exit(mpc512x_psc_spi_exit); + +MODULE_AUTHOR("John Rigby"); +MODULE_DESCRIPTION("MPC512x PSC SPI Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi-mpc52xx-psc.c b/drivers/spi/spi-mpc52xx-psc.c new file mode 100644 index 0000000..e30baf0 --- /dev/null +++ b/drivers/spi/spi-mpc52xx-psc.c @@ -0,0 +1,529 @@ +/* + * MPC52xx PSC in SPI mode driver. 
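For reference, an SPI protocol driver might exercise the per-transfer fields consumed by the message work loop above (the MPC52xx driver that follows walks transfers the same way) roughly as below. The command byte, rate and delay are arbitrary: a non-zero speed_hz or bits_per_word makes the loop call transfer_setup(), cs_change toggles the chip select between the two transfers, and delay_usecs pauses before that happens. Note that these PSC drivers round bits_per_word up to a whole number of bytes, and that stack buffers are only acceptable because these are PIO masters.

#include <linux/spi/spi.h>

static int example_read_reg(struct spi_device *spi, u8 cmd, u8 *val)
{
	struct spi_message m;
	struct spi_transfer t[2] = {
		{
			.tx_buf		= &cmd,
			.len		= 1,
			.speed_hz	= 2000000,	/* per-transfer rate override */
			.cs_change	= 1,		/* drop CS between transfers */
			.delay_usecs	= 10,		/* pause before CS drops */
		}, {
			.rx_buf		= val,
			.len		= 1,
		},
	};

	spi_message_init(&m);
	spi_message_add_tail(&t[0], &m);
	spi_message_add_tail(&t[1], &m);

	return spi_sync(spi, &m);
}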
+ *
+ * Maintainer: Dragos Carp
+ *
+ * Copyright (C) 2006 TOPTICA Photonics AG.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+
+#define MCLK 20000000 /* PSC port MClk in Hz */
+
+struct mpc52xx_psc_spi {
+ /* fsl_spi_platform data */
+ void (*cs_control)(struct spi_device *spi, bool on);
+ u32 sysclk;
+
+ /* driver internal data */
+ struct mpc52xx_psc __iomem *psc;
+ struct mpc52xx_psc_fifo __iomem *fifo;
+ unsigned int irq;
+ u8 bits_per_word;
+ u8 busy;
+
+ struct workqueue_struct *workqueue;
+ struct work_struct work;
+
+ struct list_head queue;
+ spinlock_t lock;
+
+ struct completion done;
+};
+
+/* controller state */
+struct mpc52xx_psc_spi_cs {
+ int bits_per_word;
+ int speed_hz;
+};
+
+/* set clock freq, clock ramp, bits per word
+ * if t is NULL then reset the values to the default values
+ */
+static int mpc52xx_psc_spi_transfer_setup(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct mpc52xx_psc_spi_cs *cs = spi->controller_state;
+
+ cs->speed_hz = (t && t->speed_hz)
+ ? t->speed_hz : spi->max_speed_hz;
+ cs->bits_per_word = (t && t->bits_per_word)
+ ? t->bits_per_word : spi->bits_per_word;
+ cs->bits_per_word = ((cs->bits_per_word + 7) / 8) * 8;
+ return 0;
+}
+
+static void mpc52xx_psc_spi_activate_cs(struct spi_device *spi)
+{
+ struct mpc52xx_psc_spi_cs *cs = spi->controller_state;
+ struct mpc52xx_psc_spi *mps = spi_master_get_devdata(spi->master);
+ struct mpc52xx_psc __iomem *psc = mps->psc;
+ u32 sicr;
+ u16 ccr;
+
+ sicr = in_be32(&psc->sicr);
+
+ /* Set clock phase and polarity */
+ if (spi->mode & SPI_CPHA)
+ sicr |= 0x00001000;
+ else
+ sicr &= ~0x00001000;
+ if (spi->mode & SPI_CPOL)
+ sicr |= 0x00002000;
+ else
+ sicr &= ~0x00002000;
+
+ if (spi->mode & SPI_LSB_FIRST)
+ sicr |= 0x10000000;
+ else
+ sicr &= ~0x10000000;
+ out_be32(&psc->sicr, sicr);
+
+ /* Set clock frequency and bits per word
+ * Because psc->ccr is defined as 16bit register instead of 32bit
+ * just set the lower byte of BitClkDiv
+ */
+ ccr = in_be16((u16 __iomem *)&psc->ccr);
+ ccr &= 0xFF00;
+ if (cs->speed_hz)
+ ccr |= (MCLK / cs->speed_hz - 1) & 0xFF;
+ else /* by default SPI Clk 1MHz */
+ ccr |= (MCLK / 1000000 - 1) & 0xFF;
+ out_be16((u16 __iomem *)&psc->ccr, ccr);
+ mps->bits_per_word = cs->bits_per_word;
+
+ if (mps->cs_control)
+ mps->cs_control(spi, (spi->mode & SPI_CS_HIGH) ? 1 : 0);
+}
+
+static void mpc52xx_psc_spi_deactivate_cs(struct spi_device *spi)
+{
+ struct mpc52xx_psc_spi *mps = spi_master_get_devdata(spi->master);
+
+ if (mps->cs_control)
+ mps->cs_control(spi, (spi->mode & SPI_CS_HIGH) ? 0 : 1);
+}
+
+#define MPC52xx_PSC_BUFSIZE (MPC52xx_PSC_RFNUM_MASK + 1)
+/* wake up when 80% fifo full */
+#define MPC52xx_PSC_RFALARM (MPC52xx_PSC_BUFSIZE * 20 / 100)
+
+static int mpc52xx_psc_spi_transfer_rxtx(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct mpc52xx_psc_spi *mps = spi_master_get_devdata(spi->master);
+ struct mpc52xx_psc __iomem *psc = mps->psc;
+ struct mpc52xx_psc_fifo __iomem *fifo = mps->fifo;
+ unsigned rb = 0; /* number of bytes received */
+ unsigned sb = 0; /* number of bytes sent */
+ unsigned char *rx_buf = (unsigned char *)t->rx_buf;
+ unsigned char *tx_buf = (unsigned char *)t->tx_buf;
+ unsigned rfalarm;
+ unsigned send_at_once = MPC52xx_PSC_BUFSIZE;
+ unsigned recv_at_once;
+ int last_block = 0;
+
+ if (!t->tx_buf && !t->rx_buf && t->len)
+ return -EINVAL;
+
+ /* enable transmitter/receiver */
+ out_8(&psc->command, MPC52xx_PSC_TX_ENABLE | MPC52xx_PSC_RX_ENABLE);
+ while (rb < t->len) {
+ if (t->len - rb > MPC52xx_PSC_BUFSIZE) {
+ rfalarm = MPC52xx_PSC_RFALARM;
+ last_block = 0;
+ } else {
+ send_at_once = t->len - sb;
+ rfalarm = MPC52xx_PSC_BUFSIZE - (t->len - rb);
+ last_block = 1;
+ }
+
+ dev_dbg(&spi->dev, "send %d bytes...\n", send_at_once);
+ for (; send_at_once; sb++, send_at_once--) {
+ /* set EOF flag before the last word is sent */
+ if (send_at_once == 1 && last_block)
+ out_8(&psc->ircr2, 0x01);
+
+ if (tx_buf)
+ out_8(&psc->mpc52xx_psc_buffer_8, tx_buf[sb]);
+ else
+ out_8(&psc->mpc52xx_psc_buffer_8, 0);
+ }
+
+
+ /* enable interrupts and wait for wake up
+ * if just one byte is expected the Rx FIFO generates no
+ * FFULL interrupt, so activate the RxRDY interrupt
+ */
+ out_8(&psc->command, MPC52xx_PSC_SEL_MODE_REG_1);
+ if (t->len - rb == 1) {
+ out_8(&psc->mode, 0);
+ } else {
+ out_8(&psc->mode, MPC52xx_PSC_MODE_FFULL);
+ out_be16(&fifo->rfalarm, rfalarm);
+ }
+ out_be16(&psc->mpc52xx_psc_imr, MPC52xx_PSC_IMR_RXRDY);
+ wait_for_completion(&mps->done);
+ recv_at_once = in_be16(&fifo->rfnum);
+ dev_dbg(&spi->dev, "%d bytes received\n", recv_at_once);
+
+ send_at_once = recv_at_once;
+ if (rx_buf) {
+ for (; recv_at_once; rb++, recv_at_once--)
+ rx_buf[rb] = in_8(&psc->mpc52xx_psc_buffer_8);
+ } else {
+ for (; recv_at_once; rb++, recv_at_once--)
+ in_8(&psc->mpc52xx_psc_buffer_8);
+ }
+ }
+ /* disable transmitter/receiver */
+ out_8(&psc->command, MPC52xx_PSC_TX_DISABLE | MPC52xx_PSC_RX_DISABLE);
+
+ return 0;
+}
+
+static void mpc52xx_psc_spi_work(struct work_struct *work)
+{
+ struct mpc52xx_psc_spi *mps =
+ container_of(work, struct mpc52xx_psc_spi, work);
+
+ spin_lock_irq(&mps->lock);
+ mps->busy = 1;
+ while (!list_empty(&mps->queue)) {
+ struct spi_message *m;
+ struct spi_device *spi;
+ struct spi_transfer *t = NULL;
+ unsigned cs_change;
+ int status;
+
+ m = container_of(mps->queue.next, struct spi_message, queue);
+ list_del_init(&m->queue);
+ spin_unlock_irq(&mps->lock);
+
+ spi = m->spi;
+ cs_change = 1;
+ status = 0;
+ list_for_each_entry (t, &m->transfers, transfer_list) {
+ if (t->bits_per_word || t->speed_hz) {
+ status = mpc52xx_psc_spi_transfer_setup(spi, t);
+ if (status < 0)
+ break;
+ }
+
+ if (cs_change)
+ mpc52xx_psc_spi_activate_cs(spi);
+ cs_change = t->cs_change;
+
+ status = mpc52xx_psc_spi_transfer_rxtx(spi, t);
+ if (status)
+ break;
+ m->actual_length += t->len;
+
+ if (t->delay_usecs)
+ udelay(t->delay_usecs);
+
+ if (cs_change)
+ mpc52xx_psc_spi_deactivate_cs(spi);
+ }
+
+ m->status = status;
+ m->complete(m->context);
+
+ if (status || !cs_change)
+ mpc52xx_psc_spi_deactivate_cs(spi);
+
+
mpc52xx_psc_spi_transfer_setup(spi, NULL); + + spin_lock_irq(&mps->lock); + } + mps->busy = 0; + spin_unlock_irq(&mps->lock); +} + +static int mpc52xx_psc_spi_setup(struct spi_device *spi) +{ + struct mpc52xx_psc_spi *mps = spi_master_get_devdata(spi->master); + struct mpc52xx_psc_spi_cs *cs = spi->controller_state; + unsigned long flags; + + if (spi->bits_per_word%8) + return -EINVAL; + + if (!cs) { + cs = kzalloc(sizeof *cs, GFP_KERNEL); + if (!cs) + return -ENOMEM; + spi->controller_state = cs; + } + + cs->bits_per_word = spi->bits_per_word; + cs->speed_hz = spi->max_speed_hz; + + spin_lock_irqsave(&mps->lock, flags); + if (!mps->busy) + mpc52xx_psc_spi_deactivate_cs(spi); + spin_unlock_irqrestore(&mps->lock, flags); + + return 0; +} + +static int mpc52xx_psc_spi_transfer(struct spi_device *spi, + struct spi_message *m) +{ + struct mpc52xx_psc_spi *mps = spi_master_get_devdata(spi->master); + unsigned long flags; + + m->actual_length = 0; + m->status = -EINPROGRESS; + + spin_lock_irqsave(&mps->lock, flags); + list_add_tail(&m->queue, &mps->queue); + queue_work(mps->workqueue, &mps->work); + spin_unlock_irqrestore(&mps->lock, flags); + + return 0; +} + +static void mpc52xx_psc_spi_cleanup(struct spi_device *spi) +{ + kfree(spi->controller_state); +} + +static int mpc52xx_psc_spi_port_config(int psc_id, struct mpc52xx_psc_spi *mps) +{ + struct mpc52xx_psc __iomem *psc = mps->psc; + struct mpc52xx_psc_fifo __iomem *fifo = mps->fifo; + u32 mclken_div; + int ret; + + /* default sysclk is 512MHz */ + mclken_div = (mps->sysclk ? mps->sysclk : 512000000) / MCLK; + ret = mpc52xx_set_psc_clkdiv(psc_id, mclken_div); + if (ret) + return ret; + + /* Reset the PSC into a known state */ + out_8(&psc->command, MPC52xx_PSC_RST_RX); + out_8(&psc->command, MPC52xx_PSC_RST_TX); + out_8(&psc->command, MPC52xx_PSC_TX_DISABLE | MPC52xx_PSC_RX_DISABLE); + + /* Disable interrupts, interrupts are based on alarm level */ + out_be16(&psc->mpc52xx_psc_imr, 0); + out_8(&psc->command, MPC52xx_PSC_SEL_MODE_REG_1); + out_8(&fifo->rfcntl, 0); + out_8(&psc->mode, MPC52xx_PSC_MODE_FFULL); + + /* Configure 8bit codec mode as a SPI master and use EOF flags */ + /* SICR_SIM_CODEC8|SICR_GENCLK|SICR_SPI|SICR_MSTR|SICR_USEEOF */ + out_be32(&psc->sicr, 0x0180C800); + out_be16((u16 __iomem *)&psc->ccr, 0x070F); /* default SPI Clk 1MHz */ + + /* Set 2ms DTL delay */ + out_8(&psc->ctur, 0x00); + out_8(&psc->ctlr, 0x84); + + mps->bits_per_word = 8; + + return 0; +} + +static irqreturn_t mpc52xx_psc_spi_isr(int irq, void *dev_id) +{ + struct mpc52xx_psc_spi *mps = (struct mpc52xx_psc_spi *)dev_id; + struct mpc52xx_psc __iomem *psc = mps->psc; + + /* disable interrupt and wake up the work queue */ + if (in_be16(&psc->mpc52xx_psc_isr) & MPC52xx_PSC_IMR_RXRDY) { + out_be16(&psc->mpc52xx_psc_imr, 0); + complete(&mps->done); + return IRQ_HANDLED; + } + return IRQ_NONE; +} + +/* bus_num is used only for the case dev->platform_data == NULL */ +static int __devinit mpc52xx_psc_spi_do_probe(struct device *dev, u32 regaddr, + u32 size, unsigned int irq, s16 bus_num) +{ + struct fsl_spi_platform_data *pdata = dev->platform_data; + struct mpc52xx_psc_spi *mps; + struct spi_master *master; + int ret; + + master = spi_alloc_master(dev, sizeof *mps); + if (master == NULL) + return -ENOMEM; + + dev_set_drvdata(dev, master); + mps = spi_master_get_devdata(master); + + /* the spi->mode bits understood by this driver: */ + master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST; + + mps->irq = irq; + if (pdata == NULL) { + dev_warn(dev, 
"probe called without platform data, no " + "cs_control function will be called\n"); + mps->cs_control = NULL; + mps->sysclk = 0; + master->bus_num = bus_num; + master->num_chipselect = 255; + } else { + mps->cs_control = pdata->cs_control; + mps->sysclk = pdata->sysclk; + master->bus_num = pdata->bus_num; + master->num_chipselect = pdata->max_chipselect; + } + master->setup = mpc52xx_psc_spi_setup; + master->transfer = mpc52xx_psc_spi_transfer; + master->cleanup = mpc52xx_psc_spi_cleanup; + master->dev.of_node = dev->of_node; + + mps->psc = ioremap(regaddr, size); + if (!mps->psc) { + dev_err(dev, "could not ioremap I/O port range\n"); + ret = -EFAULT; + goto free_master; + } + /* On the 5200, fifo regs are immediately ajacent to the psc regs */ + mps->fifo = ((void __iomem *)mps->psc) + sizeof(struct mpc52xx_psc); + + ret = request_irq(mps->irq, mpc52xx_psc_spi_isr, 0, "mpc52xx-psc-spi", + mps); + if (ret) + goto free_master; + + ret = mpc52xx_psc_spi_port_config(master->bus_num, mps); + if (ret < 0) { + dev_err(dev, "can't configure PSC! Is it capable of SPI?\n"); + goto free_irq; + } + + spin_lock_init(&mps->lock); + init_completion(&mps->done); + INIT_WORK(&mps->work, mpc52xx_psc_spi_work); + INIT_LIST_HEAD(&mps->queue); + + mps->workqueue = create_singlethread_workqueue( + dev_name(master->dev.parent)); + if (mps->workqueue == NULL) { + ret = -EBUSY; + goto free_irq; + } + + ret = spi_register_master(master); + if (ret < 0) + goto unreg_master; + + return ret; + +unreg_master: + destroy_workqueue(mps->workqueue); +free_irq: + free_irq(mps->irq, mps); +free_master: + if (mps->psc) + iounmap(mps->psc); + spi_master_put(master); + + return ret; +} + +static int __devinit mpc52xx_psc_spi_of_probe(struct platform_device *op) +{ + const u32 *regaddr_p; + u64 regaddr64, size64; + s16 id = -1; + + regaddr_p = of_get_address(op->dev.of_node, 0, &size64, NULL); + if (!regaddr_p) { + dev_err(&op->dev, "Invalid PSC address\n"); + return -EINVAL; + } + regaddr64 = of_translate_address(op->dev.of_node, regaddr_p); + + /* get PSC id (1..6, used by port_config) */ + if (op->dev.platform_data == NULL) { + const u32 *psc_nump; + + psc_nump = of_get_property(op->dev.of_node, "cell-index", NULL); + if (!psc_nump || *psc_nump > 5) { + dev_err(&op->dev, "Invalid cell-index property\n"); + return -EINVAL; + } + id = *psc_nump + 1; + } + + return mpc52xx_psc_spi_do_probe(&op->dev, (u32)regaddr64, (u32)size64, + irq_of_parse_and_map(op->dev.of_node, 0), id); +} + +static int __devexit mpc52xx_psc_spi_of_remove(struct platform_device *op) +{ + struct spi_master *master = dev_get_drvdata(&op->dev); + struct mpc52xx_psc_spi *mps = spi_master_get_devdata(master); + + flush_workqueue(mps->workqueue); + destroy_workqueue(mps->workqueue); + spi_unregister_master(master); + free_irq(mps->irq, mps); + if (mps->psc) + iounmap(mps->psc); + + return 0; +} + +static const struct of_device_id mpc52xx_psc_spi_of_match[] = { + { .compatible = "fsl,mpc5200-psc-spi", }, + { .compatible = "mpc5200-psc-spi", }, /* old */ + {} +}; + +MODULE_DEVICE_TABLE(of, mpc52xx_psc_spi_of_match); + +static struct platform_driver mpc52xx_psc_spi_of_driver = { + .probe = mpc52xx_psc_spi_of_probe, + .remove = __devexit_p(mpc52xx_psc_spi_of_remove), + .driver = { + .name = "mpc52xx-psc-spi", + .owner = THIS_MODULE, + .of_match_table = mpc52xx_psc_spi_of_match, + }, +}; + +static int __init mpc52xx_psc_spi_init(void) +{ + return platform_driver_register(&mpc52xx_psc_spi_of_driver); +} +module_init(mpc52xx_psc_spi_init); + +static void __exit 
mpc52xx_psc_spi_exit(void) +{ + platform_driver_unregister(&mpc52xx_psc_spi_of_driver); +} +module_exit(mpc52xx_psc_spi_exit); + +MODULE_AUTHOR("Dragos Carp"); +MODULE_DESCRIPTION("MPC52xx PSC SPI Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi-mpc52xx.c b/drivers/spi/spi-mpc52xx.c new file mode 100644 index 0000000..015a974 --- /dev/null +++ b/drivers/spi/spi-mpc52xx.c @@ -0,0 +1,579 @@ +/* + * MPC52xx SPI bus driver. + * + * Copyright (C) 2008 Secret Lab Technologies Ltd. + * + * This file is released under the GPLv2 + * + * This is the driver for the MPC5200's dedicated SPI controller. + * + * Note: this driver does not support the MPC5200 PSC in SPI mode. For + * that driver see drivers/spi/mpc52xx_psc_spi.c + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +MODULE_AUTHOR("Grant Likely "); +MODULE_DESCRIPTION("MPC52xx SPI (non-PSC) Driver"); +MODULE_LICENSE("GPL"); + +/* Register offsets */ +#define SPI_CTRL1 0x00 +#define SPI_CTRL1_SPIE (1 << 7) +#define SPI_CTRL1_SPE (1 << 6) +#define SPI_CTRL1_MSTR (1 << 4) +#define SPI_CTRL1_CPOL (1 << 3) +#define SPI_CTRL1_CPHA (1 << 2) +#define SPI_CTRL1_SSOE (1 << 1) +#define SPI_CTRL1_LSBFE (1 << 0) + +#define SPI_CTRL2 0x01 +#define SPI_BRR 0x04 + +#define SPI_STATUS 0x05 +#define SPI_STATUS_SPIF (1 << 7) +#define SPI_STATUS_WCOL (1 << 6) +#define SPI_STATUS_MODF (1 << 4) + +#define SPI_DATA 0x09 +#define SPI_PORTDATA 0x0d +#define SPI_DATADIR 0x10 + +/* FSM state return values */ +#define FSM_STOP 0 /* Nothing more for the state machine to */ + /* do. If something interesting happens */ + /* then an IRQ will be received */ +#define FSM_POLL 1 /* need to poll for completion, an IRQ is */ + /* not expected */ +#define FSM_CONTINUE 2 /* Keep iterating the state machine */ + +/* Driver internal data */ +struct mpc52xx_spi { + struct spi_master *master; + void __iomem *regs; + int irq0; /* MODF irq */ + int irq1; /* SPIF irq */ + unsigned int ipb_freq; + + /* Statistics; not used now, but will be reintroduced for debugfs */ + int msg_count; + int wcol_count; + int wcol_ticks; + u32 wcol_tx_timestamp; + int modf_count; + int byte_count; + + struct list_head queue; /* queue of pending messages */ + spinlock_t lock; + struct work_struct work; + + /* Details of current transfer (length, and buffer pointers) */ + struct spi_message *message; /* current message */ + struct spi_transfer *transfer; /* current transfer */ + int (*state)(int irq, struct mpc52xx_spi *ms, u8 status, u8 data); + int len; + int timestamp; + u8 *rx_buf; + const u8 *tx_buf; + int cs_change; + int gpio_cs_count; + unsigned int *gpio_cs; +}; + +/* + * CS control function + */ +static void mpc52xx_spi_chipsel(struct mpc52xx_spi *ms, int value) +{ + int cs; + + if (ms->gpio_cs_count > 0) { + cs = ms->message->spi->chip_select; + gpio_set_value(ms->gpio_cs[cs], value ? 0 : 1); + } else + out_8(ms->regs + SPI_PORTDATA, value ? 0 : 0x08); +} + +/* + * Start a new transfer. This is called both by the idle state + * for the first transfer in a message, and by the wait state when the + * previous transfer in a message is complete. 
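+ *
+ * It latches the transfer's buffers and length, re-asserts the chip select
+ * if the previous transfer released it (ms->cs_change), and writes the first
+ * byte to the data register to start the shift sequence.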
+ */ +static void mpc52xx_spi_start_transfer(struct mpc52xx_spi *ms) +{ + ms->rx_buf = ms->transfer->rx_buf; + ms->tx_buf = ms->transfer->tx_buf; + ms->len = ms->transfer->len; + + /* Activate the chip select */ + if (ms->cs_change) + mpc52xx_spi_chipsel(ms, 1); + ms->cs_change = ms->transfer->cs_change; + + /* Write out the first byte */ + ms->wcol_tx_timestamp = get_tbl(); + if (ms->tx_buf) + out_8(ms->regs + SPI_DATA, *ms->tx_buf++); + else + out_8(ms->regs + SPI_DATA, 0); +} + +/* Forward declaration of state handlers */ +static int mpc52xx_spi_fsmstate_transfer(int irq, struct mpc52xx_spi *ms, + u8 status, u8 data); +static int mpc52xx_spi_fsmstate_wait(int irq, struct mpc52xx_spi *ms, + u8 status, u8 data); + +/* + * IDLE state + * + * No transfers are in progress; if another transfer is pending then retrieve + * it and kick it off. Otherwise, stop processing the state machine + */ +static int +mpc52xx_spi_fsmstate_idle(int irq, struct mpc52xx_spi *ms, u8 status, u8 data) +{ + struct spi_device *spi; + int spr, sppr; + u8 ctrl1; + + if (status && (irq != NO_IRQ)) + dev_err(&ms->master->dev, "spurious irq, status=0x%.2x\n", + status); + + /* Check if there is another transfer waiting. */ + if (list_empty(&ms->queue)) + return FSM_STOP; + + /* get the head of the queue */ + ms->message = list_first_entry(&ms->queue, struct spi_message, queue); + list_del_init(&ms->message->queue); + + /* Setup the controller parameters */ + ctrl1 = SPI_CTRL1_SPIE | SPI_CTRL1_SPE | SPI_CTRL1_MSTR; + spi = ms->message->spi; + if (spi->mode & SPI_CPHA) + ctrl1 |= SPI_CTRL1_CPHA; + if (spi->mode & SPI_CPOL) + ctrl1 |= SPI_CTRL1_CPOL; + if (spi->mode & SPI_LSB_FIRST) + ctrl1 |= SPI_CTRL1_LSBFE; + out_8(ms->regs + SPI_CTRL1, ctrl1); + + /* Setup the controller speed */ + /* minimum divider is '2'. Also, add '1' to force rounding the + * divider up. */ + sppr = ((ms->ipb_freq / ms->message->spi->max_speed_hz) + 1) >> 1; + spr = 0; + if (sppr < 1) + sppr = 1; + while (((sppr - 1) & ~0x7) != 0) { + sppr = (sppr + 1) >> 1; /* add '1' to force rounding up */ + spr++; + } + sppr--; /* sppr quantity in register is offset by 1 */ + if (spr > 7) { + /* Don't overrun limits of SPI baudrate register */ + spr = 7; + sppr = 7; + } + out_8(ms->regs + SPI_BRR, sppr << 4 | spr); /* Set speed */ + + ms->cs_change = 1; + ms->transfer = container_of(ms->message->transfers.next, + struct spi_transfer, transfer_list); + + mpc52xx_spi_start_transfer(ms); + ms->state = mpc52xx_spi_fsmstate_transfer; + + return FSM_CONTINUE; +} + +/* + * TRANSFER state + * + * In the middle of a transfer. If the SPI core has completed processing + * a byte, then read out the received data and write out the next byte + * (unless this transfer is finished; in which case go on to the wait + * state) + */ +static int mpc52xx_spi_fsmstate_transfer(int irq, struct mpc52xx_spi *ms, + u8 status, u8 data) +{ + if (!status) + return ms->irq0 ? FSM_STOP : FSM_POLL; + + if (status & SPI_STATUS_WCOL) { + /* The SPI controller is stoopid. At slower speeds, it may + * raise the SPIF flag before the state machine is actually + * finished, which causes a collision (internal to the state + * machine only). The manual recommends inserting a delay + * between receiving the interrupt and sending the next byte, + * but it can also be worked around simply by retrying the + * transfer which is what we do here. 
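+ * The retry below rewrites the byte that was last loaded (or zero for a
+ * receive-only transfer) and accounts the lost time in the wcol statistics.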
*/ + ms->wcol_count++; + ms->wcol_ticks += get_tbl() - ms->wcol_tx_timestamp; + ms->wcol_tx_timestamp = get_tbl(); + data = 0; + if (ms->tx_buf) + data = *(ms->tx_buf - 1); + out_8(ms->regs + SPI_DATA, data); /* try again */ + return FSM_CONTINUE; + } else if (status & SPI_STATUS_MODF) { + ms->modf_count++; + dev_err(&ms->master->dev, "mode fault\n"); + mpc52xx_spi_chipsel(ms, 0); + ms->message->status = -EIO; + ms->message->complete(ms->message->context); + ms->state = mpc52xx_spi_fsmstate_idle; + return FSM_CONTINUE; + } + + /* Read data out of the spi device */ + ms->byte_count++; + if (ms->rx_buf) + *ms->rx_buf++ = data; + + /* Is the transfer complete? */ + ms->len--; + if (ms->len == 0) { + ms->timestamp = get_tbl(); + ms->timestamp += ms->transfer->delay_usecs * tb_ticks_per_usec; + ms->state = mpc52xx_spi_fsmstate_wait; + return FSM_CONTINUE; + } + + /* Write out the next byte */ + ms->wcol_tx_timestamp = get_tbl(); + if (ms->tx_buf) + out_8(ms->regs + SPI_DATA, *ms->tx_buf++); + else + out_8(ms->regs + SPI_DATA, 0); + + return FSM_CONTINUE; +} + +/* + * WAIT state + * + * A transfer has completed; need to wait for the delay period to complete + * before starting the next transfer + */ +static int +mpc52xx_spi_fsmstate_wait(int irq, struct mpc52xx_spi *ms, u8 status, u8 data) +{ + if (status && irq) + dev_err(&ms->master->dev, "spurious irq, status=0x%.2x\n", + status); + + if (((int)get_tbl()) - ms->timestamp < 0) + return FSM_POLL; + + ms->message->actual_length += ms->transfer->len; + + /* Check if there is another transfer in this message. If there + * aren't then deactivate CS, notify sender, and drop back to idle + * to start the next message. */ + if (ms->transfer->transfer_list.next == &ms->message->transfers) { + ms->msg_count++; + mpc52xx_spi_chipsel(ms, 0); + ms->message->status = 0; + ms->message->complete(ms->message->context); + ms->state = mpc52xx_spi_fsmstate_idle; + return FSM_CONTINUE; + } + + /* There is another transfer; kick it off */ + + if (ms->cs_change) + mpc52xx_spi_chipsel(ms, 0); + + ms->transfer = container_of(ms->transfer->transfer_list.next, + struct spi_transfer, transfer_list); + mpc52xx_spi_start_transfer(ms); + ms->state = mpc52xx_spi_fsmstate_transfer; + return FSM_CONTINUE; +} + +/** + * mpc52xx_spi_fsm_process - Finite State Machine iteration function + * @irq: irq number that triggered the FSM or 0 for polling + * @ms: pointer to mpc52xx_spi driver data + */ +static void mpc52xx_spi_fsm_process(int irq, struct mpc52xx_spi *ms) +{ + int rc = FSM_CONTINUE; + u8 status, data; + + while (rc == FSM_CONTINUE) { + /* Interrupt cleared by read of STATUS followed by + * read of DATA registers */ + status = in_8(ms->regs + SPI_STATUS); + data = in_8(ms->regs + SPI_DATA); + rc = ms->state(irq, ms, status, data); + } + + if (rc == FSM_POLL) + schedule_work(&ms->work); +} + +/** + * mpc52xx_spi_irq - IRQ handler + */ +static irqreturn_t mpc52xx_spi_irq(int irq, void *_ms) +{ + struct mpc52xx_spi *ms = _ms; + spin_lock(&ms->lock); + mpc52xx_spi_fsm_process(irq, ms); + spin_unlock(&ms->lock); + return IRQ_HANDLED; +} + +/** + * mpc52xx_spi_wq - Workqueue function for polling the state machine + */ +static void mpc52xx_spi_wq(struct work_struct *work) +{ + struct mpc52xx_spi *ms = container_of(work, struct mpc52xx_spi, work); + unsigned long flags; + + spin_lock_irqsave(&ms->lock, flags); + mpc52xx_spi_fsm_process(0, ms); + spin_unlock_irqrestore(&ms->lock, flags); +} + +/* + * spi_master ops + */ + +static int mpc52xx_spi_setup(struct spi_device *spi) +{ + 
if (spi->bits_per_word % 8) + return -EINVAL; + + if (spi->mode & ~(SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST)) + return -EINVAL; + + if (spi->chip_select >= spi->master->num_chipselect) + return -EINVAL; + + return 0; +} + +static int mpc52xx_spi_transfer(struct spi_device *spi, struct spi_message *m) +{ + struct mpc52xx_spi *ms = spi_master_get_devdata(spi->master); + unsigned long flags; + + m->actual_length = 0; + m->status = -EINPROGRESS; + + spin_lock_irqsave(&ms->lock, flags); + list_add_tail(&m->queue, &ms->queue); + spin_unlock_irqrestore(&ms->lock, flags); + schedule_work(&ms->work); + + return 0; +} + +/* + * OF Platform Bus Binding + */ +static int __devinit mpc52xx_spi_probe(struct platform_device *op) +{ + struct spi_master *master; + struct mpc52xx_spi *ms; + void __iomem *regs; + u8 ctrl1; + int rc, i = 0; + int gpio_cs; + + /* MMIO registers */ + dev_dbg(&op->dev, "probing mpc5200 SPI device\n"); + regs = of_iomap(op->dev.of_node, 0); + if (!regs) + return -ENODEV; + + /* initialize the device */ + ctrl1 = SPI_CTRL1_SPIE | SPI_CTRL1_SPE | SPI_CTRL1_MSTR; + out_8(regs + SPI_CTRL1, ctrl1); + out_8(regs + SPI_CTRL2, 0x0); + out_8(regs + SPI_DATADIR, 0xe); /* Set output pins */ + out_8(regs + SPI_PORTDATA, 0x8); /* Deassert /SS signal */ + + /* Clear the status register and re-read it to check for a MODF + * failure. This driver cannot currently handle multiple masters + * on the SPI bus. This fault will also occur if the SPI signals + * are not connected to any pins (port_config setting) */ + in_8(regs + SPI_STATUS); + out_8(regs + SPI_CTRL1, ctrl1); + + in_8(regs + SPI_DATA); + if (in_8(regs + SPI_STATUS) & SPI_STATUS_MODF) { + dev_err(&op->dev, "mode fault; is port_config correct?\n"); + rc = -EIO; + goto err_init; + } + + dev_dbg(&op->dev, "allocating spi_master struct\n"); + master = spi_alloc_master(&op->dev, sizeof *ms); + if (!master) { + rc = -ENOMEM; + goto err_alloc; + } + + master->bus_num = -1; + master->setup = mpc52xx_spi_setup; + master->transfer = mpc52xx_spi_transfer; + master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST; + master->dev.of_node = op->dev.of_node; + + dev_set_drvdata(&op->dev, master); + + ms = spi_master_get_devdata(master); + ms->master = master; + ms->regs = regs; + ms->irq0 = irq_of_parse_and_map(op->dev.of_node, 0); + ms->irq1 = irq_of_parse_and_map(op->dev.of_node, 1); + ms->state = mpc52xx_spi_fsmstate_idle; + ms->ipb_freq = mpc5xxx_get_bus_frequency(op->dev.of_node); + ms->gpio_cs_count = of_gpio_count(op->dev.of_node); + if (ms->gpio_cs_count > 0) { + master->num_chipselect = ms->gpio_cs_count; + ms->gpio_cs = kmalloc(ms->gpio_cs_count * sizeof(unsigned int), + GFP_KERNEL); + if (!ms->gpio_cs) { + rc = -ENOMEM; + goto err_alloc; + } + + for (i = 0; i < ms->gpio_cs_count; i++) { + gpio_cs = of_get_gpio(op->dev.of_node, i); + if (gpio_cs < 0) { + dev_err(&op->dev, + "could not parse the gpio field " + "in oftree\n"); + rc = -ENODEV; + goto err_gpio; + } + + rc = gpio_request(gpio_cs, dev_name(&op->dev)); + if (rc) { + dev_err(&op->dev, + "can't request spi cs gpio #%d " + "on gpio line %d\n", i, gpio_cs); + goto err_gpio; + } + + gpio_direction_output(gpio_cs, 1); + ms->gpio_cs[i] = gpio_cs; + } + } else { + master->num_chipselect = 1; + } + + spin_lock_init(&ms->lock); + INIT_LIST_HEAD(&ms->queue); + INIT_WORK(&ms->work, mpc52xx_spi_wq); + + /* Decide if interrupts can be used */ + if (ms->irq0 && ms->irq1) { + rc = request_irq(ms->irq0, mpc52xx_spi_irq, 0, + "mpc5200-spi-modf", ms); + rc |= request_irq(ms->irq1, mpc52xx_spi_irq, 0, + 
"mpc5200-spi-spif", ms); + if (rc) { + free_irq(ms->irq0, ms); + free_irq(ms->irq1, ms); + ms->irq0 = ms->irq1 = 0; + } + } else { + /* operate in polled mode */ + ms->irq0 = ms->irq1 = 0; + } + + if (!ms->irq0) + dev_info(&op->dev, "using polled mode\n"); + + dev_dbg(&op->dev, "registering spi_master struct\n"); + rc = spi_register_master(master); + if (rc) + goto err_register; + + dev_info(&ms->master->dev, "registered MPC5200 SPI bus\n"); + + return rc; + + err_register: + dev_err(&ms->master->dev, "initialization failed\n"); + spi_master_put(master); + err_gpio: + while (i-- > 0) + gpio_free(ms->gpio_cs[i]); + + kfree(ms->gpio_cs); + err_alloc: + err_init: + iounmap(regs); + return rc; +} + +static int __devexit mpc52xx_spi_remove(struct platform_device *op) +{ + struct spi_master *master = dev_get_drvdata(&op->dev); + struct mpc52xx_spi *ms = spi_master_get_devdata(master); + int i; + + free_irq(ms->irq0, ms); + free_irq(ms->irq1, ms); + + for (i = 0; i < ms->gpio_cs_count; i++) + gpio_free(ms->gpio_cs[i]); + + kfree(ms->gpio_cs); + spi_unregister_master(master); + spi_master_put(master); + iounmap(ms->regs); + + return 0; +} + +static const struct of_device_id mpc52xx_spi_match[] __devinitconst = { + { .compatible = "fsl,mpc5200-spi", }, + {} +}; +MODULE_DEVICE_TABLE(of, mpc52xx_spi_match); + +static struct platform_driver mpc52xx_spi_of_driver = { + .driver = { + .name = "mpc52xx-spi", + .owner = THIS_MODULE, + .of_match_table = mpc52xx_spi_match, + }, + .probe = mpc52xx_spi_probe, + .remove = __devexit_p(mpc52xx_spi_remove), +}; + +static int __init mpc52xx_spi_init(void) +{ + return platform_driver_register(&mpc52xx_spi_of_driver); +} +module_init(mpc52xx_spi_init); + +static void __exit mpc52xx_spi_exit(void) +{ + platform_driver_unregister(&mpc52xx_spi_of_driver); +} +module_exit(mpc52xx_spi_exit); + diff --git a/drivers/spi/spi-nuc900.c b/drivers/spi/spi-nuc900.c new file mode 100644 index 0000000..c0a6ce8 --- /dev/null +++ b/drivers/spi/spi-nuc900.c @@ -0,0 +1,504 @@ +/* + * Copyright (c) 2009 Nuvoton technology. + * Wan ZongShun + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +/* usi registers offset */ +#define USI_CNT 0x00 +#define USI_DIV 0x04 +#define USI_SSR 0x08 +#define USI_RX0 0x10 +#define USI_TX0 0x10 + +/* usi register bit */ +#define ENINT (0x01 << 17) +#define ENFLG (0x01 << 16) +#define TXNUM (0x03 << 8) +#define TXNEG (0x01 << 2) +#define RXNEG (0x01 << 1) +#define LSB (0x01 << 10) +#define SELECTLEV (0x01 << 2) +#define SELECTPOL (0x01 << 31) +#define SELECTSLAVE 0x01 +#define GOBUSY 0x01 + +struct nuc900_spi { + struct spi_bitbang bitbang; + struct completion done; + void __iomem *regs; + int irq; + int len; + int count; + const unsigned char *tx; + unsigned char *rx; + struct clk *clk; + struct resource *ioarea; + struct spi_master *master; + struct spi_device *curdev; + struct device *dev; + struct nuc900_spi_info *pdata; + spinlock_t lock; + struct resource *res; +}; + +static inline struct nuc900_spi *to_hw(struct spi_device *sdev) +{ + return spi_master_get_devdata(sdev->master); +} + +static void nuc900_slave_select(struct spi_device *spi, unsigned int ssr) +{ + struct nuc900_spi *hw = to_hw(spi); + unsigned int val; + unsigned int cs = spi->mode & SPI_CS_HIGH ? 1 : 0; + unsigned int cpol = spi->mode & SPI_CPOL ? 1 : 0; + unsigned long flags; + + spin_lock_irqsave(&hw->lock, flags); + + val = __raw_readl(hw->regs + USI_SSR); + + if (!cs) + val &= ~SELECTLEV; + else + val |= SELECTLEV; + + if (!ssr) + val &= ~SELECTSLAVE; + else + val |= SELECTSLAVE; + + __raw_writel(val, hw->regs + USI_SSR); + + val = __raw_readl(hw->regs + USI_CNT); + + if (!cpol) + val &= ~SELECTPOL; + else + val |= SELECTPOL; + + __raw_writel(val, hw->regs + USI_CNT); + + spin_unlock_irqrestore(&hw->lock, flags); +} + +static void nuc900_spi_chipsel(struct spi_device *spi, int value) +{ + switch (value) { + case BITBANG_CS_INACTIVE: + nuc900_slave_select(spi, 0); + break; + + case BITBANG_CS_ACTIVE: + nuc900_slave_select(spi, 1); + break; + } +} + +static void nuc900_spi_setup_txnum(struct nuc900_spi *hw, + unsigned int txnum) +{ + unsigned int val; + unsigned long flags; + + spin_lock_irqsave(&hw->lock, flags); + + val = __raw_readl(hw->regs + USI_CNT); + + if (!txnum) + val &= ~TXNUM; + else + val |= txnum << 0x08; + + __raw_writel(val, hw->regs + USI_CNT); + + spin_unlock_irqrestore(&hw->lock, flags); + +} + +static void nuc900_spi_setup_txbitlen(struct nuc900_spi *hw, + unsigned int txbitlen) +{ + unsigned int val; + unsigned long flags; + + spin_lock_irqsave(&hw->lock, flags); + + val = __raw_readl(hw->regs + USI_CNT); + + val |= (txbitlen << 0x03); + + __raw_writel(val, hw->regs + USI_CNT); + + spin_unlock_irqrestore(&hw->lock, flags); +} + +static void nuc900_spi_gobusy(struct nuc900_spi *hw) +{ + unsigned int val; + unsigned long flags; + + spin_lock_irqsave(&hw->lock, flags); + + val = __raw_readl(hw->regs + USI_CNT); + + val |= GOBUSY; + + __raw_writel(val, hw->regs + USI_CNT); + + spin_unlock_irqrestore(&hw->lock, flags); +} + +static int nuc900_spi_setupxfer(struct spi_device *spi, + struct spi_transfer *t) +{ + return 0; +} + +static int nuc900_spi_setup(struct spi_device *spi) +{ + return 0; +} + +static inline unsigned int hw_txbyte(struct nuc900_spi *hw, int count) +{ + return hw->tx ? 
hw->tx[count] : 0; +} + +static int nuc900_spi_txrx(struct spi_device *spi, struct spi_transfer *t) +{ + struct nuc900_spi *hw = to_hw(spi); + + hw->tx = t->tx_buf; + hw->rx = t->rx_buf; + hw->len = t->len; + hw->count = 0; + + __raw_writel(hw_txbyte(hw, 0x0), hw->regs + USI_TX0); + + nuc900_spi_gobusy(hw); + + wait_for_completion(&hw->done); + + return hw->count; +} + +static irqreturn_t nuc900_spi_irq(int irq, void *dev) +{ + struct nuc900_spi *hw = dev; + unsigned int status; + unsigned int count = hw->count; + + status = __raw_readl(hw->regs + USI_CNT); + __raw_writel(status, hw->regs + USI_CNT); + + if (status & ENFLG) { + hw->count++; + + if (hw->rx) + hw->rx[count] = __raw_readl(hw->regs + USI_RX0); + count++; + + if (count < hw->len) { + __raw_writel(hw_txbyte(hw, count), hw->regs + USI_TX0); + nuc900_spi_gobusy(hw); + } else { + complete(&hw->done); + } + + return IRQ_HANDLED; + } + + complete(&hw->done); + return IRQ_HANDLED; +} + +static void nuc900_tx_edge(struct nuc900_spi *hw, unsigned int edge) +{ + unsigned int val; + unsigned long flags; + + spin_lock_irqsave(&hw->lock, flags); + + val = __raw_readl(hw->regs + USI_CNT); + + if (edge) + val |= TXNEG; + else + val &= ~TXNEG; + __raw_writel(val, hw->regs + USI_CNT); + + spin_unlock_irqrestore(&hw->lock, flags); +} + +static void nuc900_rx_edge(struct nuc900_spi *hw, unsigned int edge) +{ + unsigned int val; + unsigned long flags; + + spin_lock_irqsave(&hw->lock, flags); + + val = __raw_readl(hw->regs + USI_CNT); + + if (edge) + val |= RXNEG; + else + val &= ~RXNEG; + __raw_writel(val, hw->regs + USI_CNT); + + spin_unlock_irqrestore(&hw->lock, flags); +} + +static void nuc900_send_first(struct nuc900_spi *hw, unsigned int lsb) +{ + unsigned int val; + unsigned long flags; + + spin_lock_irqsave(&hw->lock, flags); + + val = __raw_readl(hw->regs + USI_CNT); + + if (lsb) + val |= LSB; + else + val &= ~LSB; + __raw_writel(val, hw->regs + USI_CNT); + + spin_unlock_irqrestore(&hw->lock, flags); +} + +static void nuc900_set_sleep(struct nuc900_spi *hw, unsigned int sleep) +{ + unsigned int val; + unsigned long flags; + + spin_lock_irqsave(&hw->lock, flags); + + val = __raw_readl(hw->regs + USI_CNT); + + if (sleep) + val |= (sleep << 12); + else + val &= ~(0x0f << 12); + __raw_writel(val, hw->regs + USI_CNT); + + spin_unlock_irqrestore(&hw->lock, flags); +} + +static void nuc900_enable_int(struct nuc900_spi *hw) +{ + unsigned int val; + unsigned long flags; + + spin_lock_irqsave(&hw->lock, flags); + + val = __raw_readl(hw->regs + USI_CNT); + + val |= ENINT; + + __raw_writel(val, hw->regs + USI_CNT); + + spin_unlock_irqrestore(&hw->lock, flags); +} + +static void nuc900_set_divider(struct nuc900_spi *hw) +{ + __raw_writel(hw->pdata->divider, hw->regs + USI_DIV); +} + +static void nuc900_init_spi(struct nuc900_spi *hw) +{ + clk_enable(hw->clk); + spin_lock_init(&hw->lock); + + nuc900_tx_edge(hw, hw->pdata->txneg); + nuc900_rx_edge(hw, hw->pdata->rxneg); + nuc900_send_first(hw, hw->pdata->lsb); + nuc900_set_sleep(hw, hw->pdata->sleep); + nuc900_spi_setup_txbitlen(hw, hw->pdata->txbitlen); + nuc900_spi_setup_txnum(hw, hw->pdata->txnum); + nuc900_set_divider(hw); + nuc900_enable_int(hw); +} + +static int __devinit nuc900_spi_probe(struct platform_device *pdev) +{ + struct nuc900_spi *hw; + struct spi_master *master; + int err = 0; + + master = spi_alloc_master(&pdev->dev, sizeof(struct nuc900_spi)); + if (master == NULL) { + dev_err(&pdev->dev, "No memory for spi_master\n"); + err = -ENOMEM; + goto err_nomem; + } + + hw = 
spi_master_get_devdata(master); + memset(hw, 0, sizeof(struct nuc900_spi)); + + hw->master = spi_master_get(master); + hw->pdata = pdev->dev.platform_data; + hw->dev = &pdev->dev; + + if (hw->pdata == NULL) { + dev_err(&pdev->dev, "No platform data supplied\n"); + err = -ENOENT; + goto err_pdata; + } + + platform_set_drvdata(pdev, hw); + init_completion(&hw->done); + + master->mode_bits = SPI_MODE_0; + master->num_chipselect = hw->pdata->num_cs; + master->bus_num = hw->pdata->bus_num; + hw->bitbang.master = hw->master; + hw->bitbang.setup_transfer = nuc900_spi_setupxfer; + hw->bitbang.chipselect = nuc900_spi_chipsel; + hw->bitbang.txrx_bufs = nuc900_spi_txrx; + hw->bitbang.master->setup = nuc900_spi_setup; + + hw->res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (hw->res == NULL) { + dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n"); + err = -ENOENT; + goto err_pdata; + } + + hw->ioarea = request_mem_region(hw->res->start, + resource_size(hw->res), pdev->name); + + if (hw->ioarea == NULL) { + dev_err(&pdev->dev, "Cannot reserve region\n"); + err = -ENXIO; + goto err_pdata; + } + + hw->regs = ioremap(hw->res->start, resource_size(hw->res)); + if (hw->regs == NULL) { + dev_err(&pdev->dev, "Cannot map IO\n"); + err = -ENXIO; + goto err_iomap; + } + + hw->irq = platform_get_irq(pdev, 0); + if (hw->irq < 0) { + dev_err(&pdev->dev, "No IRQ specified\n"); + err = -ENOENT; + goto err_irq; + } + + err = request_irq(hw->irq, nuc900_spi_irq, 0, pdev->name, hw); + if (err) { + dev_err(&pdev->dev, "Cannot claim IRQ\n"); + goto err_irq; + } + + hw->clk = clk_get(&pdev->dev, "spi"); + if (IS_ERR(hw->clk)) { + dev_err(&pdev->dev, "No clock for device\n"); + err = PTR_ERR(hw->clk); + goto err_clk; + } + + mfp_set_groupg(&pdev->dev); + nuc900_init_spi(hw); + + err = spi_bitbang_start(&hw->bitbang); + if (err) { + dev_err(&pdev->dev, "Failed to register SPI master\n"); + goto err_register; + } + + return 0; + +err_register: + clk_disable(hw->clk); + clk_put(hw->clk); +err_clk: + free_irq(hw->irq, hw); +err_irq: + iounmap(hw->regs); +err_iomap: + release_mem_region(hw->res->start, resource_size(hw->res)); + kfree(hw->ioarea); +err_pdata: + spi_master_put(hw->master); + +err_nomem: + return err; +} + +static int __devexit nuc900_spi_remove(struct platform_device *dev) +{ + struct nuc900_spi *hw = platform_get_drvdata(dev); + + free_irq(hw->irq, hw); + + platform_set_drvdata(dev, NULL); + + spi_bitbang_stop(&hw->bitbang); + + clk_disable(hw->clk); + clk_put(hw->clk); + + iounmap(hw->regs); + + release_mem_region(hw->res->start, resource_size(hw->res)); + kfree(hw->ioarea); + + spi_master_put(hw->master); + return 0; +} + +static struct platform_driver nuc900_spi_driver = { + .probe = nuc900_spi_probe, + .remove = __devexit_p(nuc900_spi_remove), + .driver = { + .name = "nuc900-spi", + .owner = THIS_MODULE, + }, +}; + +static int __init nuc900_spi_init(void) +{ + return platform_driver_register(&nuc900_spi_driver); +} + +static void __exit nuc900_spi_exit(void) +{ + platform_driver_unregister(&nuc900_spi_driver); +} + +module_init(nuc900_spi_init); +module_exit(nuc900_spi_exit); + +MODULE_AUTHOR("Wan ZongShun "); +MODULE_DESCRIPTION("nuc900 spi driver!"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:nuc900-spi"); diff --git a/drivers/spi/spi-oc-tiny.c b/drivers/spi/spi-oc-tiny.c new file mode 100644 index 0000000..f1bde66 --- /dev/null +++ b/drivers/spi/spi-oc-tiny.c @@ -0,0 +1,425 @@ +/* + * OpenCores tiny SPI master driver + * + * http://opencores.org/project,tiny_spi + * + * Copyright (C) 2011 
Thomas Chou + * + * Based on spi_s3c24xx.c, which is: + * Copyright (c) 2006 Ben Dooks + * Copyright (c) 2006 Simtec Electronics + * Ben Dooks + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRV_NAME "spi_oc_tiny" + +#define TINY_SPI_RXDATA 0 +#define TINY_SPI_TXDATA 4 +#define TINY_SPI_STATUS 8 +#define TINY_SPI_CONTROL 12 +#define TINY_SPI_BAUD 16 + +#define TINY_SPI_STATUS_TXE 0x1 +#define TINY_SPI_STATUS_TXR 0x2 + +struct tiny_spi { + /* bitbang has to be first */ + struct spi_bitbang bitbang; + struct completion done; + + void __iomem *base; + int irq; + unsigned int freq; + unsigned int baudwidth; + unsigned int baud; + unsigned int speed_hz; + unsigned int mode; + unsigned int len; + unsigned int txc, rxc; + const u8 *txp; + u8 *rxp; + unsigned int gpio_cs_count; + int *gpio_cs; +}; + +static inline struct tiny_spi *tiny_spi_to_hw(struct spi_device *sdev) +{ + return spi_master_get_devdata(sdev->master); +} + +static unsigned int tiny_spi_baud(struct spi_device *spi, unsigned int hz) +{ + struct tiny_spi *hw = tiny_spi_to_hw(spi); + + return min(DIV_ROUND_UP(hw->freq, hz * 2), (1U << hw->baudwidth)) - 1; +} + +static void tiny_spi_chipselect(struct spi_device *spi, int is_active) +{ + struct tiny_spi *hw = tiny_spi_to_hw(spi); + + if (hw->gpio_cs_count) { + gpio_set_value(hw->gpio_cs[spi->chip_select], + (spi->mode & SPI_CS_HIGH) ? is_active : !is_active); + } +} + +static int tiny_spi_setup_transfer(struct spi_device *spi, + struct spi_transfer *t) +{ + struct tiny_spi *hw = tiny_spi_to_hw(spi); + unsigned int baud = hw->baud; + + if (t) { + if (t->speed_hz && t->speed_hz != hw->speed_hz) + baud = tiny_spi_baud(spi, t->speed_hz); + } + writel(baud, hw->base + TINY_SPI_BAUD); + writel(hw->mode, hw->base + TINY_SPI_CONTROL); + return 0; +} + +static int tiny_spi_setup(struct spi_device *spi) +{ + struct tiny_spi *hw = tiny_spi_to_hw(spi); + + if (spi->max_speed_hz != hw->speed_hz) { + hw->speed_hz = spi->max_speed_hz; + hw->baud = tiny_spi_baud(spi, hw->speed_hz); + } + hw->mode = spi->mode & (SPI_CPOL | SPI_CPHA); + return 0; +} + +static inline void tiny_spi_wait_txr(struct tiny_spi *hw) +{ + while (!(readb(hw->base + TINY_SPI_STATUS) & + TINY_SPI_STATUS_TXR)) + cpu_relax(); +} + +static inline void tiny_spi_wait_txe(struct tiny_spi *hw) +{ + while (!(readb(hw->base + TINY_SPI_STATUS) & + TINY_SPI_STATUS_TXE)) + cpu_relax(); +} + +static int tiny_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t) +{ + struct tiny_spi *hw = tiny_spi_to_hw(spi); + const u8 *txp = t->tx_buf; + u8 *rxp = t->rx_buf; + unsigned int i; + + if (hw->irq >= 0) { + /* use intrrupt driven data transfer */ + hw->len = t->len; + hw->txp = t->tx_buf; + hw->rxp = t->rx_buf; + hw->txc = 0; + hw->rxc = 0; + + /* send the first byte */ + if (t->len > 1) { + writeb(hw->txp ? *hw->txp++ : 0, + hw->base + TINY_SPI_TXDATA); + hw->txc++; + writeb(hw->txp ? *hw->txp++ : 0, + hw->base + TINY_SPI_TXDATA); + hw->txc++; + writeb(TINY_SPI_STATUS_TXR, hw->base + TINY_SPI_STATUS); + } else { + writeb(hw->txp ? 
*hw->txp++ : 0, + hw->base + TINY_SPI_TXDATA); + hw->txc++; + writeb(TINY_SPI_STATUS_TXE, hw->base + TINY_SPI_STATUS); + } + + wait_for_completion(&hw->done); + } else if (txp && rxp) { + /* we need to tighten the transfer loop */ + writeb(*txp++, hw->base + TINY_SPI_TXDATA); + if (t->len > 1) { + writeb(*txp++, hw->base + TINY_SPI_TXDATA); + for (i = 2; i < t->len; i++) { + u8 rx, tx = *txp++; + tiny_spi_wait_txr(hw); + rx = readb(hw->base + TINY_SPI_TXDATA); + writeb(tx, hw->base + TINY_SPI_TXDATA); + *rxp++ = rx; + } + tiny_spi_wait_txr(hw); + *rxp++ = readb(hw->base + TINY_SPI_TXDATA); + } + tiny_spi_wait_txe(hw); + *rxp++ = readb(hw->base + TINY_SPI_RXDATA); + } else if (rxp) { + writeb(0, hw->base + TINY_SPI_TXDATA); + if (t->len > 1) { + writeb(0, + hw->base + TINY_SPI_TXDATA); + for (i = 2; i < t->len; i++) { + u8 rx; + tiny_spi_wait_txr(hw); + rx = readb(hw->base + TINY_SPI_TXDATA); + writeb(0, hw->base + TINY_SPI_TXDATA); + *rxp++ = rx; + } + tiny_spi_wait_txr(hw); + *rxp++ = readb(hw->base + TINY_SPI_TXDATA); + } + tiny_spi_wait_txe(hw); + *rxp++ = readb(hw->base + TINY_SPI_RXDATA); + } else if (txp) { + writeb(*txp++, hw->base + TINY_SPI_TXDATA); + if (t->len > 1) { + writeb(*txp++, hw->base + TINY_SPI_TXDATA); + for (i = 2; i < t->len; i++) { + u8 tx = *txp++; + tiny_spi_wait_txr(hw); + writeb(tx, hw->base + TINY_SPI_TXDATA); + } + } + tiny_spi_wait_txe(hw); + } else { + writeb(0, hw->base + TINY_SPI_TXDATA); + if (t->len > 1) { + writeb(0, hw->base + TINY_SPI_TXDATA); + for (i = 2; i < t->len; i++) { + tiny_spi_wait_txr(hw); + writeb(0, hw->base + TINY_SPI_TXDATA); + } + } + tiny_spi_wait_txe(hw); + } + return t->len; +} + +static irqreturn_t tiny_spi_irq(int irq, void *dev) +{ + struct tiny_spi *hw = dev; + + writeb(0, hw->base + TINY_SPI_STATUS); + if (hw->rxc + 1 == hw->len) { + if (hw->rxp) + *hw->rxp++ = readb(hw->base + TINY_SPI_RXDATA); + hw->rxc++; + complete(&hw->done); + } else { + if (hw->rxp) + *hw->rxp++ = readb(hw->base + TINY_SPI_TXDATA); + hw->rxc++; + if (hw->txc < hw->len) { + writeb(hw->txp ? 
*hw->txp++ : 0, + hw->base + TINY_SPI_TXDATA); + hw->txc++; + writeb(TINY_SPI_STATUS_TXR, + hw->base + TINY_SPI_STATUS); + } else { + writeb(TINY_SPI_STATUS_TXE, + hw->base + TINY_SPI_STATUS); + } + } + return IRQ_HANDLED; +} + +#ifdef CONFIG_OF +#include + +static int __devinit tiny_spi_of_probe(struct platform_device *pdev) +{ + struct tiny_spi *hw = platform_get_drvdata(pdev); + struct device_node *np = pdev->dev.of_node; + unsigned int i; + const __be32 *val; + int len; + + if (!np) + return 0; + hw->gpio_cs_count = of_gpio_count(np); + if (hw->gpio_cs_count) { + hw->gpio_cs = devm_kzalloc(&pdev->dev, + hw->gpio_cs_count * sizeof(unsigned int), + GFP_KERNEL); + if (!hw->gpio_cs) + return -ENOMEM; + } + for (i = 0; i < hw->gpio_cs_count; i++) { + hw->gpio_cs[i] = of_get_gpio_flags(np, i, NULL); + if (hw->gpio_cs[i] < 0) + return -ENODEV; + } + hw->bitbang.master->dev.of_node = pdev->dev.of_node; + val = of_get_property(pdev->dev.of_node, + "clock-frequency", &len); + if (val && len >= sizeof(__be32)) + hw->freq = be32_to_cpup(val); + val = of_get_property(pdev->dev.of_node, "baud-width", &len); + if (val && len >= sizeof(__be32)) + hw->baudwidth = be32_to_cpup(val); + return 0; +} +#else /* !CONFIG_OF */ +static int __devinit tiny_spi_of_probe(struct platform_device *pdev) +{ + return 0; +} +#endif /* CONFIG_OF */ + +static int __devinit tiny_spi_probe(struct platform_device *pdev) +{ + struct tiny_spi_platform_data *platp = pdev->dev.platform_data; + struct tiny_spi *hw; + struct spi_master *master; + struct resource *res; + unsigned int i; + int err = -ENODEV; + + master = spi_alloc_master(&pdev->dev, sizeof(struct tiny_spi)); + if (!master) + return err; + + /* setup the master state. */ + master->bus_num = pdev->id; + master->num_chipselect = 255; + master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; + master->setup = tiny_spi_setup; + + hw = spi_master_get_devdata(master); + platform_set_drvdata(pdev, hw); + + /* setup the state for the bitbang driver */ + hw->bitbang.master = spi_master_get(master); + if (!hw->bitbang.master) + return err; + hw->bitbang.setup_transfer = tiny_spi_setup_transfer; + hw->bitbang.chipselect = tiny_spi_chipselect; + hw->bitbang.txrx_bufs = tiny_spi_txrx_bufs; + + /* find and map our resources */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + goto exit_busy; + if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res), + pdev->name)) + goto exit_busy; + hw->base = devm_ioremap_nocache(&pdev->dev, res->start, + resource_size(res)); + if (!hw->base) + goto exit_busy; + /* irq is optional */ + hw->irq = platform_get_irq(pdev, 0); + if (hw->irq >= 0) { + init_completion(&hw->done); + err = devm_request_irq(&pdev->dev, hw->irq, tiny_spi_irq, 0, + pdev->name, hw); + if (err) + goto exit; + } + /* find platform data */ + if (platp) { + hw->gpio_cs_count = platp->gpio_cs_count; + hw->gpio_cs = platp->gpio_cs; + if (platp->gpio_cs_count && !platp->gpio_cs) + goto exit_busy; + hw->freq = platp->freq; + hw->baudwidth = platp->baudwidth; + } else { + err = tiny_spi_of_probe(pdev); + if (err) + goto exit; + } + for (i = 0; i < hw->gpio_cs_count; i++) { + err = gpio_request(hw->gpio_cs[i], dev_name(&pdev->dev)); + if (err) + goto exit_gpio; + gpio_direction_output(hw->gpio_cs[i], 1); + } + hw->bitbang.master->num_chipselect = max(1U, hw->gpio_cs_count); + + /* register our spi controller */ + err = spi_bitbang_start(&hw->bitbang); + if (err) + goto exit; + dev_info(&pdev->dev, "base %p, irq %d\n", hw->base, hw->irq); + + return 
0;
+
+exit_gpio:
+ while (i-- > 0)
+ gpio_free(hw->gpio_cs[i]);
+exit_busy:
+ err = -EBUSY;
+exit:
+ platform_set_drvdata(pdev, NULL);
+ spi_master_put(master);
+ return err;
+}
+
+static int __devexit tiny_spi_remove(struct platform_device *pdev)
+{
+ struct tiny_spi *hw = platform_get_drvdata(pdev);
+ struct spi_master *master = hw->bitbang.master;
+ unsigned int i;
+
+ spi_bitbang_stop(&hw->bitbang);
+ for (i = 0; i < hw->gpio_cs_count; i++)
+ gpio_free(hw->gpio_cs[i]);
+ platform_set_drvdata(pdev, NULL);
+ spi_master_put(master);
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id tiny_spi_match[] = {
+ { .compatible = "opencores,tiny-spi-rtlsvn2", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, tiny_spi_match);
+#else /* CONFIG_OF */
+#define tiny_spi_match NULL
+#endif /* CONFIG_OF */
+
+static struct platform_driver tiny_spi_driver = {
+ .probe = tiny_spi_probe,
+ .remove = __devexit_p(tiny_spi_remove),
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .pm = NULL,
+ .of_match_table = tiny_spi_match,
+ },
+};
+
+static int __init tiny_spi_init(void)
+{
+ return platform_driver_register(&tiny_spi_driver);
+}
+module_init(tiny_spi_init);
+
+static void __exit tiny_spi_exit(void)
+{
+ platform_driver_unregister(&tiny_spi_driver);
+}
+module_exit(tiny_spi_exit);
+
+MODULE_DESCRIPTION("OpenCores tiny SPI driver");
+MODULE_AUTHOR("Thomas Chou ");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/spi/spi-omap-100k.c b/drivers/spi/spi-omap-100k.c
new file mode 100644
index 0000000..9bd1c92
--- /dev/null
+++ b/drivers/spi/spi-omap-100k.c
@@ -0,0 +1,637 @@
+/*
+ * OMAP7xx SPI 100k controller driver
+ * Author: Fabrice Crohas
+ * from original omap1_mcspi driver
+ *
+ * Copyright (C) 2005, 2006 Nokia Corporation
+ * Author: Samuel Ortiz and
+ * Juha Yrjölä
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +#define OMAP1_SPI100K_MAX_FREQ 48000000 + +#define ICR_SPITAS (OMAP7XX_ICR_BASE + 0x12) + +#define SPI_SETUP1 0x00 +#define SPI_SETUP2 0x02 +#define SPI_CTRL 0x04 +#define SPI_STATUS 0x06 +#define SPI_TX_LSB 0x08 +#define SPI_TX_MSB 0x0a +#define SPI_RX_LSB 0x0c +#define SPI_RX_MSB 0x0e + +#define SPI_SETUP1_INT_READ_ENABLE (1UL << 5) +#define SPI_SETUP1_INT_WRITE_ENABLE (1UL << 4) +#define SPI_SETUP1_CLOCK_DIVISOR(x) ((x) << 1) +#define SPI_SETUP1_CLOCK_ENABLE (1UL << 0) + +#define SPI_SETUP2_ACTIVE_EDGE_FALLING (0UL << 0) +#define SPI_SETUP2_ACTIVE_EDGE_RISING (1UL << 0) +#define SPI_SETUP2_NEGATIVE_LEVEL (0UL << 5) +#define SPI_SETUP2_POSITIVE_LEVEL (1UL << 5) +#define SPI_SETUP2_LEVEL_TRIGGER (0UL << 10) +#define SPI_SETUP2_EDGE_TRIGGER (1UL << 10) + +#define SPI_CTRL_SEN(x) ((x) << 7) +#define SPI_CTRL_WORD_SIZE(x) (((x) - 1) << 2) +#define SPI_CTRL_WR (1UL << 1) +#define SPI_CTRL_RD (1UL << 0) + +#define SPI_STATUS_WE (1UL << 1) +#define SPI_STATUS_RD (1UL << 0) + +#define WRITE 0 +#define READ 1 + + +/* use PIO for small transfers, avoiding DMA setup/teardown overhead and + * cache operations; better heuristics consider wordsize and bitrate. + */ +#define DMA_MIN_BYTES 8 + +#define SPI_RUNNING 0 +#define SPI_SHUTDOWN 1 + +struct omap1_spi100k { + struct work_struct work; + + /* lock protects queue and registers */ + spinlock_t lock; + struct list_head msg_queue; + struct spi_master *master; + struct clk *ick; + struct clk *fck; + + /* Virtual base address of the controller */ + void __iomem *base; + + /* State of the SPI */ + unsigned int state; +}; + +struct omap1_spi100k_cs { + void __iomem *base; + int word_len; +}; + +static struct workqueue_struct *omap1_spi100k_wq; + +#define MOD_REG_BIT(val, mask, set) do { \ + if (set) \ + val |= mask; \ + else \ + val &= ~mask; \ +} while (0) + +static void spi100k_enable_clock(struct spi_master *master) +{ + unsigned int val; + struct omap1_spi100k *spi100k = spi_master_get_devdata(master); + + /* enable SPI */ + val = readw(spi100k->base + SPI_SETUP1); + val |= SPI_SETUP1_CLOCK_ENABLE; + writew(val, spi100k->base + SPI_SETUP1); +} + +static void spi100k_disable_clock(struct spi_master *master) +{ + unsigned int val; + struct omap1_spi100k *spi100k = spi_master_get_devdata(master); + + /* disable SPI */ + val = readw(spi100k->base + SPI_SETUP1); + val &= ~SPI_SETUP1_CLOCK_ENABLE; + writew(val, spi100k->base + SPI_SETUP1); +} + +static void spi100k_write_data(struct spi_master *master, int len, int data) +{ + struct omap1_spi100k *spi100k = spi_master_get_devdata(master); + + /* write 16-bit word, shifting 8-bit data if necessary */ + if (len <= 8) { + data <<= 8; + len = 16; + } + + spi100k_enable_clock(master); + writew( data , spi100k->base + SPI_TX_MSB); + + writew(SPI_CTRL_SEN(0) | + SPI_CTRL_WORD_SIZE(len) | + SPI_CTRL_WR, + spi100k->base + SPI_CTRL); + + /* Wait for bit ack send change */ + while((readw(spi100k->base + SPI_STATUS) & SPI_STATUS_WE) != SPI_STATUS_WE); + udelay(1000); + + spi100k_disable_clock(master); +} + +static int spi100k_read_data(struct spi_master *master, int len) +{ + int dataH,dataL; + struct omap1_spi100k *spi100k = 
spi_master_get_devdata(master); + + /* Always do at least 16 bits */ + if (len <= 8) + len = 16; + + spi100k_enable_clock(master); + writew(SPI_CTRL_SEN(0) | + SPI_CTRL_WORD_SIZE(len) | + SPI_CTRL_RD, + spi100k->base + SPI_CTRL); + + while((readw(spi100k->base + SPI_STATUS) & SPI_STATUS_RD) != SPI_STATUS_RD); + udelay(1000); + + dataL = readw(spi100k->base + SPI_RX_LSB); + dataH = readw(spi100k->base + SPI_RX_MSB); + spi100k_disable_clock(master); + + return dataL; +} + +static void spi100k_open(struct spi_master *master) +{ + /* get control of SPI */ + struct omap1_spi100k *spi100k = spi_master_get_devdata(master); + + writew(SPI_SETUP1_INT_READ_ENABLE | + SPI_SETUP1_INT_WRITE_ENABLE | + SPI_SETUP1_CLOCK_DIVISOR(0), spi100k->base + SPI_SETUP1); + + /* configure clock and interrupts */ + writew(SPI_SETUP2_ACTIVE_EDGE_FALLING | + SPI_SETUP2_NEGATIVE_LEVEL | + SPI_SETUP2_LEVEL_TRIGGER, spi100k->base + SPI_SETUP2); +} + +static void omap1_spi100k_force_cs(struct omap1_spi100k *spi100k, int enable) +{ + if (enable) + writew(0x05fc, spi100k->base + SPI_CTRL); + else + writew(0x05fd, spi100k->base + SPI_CTRL); +} + +static unsigned +omap1_spi100k_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) +{ + struct omap1_spi100k *spi100k; + struct omap1_spi100k_cs *cs = spi->controller_state; + unsigned int count, c; + int word_len; + + spi100k = spi_master_get_devdata(spi->master); + count = xfer->len; + c = count; + word_len = cs->word_len; + + if (word_len <= 8) { + u8 *rx; + const u8 *tx; + + rx = xfer->rx_buf; + tx = xfer->tx_buf; + do { + c-=1; + if (xfer->tx_buf != NULL) + spi100k_write_data(spi->master, word_len, *tx++); + if (xfer->rx_buf != NULL) + *rx++ = spi100k_read_data(spi->master, word_len); + } while(c); + } else if (word_len <= 16) { + u16 *rx; + const u16 *tx; + + rx = xfer->rx_buf; + tx = xfer->tx_buf; + do { + c-=2; + if (xfer->tx_buf != NULL) + spi100k_write_data(spi->master,word_len, *tx++); + if (xfer->rx_buf != NULL) + *rx++ = spi100k_read_data(spi->master,word_len); + } while(c); + } else if (word_len <= 32) { + u32 *rx; + const u32 *tx; + + rx = xfer->rx_buf; + tx = xfer->tx_buf; + do { + c-=4; + if (xfer->tx_buf != NULL) + spi100k_write_data(spi->master,word_len, *tx); + if (xfer->rx_buf != NULL) + *rx = spi100k_read_data(spi->master,word_len); + } while(c); + } + return count - c; +} + +/* called only when no transfer is active to this device */ +static int omap1_spi100k_setup_transfer(struct spi_device *spi, + struct spi_transfer *t) +{ + struct omap1_spi100k *spi100k = spi_master_get_devdata(spi->master); + struct omap1_spi100k_cs *cs = spi->controller_state; + u8 word_len = spi->bits_per_word; + + if (t != NULL && t->bits_per_word) + word_len = t->bits_per_word; + if (!word_len) + word_len = 8; + + if (spi->bits_per_word > 32) + return -EINVAL; + cs->word_len = word_len; + + /* SPI init before transfer */ + writew(0x3e , spi100k->base + SPI_SETUP1); + writew(0x00 , spi100k->base + SPI_STATUS); + writew(0x3e , spi100k->base + SPI_CTRL); + + return 0; +} + +/* the spi->mode bits understood by this driver: */ +#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH) + +static int omap1_spi100k_setup(struct spi_device *spi) +{ + int ret; + struct omap1_spi100k *spi100k; + struct omap1_spi100k_cs *cs = spi->controller_state; + + if (spi->bits_per_word < 4 || spi->bits_per_word > 32) { + dev_dbg(&spi->dev, "setup: unsupported %d bit words\n", + spi->bits_per_word); + return -EINVAL; + } + + spi100k = spi_master_get_devdata(spi->master); + + if (!cs) { + cs = 
kzalloc(sizeof *cs, GFP_KERNEL); + if (!cs) + return -ENOMEM; + cs->base = spi100k->base + spi->chip_select * 0x14; + spi->controller_state = cs; + } + + spi100k_open(spi->master); + + clk_enable(spi100k->ick); + clk_enable(spi100k->fck); + + ret = omap1_spi100k_setup_transfer(spi, NULL); + + clk_disable(spi100k->ick); + clk_disable(spi100k->fck); + + return ret; +} + +static void omap1_spi100k_work(struct work_struct *work) +{ + struct omap1_spi100k *spi100k; + int status = 0; + + spi100k = container_of(work, struct omap1_spi100k, work); + spin_lock_irq(&spi100k->lock); + + clk_enable(spi100k->ick); + clk_enable(spi100k->fck); + + /* We only enable one channel at a time -- the one whose message is + * at the head of the queue -- although this controller would gladly + * arbitrate among multiple channels. This corresponds to "single + * channel" master mode. As a side effect, we need to manage the + * chipselect with the FORCE bit ... CS != channel enable. + */ + while (!list_empty(&spi100k->msg_queue)) { + struct spi_message *m; + struct spi_device *spi; + struct spi_transfer *t = NULL; + int cs_active = 0; + struct omap1_spi100k_cs *cs; + int par_override = 0; + + m = container_of(spi100k->msg_queue.next, struct spi_message, + queue); + + list_del_init(&m->queue); + spin_unlock_irq(&spi100k->lock); + + spi = m->spi; + cs = spi->controller_state; + + list_for_each_entry(t, &m->transfers, transfer_list) { + if (t->tx_buf == NULL && t->rx_buf == NULL && t->len) { + status = -EINVAL; + break; + } + if (par_override || t->speed_hz || t->bits_per_word) { + par_override = 1; + status = omap1_spi100k_setup_transfer(spi, t); + if (status < 0) + break; + if (!t->speed_hz && !t->bits_per_word) + par_override = 0; + } + + if (!cs_active) { + omap1_spi100k_force_cs(spi100k, 1); + cs_active = 1; + } + + if (t->len) { + unsigned count; + + count = omap1_spi100k_txrx_pio(spi, t); + m->actual_length += count; + + if (count != t->len) { + status = -EIO; + break; + } + } + + if (t->delay_usecs) + udelay(t->delay_usecs); + + /* ignore the "leave it on after last xfer" hint */ + + if (t->cs_change) { + omap1_spi100k_force_cs(spi100k, 0); + cs_active = 0; + } + } + + /* Restore defaults if they were overriden */ + if (par_override) { + par_override = 0; + status = omap1_spi100k_setup_transfer(spi, NULL); + } + + if (cs_active) + omap1_spi100k_force_cs(spi100k, 0); + + m->status = status; + m->complete(m->context); + + spin_lock_irq(&spi100k->lock); + } + + clk_disable(spi100k->ick); + clk_disable(spi100k->fck); + spin_unlock_irq(&spi100k->lock); + + if (status < 0) + printk(KERN_WARNING "spi transfer failed with %d\n", status); +} + +static int omap1_spi100k_transfer(struct spi_device *spi, struct spi_message *m) +{ + struct omap1_spi100k *spi100k; + unsigned long flags; + struct spi_transfer *t; + + m->actual_length = 0; + m->status = -EINPROGRESS; + + spi100k = spi_master_get_devdata(spi->master); + + /* Don't accept new work if we're shutting down */ + if (spi100k->state == SPI_SHUTDOWN) + return -ESHUTDOWN; + + /* reject invalid messages and transfers */ + if (list_empty(&m->transfers) || !m->complete) + return -EINVAL; + + list_for_each_entry(t, &m->transfers, transfer_list) { + const void *tx_buf = t->tx_buf; + void *rx_buf = t->rx_buf; + unsigned len = t->len; + + if (t->speed_hz > OMAP1_SPI100K_MAX_FREQ + || (len && !(rx_buf || tx_buf)) + || (t->bits_per_word && + ( t->bits_per_word < 4 + || t->bits_per_word > 32))) { + dev_dbg(&spi->dev, "transfer: %d Hz, %d %s%s, %d bpw\n", + t->speed_hz, + len, + 
tx_buf ? "tx" : "", + rx_buf ? "rx" : "", + t->bits_per_word); + return -EINVAL; + } + + if (t->speed_hz && t->speed_hz < OMAP1_SPI100K_MAX_FREQ/(1<<16)) { + dev_dbg(&spi->dev, "%d Hz max exceeds %d\n", + t->speed_hz, + OMAP1_SPI100K_MAX_FREQ/(1<<16)); + return -EINVAL; + } + + } + + spin_lock_irqsave(&spi100k->lock, flags); + list_add_tail(&m->queue, &spi100k->msg_queue); + queue_work(omap1_spi100k_wq, &spi100k->work); + spin_unlock_irqrestore(&spi100k->lock, flags); + + return 0; +} + +static int __init omap1_spi100k_reset(struct omap1_spi100k *spi100k) +{ + return 0; +} + +static int __devinit omap1_spi100k_probe(struct platform_device *pdev) +{ + struct spi_master *master; + struct omap1_spi100k *spi100k; + int status = 0; + + if (!pdev->id) + return -EINVAL; + + master = spi_alloc_master(&pdev->dev, sizeof *spi100k); + if (master == NULL) { + dev_dbg(&pdev->dev, "master allocation failed\n"); + return -ENOMEM; + } + + if (pdev->id != -1) + master->bus_num = pdev->id; + + master->setup = omap1_spi100k_setup; + master->transfer = omap1_spi100k_transfer; + master->cleanup = NULL; + master->num_chipselect = 2; + master->mode_bits = MODEBITS; + + dev_set_drvdata(&pdev->dev, master); + + spi100k = spi_master_get_devdata(master); + spi100k->master = master; + + /* + * The memory region base address is taken as the platform_data. + * You should allocate this with ioremap() before initializing + * the SPI. + */ + spi100k->base = (void __iomem *) pdev->dev.platform_data; + + INIT_WORK(&spi100k->work, omap1_spi100k_work); + + spin_lock_init(&spi100k->lock); + INIT_LIST_HEAD(&spi100k->msg_queue); + spi100k->ick = clk_get(&pdev->dev, "ick"); + if (IS_ERR(spi100k->ick)) { + dev_dbg(&pdev->dev, "can't get spi100k_ick\n"); + status = PTR_ERR(spi100k->ick); + goto err1; + } + + spi100k->fck = clk_get(&pdev->dev, "fck"); + if (IS_ERR(spi100k->fck)) { + dev_dbg(&pdev->dev, "can't get spi100k_fck\n"); + status = PTR_ERR(spi100k->fck); + goto err2; + } + + if (omap1_spi100k_reset(spi100k) < 0) + goto err3; + + status = spi_register_master(master); + if (status < 0) + goto err3; + + spi100k->state = SPI_RUNNING; + + return status; + +err3: + clk_put(spi100k->fck); +err2: + clk_put(spi100k->ick); +err1: + spi_master_put(master); + return status; +} + +static int __exit omap1_spi100k_remove(struct platform_device *pdev) +{ + struct spi_master *master; + struct omap1_spi100k *spi100k; + struct resource *r; + unsigned limit = 500; + unsigned long flags; + int status = 0; + + master = dev_get_drvdata(&pdev->dev); + spi100k = spi_master_get_devdata(master); + + spin_lock_irqsave(&spi100k->lock, flags); + + spi100k->state = SPI_SHUTDOWN; + while (!list_empty(&spi100k->msg_queue) && limit--) { + spin_unlock_irqrestore(&spi100k->lock, flags); + msleep(10); + spin_lock_irqsave(&spi100k->lock, flags); + } + + if (!list_empty(&spi100k->msg_queue)) + status = -EBUSY; + + spin_unlock_irqrestore(&spi100k->lock, flags); + + if (status != 0) + return status; + + clk_put(spi100k->fck); + clk_put(spi100k->ick); + + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + spi_unregister_master(master); + + return 0; +} + +static struct platform_driver omap1_spi100k_driver = { + .driver = { + .name = "omap1_spi100k", + .owner = THIS_MODULE, + }, + .remove = __exit_p(omap1_spi100k_remove), +}; + + +static int __init omap1_spi100k_init(void) +{ + omap1_spi100k_wq = create_singlethread_workqueue( + omap1_spi100k_driver.driver.name); + + if (omap1_spi100k_wq == NULL) + return -1; + + return 
platform_driver_probe(&omap1_spi100k_driver, omap1_spi100k_probe); +} + +static void __exit omap1_spi100k_exit(void) +{ + platform_driver_unregister(&omap1_spi100k_driver); + + destroy_workqueue(omap1_spi100k_wq); +} + +module_init(omap1_spi100k_init); +module_exit(omap1_spi100k_exit); + +MODULE_DESCRIPTION("OMAP7xx SPI 100k controller driver"); +MODULE_AUTHOR("Fabrice Crohas "); +MODULE_LICENSE("GPL"); + diff --git a/drivers/spi/spi-omap-uwire.c b/drivers/spi/spi-omap-uwire.c new file mode 100644 index 0000000..00a8e9d --- /dev/null +++ b/drivers/spi/spi-omap-uwire.c @@ -0,0 +1,593 @@ +/* + * MicroWire interface driver for OMAP + * + * Copyright 2003 MontaVista Software Inc. + * + * Ported to 2.6 OMAP uwire interface. + * Copyright (C) 2004 Texas Instruments. + * + * Generalization patches by Juha Yrjola + * + * Copyright (C) 2005 David Brownell (ported to 2.6 SPI interface) + * Copyright (C) 2006 Nokia + * + * Many updates by Imre Deak + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include + +#include +#include /* OMAP7XX_IO_CONF registers */ + + +/* FIXME address is now a platform device resource, + * and irqs should show there too... + */ +#define UWIRE_BASE_PHYS 0xFFFB3000 + +/* uWire Registers: */ +#define UWIRE_IO_SIZE 0x20 +#define UWIRE_TDR 0x00 +#define UWIRE_RDR 0x00 +#define UWIRE_CSR 0x01 +#define UWIRE_SR1 0x02 +#define UWIRE_SR2 0x03 +#define UWIRE_SR3 0x04 +#define UWIRE_SR4 0x05 +#define UWIRE_SR5 0x06 + +/* CSR bits */ +#define RDRB (1 << 15) +#define CSRB (1 << 14) +#define START (1 << 13) +#define CS_CMD (1 << 12) + +/* SR1 or SR2 bits */ +#define UWIRE_READ_FALLING_EDGE 0x0001 +#define UWIRE_READ_RISING_EDGE 0x0000 +#define UWIRE_WRITE_FALLING_EDGE 0x0000 +#define UWIRE_WRITE_RISING_EDGE 0x0002 +#define UWIRE_CS_ACTIVE_LOW 0x0000 +#define UWIRE_CS_ACTIVE_HIGH 0x0004 +#define UWIRE_FREQ_DIV_2 0x0000 +#define UWIRE_FREQ_DIV_4 0x0008 +#define UWIRE_FREQ_DIV_8 0x0010 +#define UWIRE_CHK_READY 0x0020 +#define UWIRE_CLK_INVERTED 0x0040 + + +struct uwire_spi { + struct spi_bitbang bitbang; + struct clk *ck; +}; + +struct uwire_state { + unsigned bits_per_word; + unsigned div1_idx; +}; + +/* REVISIT compile time constant for idx_shift? 
*/ +/* + * Or, put it in a structure which is used throughout the driver; + * that avoids having to issue two loads for each bit of static data. + */ +static unsigned int uwire_idx_shift; +static void __iomem *uwire_base; + +static inline void uwire_write_reg(int idx, u16 val) +{ + __raw_writew(val, uwire_base + (idx << uwire_idx_shift)); +} + +static inline u16 uwire_read_reg(int idx) +{ + return __raw_readw(uwire_base + (idx << uwire_idx_shift)); +} + +static inline void omap_uwire_configure_mode(u8 cs, unsigned long flags) +{ + u16 w, val = 0; + int shift, reg; + + if (flags & UWIRE_CLK_INVERTED) + val ^= 0x03; + val = flags & 0x3f; + if (cs & 1) + shift = 6; + else + shift = 0; + if (cs <= 1) + reg = UWIRE_SR1; + else + reg = UWIRE_SR2; + + w = uwire_read_reg(reg); + w &= ~(0x3f << shift); + w |= val << shift; + uwire_write_reg(reg, w); +} + +static int wait_uwire_csr_flag(u16 mask, u16 val, int might_not_catch) +{ + u16 w; + int c = 0; + unsigned long max_jiffies = jiffies + HZ; + + for (;;) { + w = uwire_read_reg(UWIRE_CSR); + if ((w & mask) == val) + break; + if (time_after(jiffies, max_jiffies)) { + printk(KERN_ERR "%s: timeout. reg=%#06x " + "mask=%#06x val=%#06x\n", + __func__, w, mask, val); + return -1; + } + c++; + if (might_not_catch && c > 64) + break; + } + return 0; +} + +static void uwire_set_clk1_div(int div1_idx) +{ + u16 w; + + w = uwire_read_reg(UWIRE_SR3); + w &= ~(0x03 << 1); + w |= div1_idx << 1; + uwire_write_reg(UWIRE_SR3, w); +} + +static void uwire_chipselect(struct spi_device *spi, int value) +{ + struct uwire_state *ust = spi->controller_state; + u16 w; + int old_cs; + + + BUG_ON(wait_uwire_csr_flag(CSRB, 0, 0)); + + w = uwire_read_reg(UWIRE_CSR); + old_cs = (w >> 10) & 0x03; + if (value == BITBANG_CS_INACTIVE || old_cs != spi->chip_select) { + /* Deselect this CS, or the previous CS */ + w &= ~CS_CMD; + uwire_write_reg(UWIRE_CSR, w); + } + /* activate specified chipselect */ + if (value == BITBANG_CS_ACTIVE) { + uwire_set_clk1_div(ust->div1_idx); + /* invert clock? */ + if (spi->mode & SPI_CPOL) + uwire_write_reg(UWIRE_SR4, 1); + else + uwire_write_reg(UWIRE_SR4, 0); + + w = spi->chip_select << 10; + w |= CS_CMD; + uwire_write_reg(UWIRE_CSR, w); + } +} + +static int uwire_txrx(struct spi_device *spi, struct spi_transfer *t) +{ + struct uwire_state *ust = spi->controller_state; + unsigned len = t->len; + unsigned bits = ust->bits_per_word; + unsigned bytes; + u16 val, w; + int status = 0; + + if (!t->tx_buf && !t->rx_buf) + return 0; + + /* Microwire doesn't read and write concurrently */ + if (t->tx_buf && t->rx_buf) + return -EPERM; + + w = spi->chip_select << 10; + w |= CS_CMD; + + if (t->tx_buf) { + const u8 *buf = t->tx_buf; + + /* NOTE: DMA could be used for TX transfers */ + + /* write one or two bytes at a time */ + while (len >= 1) { + /* tx bit 15 is first sent; we byteswap multibyte words + * (msb-first) on the way out from memory. + */ + val = *buf++; + if (bits > 8) { + bytes = 2; + val |= *buf++ << 8; + } else + bytes = 1; + val <<= 16 - bits; + +#ifdef VERBOSE + pr_debug("%s: write-%d =%04x\n", + dev_name(&spi->dev), bits, val); +#endif + if (wait_uwire_csr_flag(CSRB, 0, 0)) + goto eio; + + uwire_write_reg(UWIRE_TDR, val); + + /* start write */ + val = START | w | (bits << 5); + + uwire_write_reg(UWIRE_CSR, val); + len -= bytes; + + /* Wait till write actually starts. + * This is needed with MPU clock 60+ MHz. + * REVISIT: we may not have time to catch it...
+ */ + if (wait_uwire_csr_flag(CSRB, CSRB, 1)) + goto eio; + + status += bytes; + } + + /* REVISIT: save this for later to get more i/o overlap */ + if (wait_uwire_csr_flag(CSRB, 0, 0)) + goto eio; + + } else if (t->rx_buf) { + u8 *buf = t->rx_buf; + + /* read one or two bytes at a time */ + while (len) { + if (bits > 8) { + bytes = 2; + } else + bytes = 1; + + /* start read */ + val = START | w | (bits << 0); + uwire_write_reg(UWIRE_CSR, val); + len -= bytes; + + /* Wait till read actually starts */ + (void) wait_uwire_csr_flag(CSRB, CSRB, 1); + + if (wait_uwire_csr_flag(RDRB | CSRB, + RDRB, 0)) + goto eio; + + /* rx bit 0 is last received; multibyte words will + * be properly byteswapped on the way to memory. + */ + val = uwire_read_reg(UWIRE_RDR); + val &= (1 << bits) - 1; + *buf++ = (u8) val; + if (bytes == 2) + *buf++ = val >> 8; + status += bytes; +#ifdef VERBOSE + pr_debug("%s: read-%d =%04x\n", + dev_name(&spi->dev), bits, val); +#endif + + } + } + return status; +eio: + return -EIO; +} + +static int uwire_setup_transfer(struct spi_device *spi, struct spi_transfer *t) +{ + struct uwire_state *ust = spi->controller_state; + struct uwire_spi *uwire; + unsigned flags = 0; + unsigned bits; + unsigned hz; + unsigned long rate; + int div1_idx; + int div1; + int div2; + int status; + + uwire = spi_master_get_devdata(spi->master); + + if (spi->chip_select > 3) { + pr_debug("%s: cs%d?\n", dev_name(&spi->dev), spi->chip_select); + status = -ENODEV; + goto done; + } + + bits = spi->bits_per_word; + if (t != NULL && t->bits_per_word) + bits = t->bits_per_word; + + if (bits > 16) { + pr_debug("%s: wordsize %d?\n", dev_name(&spi->dev), bits); + status = -ENODEV; + goto done; + } + ust->bits_per_word = bits; + + /* mode 0..3, clock inverted separately; + * standard nCS signaling; + * don't treat DI=high as "not ready" + */ + if (spi->mode & SPI_CS_HIGH) + flags |= UWIRE_CS_ACTIVE_HIGH; + + if (spi->mode & SPI_CPOL) + flags |= UWIRE_CLK_INVERTED; + + switch (spi->mode & (SPI_CPOL | SPI_CPHA)) { + case SPI_MODE_0: + case SPI_MODE_3: + flags |= UWIRE_WRITE_FALLING_EDGE | UWIRE_READ_RISING_EDGE; + break; + case SPI_MODE_1: + case SPI_MODE_2: + flags |= UWIRE_WRITE_RISING_EDGE | UWIRE_READ_FALLING_EDGE; + break; + } + + /* assume it's already enabled */ + rate = clk_get_rate(uwire->ck); + + hz = spi->max_speed_hz; + if (t != NULL && t->speed_hz) + hz = t->speed_hz; + + if (!hz) { + pr_debug("%s: zero speed?\n", dev_name(&spi->dev)); + status = -EINVAL; + goto done; + } + + /* F_INT = mpu_xor_clk / DIV1 */ + for (div1_idx = 0; div1_idx < 4; div1_idx++) { + switch (div1_idx) { + case 0: + div1 = 2; + break; + case 1: + div1 = 4; + break; + case 2: + div1 = 7; + break; + default: + case 3: + div1 = 10; + break; + } + div2 = (rate / div1 + hz - 1) / hz; + if (div2 <= 8) + break; + } + if (div1_idx == 4) { + pr_debug("%s: lowest clock %ld, need %d\n", + dev_name(&spi->dev), rate / 10 / 8, hz); + status = -EDOM; + goto done; + } + + /* we have to cache this and reset in uwire_chipselect as this is a + * global parameter and another uwire device can change it under + * us */ + ust->div1_idx = div1_idx; + uwire_set_clk1_div(div1_idx); + + rate /= div1; + + switch (div2) { + case 0: + case 1: + case 2: + flags |= UWIRE_FREQ_DIV_2; + rate /= 2; + break; + case 3: + case 4: + flags |= UWIRE_FREQ_DIV_4; + rate /= 4; + break; + case 5: + case 6: + case 7: + case 8: + flags |= UWIRE_FREQ_DIV_8; + rate /= 8; + break; + } + omap_uwire_configure_mode(spi->chip_select, flags); + pr_debug("%s: uwire flags %02x, armxor 
%lu KHz, SCK %lu KHz\n", + __func__, flags, + clk_get_rate(uwire->ck) / 1000, + rate / 1000); + status = 0; +done: + return status; +} + +static int uwire_setup(struct spi_device *spi) +{ + struct uwire_state *ust = spi->controller_state; + + if (ust == NULL) { + ust = kzalloc(sizeof(*ust), GFP_KERNEL); + if (ust == NULL) + return -ENOMEM; + spi->controller_state = ust; + } + + return uwire_setup_transfer(spi, NULL); +} + +static void uwire_cleanup(struct spi_device *spi) +{ + kfree(spi->controller_state); +} + +static void uwire_off(struct uwire_spi *uwire) +{ + uwire_write_reg(UWIRE_SR3, 0); + clk_disable(uwire->ck); + clk_put(uwire->ck); + spi_master_put(uwire->bitbang.master); +} + +static int __init uwire_probe(struct platform_device *pdev) +{ + struct spi_master *master; + struct uwire_spi *uwire; + int status; + + master = spi_alloc_master(&pdev->dev, sizeof *uwire); + if (!master) + return -ENODEV; + + uwire = spi_master_get_devdata(master); + + uwire_base = ioremap(UWIRE_BASE_PHYS, UWIRE_IO_SIZE); + if (!uwire_base) { + dev_dbg(&pdev->dev, "can't ioremap UWIRE\n"); + spi_master_put(master); + return -ENOMEM; + } + + dev_set_drvdata(&pdev->dev, uwire); + + uwire->ck = clk_get(&pdev->dev, "fck"); + if (IS_ERR(uwire->ck)) { + status = PTR_ERR(uwire->ck); + dev_dbg(&pdev->dev, "no functional clock?\n"); + spi_master_put(master); + return status; + } + clk_enable(uwire->ck); + + if (cpu_is_omap7xx()) + uwire_idx_shift = 1; + else + uwire_idx_shift = 2; + + uwire_write_reg(UWIRE_SR3, 1); + + /* the spi->mode bits understood by this driver: */ + master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; + + master->flags = SPI_MASTER_HALF_DUPLEX; + + master->bus_num = 2; /* "official" */ + master->num_chipselect = 4; + master->setup = uwire_setup; + master->cleanup = uwire_cleanup; + + uwire->bitbang.master = master; + uwire->bitbang.chipselect = uwire_chipselect; + uwire->bitbang.setup_transfer = uwire_setup_transfer; + uwire->bitbang.txrx_bufs = uwire_txrx; + + status = spi_bitbang_start(&uwire->bitbang); + if (status < 0) { + uwire_off(uwire); + iounmap(uwire_base); + } + return status; +} + +static int __exit uwire_remove(struct platform_device *pdev) +{ + struct uwire_spi *uwire = dev_get_drvdata(&pdev->dev); + int status; + + // FIXME remove all child devices, somewhere ... + + status = spi_bitbang_stop(&uwire->bitbang); + uwire_off(uwire); + iounmap(uwire_base); + return status; +} + +/* work with hotplug and coldplug */ +MODULE_ALIAS("platform:omap_uwire"); + +static struct platform_driver uwire_driver = { + .driver = { + .name = "omap_uwire", + .owner = THIS_MODULE, + }, + .remove = __exit_p(uwire_remove), + // suspend ... unuse ck + // resume ... use ck +}; + +static int __init omap_uwire_init(void) +{ + /* FIXME move these into the relevant board init code. also, include + * H3 support; it uses tsc2101 like H2 (on a different chipselect). 
+ */ + + if (machine_is_omap_h2()) { + /* defaults: W21 SDO, U18 SDI, V19 SCL */ + omap_cfg_reg(N14_1610_UWIRE_CS0); + omap_cfg_reg(N15_1610_UWIRE_CS1); + } + if (machine_is_omap_perseus2()) { + /* configure pins: MPU_UW_nSCS1, MPU_UW_SDO, MPU_UW_SCLK */ + int val = omap_readl(OMAP7XX_IO_CONF_9) & ~0x00EEE000; + omap_writel(val | 0x00AAA000, OMAP7XX_IO_CONF_9); + } + + return platform_driver_probe(&uwire_driver, uwire_probe); +} + +static void __exit omap_uwire_exit(void) +{ + platform_driver_unregister(&uwire_driver); +} + +subsys_initcall(omap_uwire_init); +module_exit(omap_uwire_exit); + +MODULE_LICENSE("GPL"); + diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c new file mode 100644 index 0000000..969cdd2 --- /dev/null +++ b/drivers/spi/spi-omap2-mcspi.c @@ -0,0 +1,1293 @@ +/* + * OMAP2 McSPI controller driver + * + * Copyright (C) 2005, 2006 Nokia Corporation + * Author: Samuel Ortiz and + * Juha Yrjölä + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include + +#define OMAP2_MCSPI_MAX_FREQ 48000000 + +/* OMAP2 has 3 SPI controllers, while OMAP3 has 4 */ +#define OMAP2_MCSPI_MAX_CTRL 4 + +#define OMAP2_MCSPI_REVISION 0x00 +#define OMAP2_MCSPI_SYSSTATUS 0x14 +#define OMAP2_MCSPI_IRQSTATUS 0x18 +#define OMAP2_MCSPI_IRQENABLE 0x1c +#define OMAP2_MCSPI_WAKEUPENABLE 0x20 +#define OMAP2_MCSPI_SYST 0x24 +#define OMAP2_MCSPI_MODULCTRL 0x28 + +/* per-channel banks, 0x14 bytes each, first is: */ +#define OMAP2_MCSPI_CHCONF0 0x2c +#define OMAP2_MCSPI_CHSTAT0 0x30 +#define OMAP2_MCSPI_CHCTRL0 0x34 +#define OMAP2_MCSPI_TX0 0x38 +#define OMAP2_MCSPI_RX0 0x3c + +/* per-register bitmasks: */ + +#define OMAP2_MCSPI_MODULCTRL_SINGLE BIT(0) +#define OMAP2_MCSPI_MODULCTRL_MS BIT(2) +#define OMAP2_MCSPI_MODULCTRL_STEST BIT(3) + +#define OMAP2_MCSPI_CHCONF_PHA BIT(0) +#define OMAP2_MCSPI_CHCONF_POL BIT(1) +#define OMAP2_MCSPI_CHCONF_CLKD_MASK (0x0f << 2) +#define OMAP2_MCSPI_CHCONF_EPOL BIT(6) +#define OMAP2_MCSPI_CHCONF_WL_MASK (0x1f << 7) +#define OMAP2_MCSPI_CHCONF_TRM_RX_ONLY BIT(12) +#define OMAP2_MCSPI_CHCONF_TRM_TX_ONLY BIT(13) +#define OMAP2_MCSPI_CHCONF_TRM_MASK (0x03 << 12) +#define OMAP2_MCSPI_CHCONF_DMAW BIT(14) +#define OMAP2_MCSPI_CHCONF_DMAR BIT(15) +#define OMAP2_MCSPI_CHCONF_DPE0 BIT(16) +#define OMAP2_MCSPI_CHCONF_DPE1 BIT(17) +#define OMAP2_MCSPI_CHCONF_IS BIT(18) +#define OMAP2_MCSPI_CHCONF_TURBO BIT(19) +#define OMAP2_MCSPI_CHCONF_FORCE BIT(20) + +#define OMAP2_MCSPI_CHSTAT_RXS BIT(0) +#define OMAP2_MCSPI_CHSTAT_TXS BIT(1) +#define OMAP2_MCSPI_CHSTAT_EOT BIT(2) + +#define OMAP2_MCSPI_CHCTRL_EN BIT(0) + +#define OMAP2_MCSPI_WAKEUPENABLE_WKEN BIT(0) + +/* We have 2 DMA channels per CS, one for RX and one for TX */ +struct
omap2_mcspi_dma { + int dma_tx_channel; + int dma_rx_channel; + + int dma_tx_sync_dev; + int dma_rx_sync_dev; + + struct completion dma_tx_completion; + struct completion dma_rx_completion; +}; + +/* use PIO for small transfers, avoiding DMA setup/teardown overhead and + * cache operations; better heuristics consider wordsize and bitrate. + */ +#define DMA_MIN_BYTES 160 + + +struct omap2_mcspi { + struct work_struct work; + /* lock protects queue and registers */ + spinlock_t lock; + struct list_head msg_queue; + struct spi_master *master; + /* Virtual base address of the controller */ + void __iomem *base; + unsigned long phys; + /* SPI1 has 4 channels, while SPI2 has 2 */ + struct omap2_mcspi_dma *dma_channels; + struct device *dev; +}; + +struct omap2_mcspi_cs { + void __iomem *base; + unsigned long phys; + int word_len; + struct list_head node; + /* Context save and restore shadow register */ + u32 chconf0; +}; + +/* used for context save and restore, structure members to be updated whenever + * corresponding registers are modified. + */ +struct omap2_mcspi_regs { + u32 modulctrl; + u32 wakeupenable; + struct list_head cs; +}; + +static struct omap2_mcspi_regs omap2_mcspi_ctx[OMAP2_MCSPI_MAX_CTRL]; + +static struct workqueue_struct *omap2_mcspi_wq; + +#define MOD_REG_BIT(val, mask, set) do { \ + if (set) \ + val |= mask; \ + else \ + val &= ~mask; \ +} while (0) + +static inline void mcspi_write_reg(struct spi_master *master, + int idx, u32 val) +{ + struct omap2_mcspi *mcspi = spi_master_get_devdata(master); + + __raw_writel(val, mcspi->base + idx); +} + +static inline u32 mcspi_read_reg(struct spi_master *master, int idx) +{ + struct omap2_mcspi *mcspi = spi_master_get_devdata(master); + + return __raw_readl(mcspi->base + idx); +} + +static inline void mcspi_write_cs_reg(const struct spi_device *spi, + int idx, u32 val) +{ + struct omap2_mcspi_cs *cs = spi->controller_state; + + __raw_writel(val, cs->base + idx); +} + +static inline u32 mcspi_read_cs_reg(const struct spi_device *spi, int idx) +{ + struct omap2_mcspi_cs *cs = spi->controller_state; + + return __raw_readl(cs->base + idx); +} + +static inline u32 mcspi_cached_chconf0(const struct spi_device *spi) +{ + struct omap2_mcspi_cs *cs = spi->controller_state; + + return cs->chconf0; +} + +static inline void mcspi_write_chconf0(const struct spi_device *spi, u32 val) +{ + struct omap2_mcspi_cs *cs = spi->controller_state; + + cs->chconf0 = val; + mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCONF0, val); + mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0); +} + +static void omap2_mcspi_set_dma_req(const struct spi_device *spi, + int is_read, int enable) +{ + u32 l, rw; + + l = mcspi_cached_chconf0(spi); + + if (is_read) /* 1 is read, 0 write */ + rw = OMAP2_MCSPI_CHCONF_DMAR; + else + rw = OMAP2_MCSPI_CHCONF_DMAW; + + MOD_REG_BIT(l, rw, enable); + mcspi_write_chconf0(spi, l); +} + +static void omap2_mcspi_set_enable(const struct spi_device *spi, int enable) +{ + u32 l; + + l = enable ? 
OMAP2_MCSPI_CHCTRL_EN : 0; + mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCTRL0, l); + /* Flush post-writes */ + mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCTRL0); +} + +static void omap2_mcspi_force_cs(struct spi_device *spi, int cs_active) +{ + u32 l; + + l = mcspi_cached_chconf0(spi); + MOD_REG_BIT(l, OMAP2_MCSPI_CHCONF_FORCE, cs_active); + mcspi_write_chconf0(spi, l); +} + +static void omap2_mcspi_set_master_mode(struct spi_master *master) +{ + u32 l; + + /* setup when switching from (reset default) slave mode + * to single-channel master mode + */ + l = mcspi_read_reg(master, OMAP2_MCSPI_MODULCTRL); + MOD_REG_BIT(l, OMAP2_MCSPI_MODULCTRL_STEST, 0); + MOD_REG_BIT(l, OMAP2_MCSPI_MODULCTRL_MS, 0); + MOD_REG_BIT(l, OMAP2_MCSPI_MODULCTRL_SINGLE, 1); + mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, l); + + omap2_mcspi_ctx[master->bus_num - 1].modulctrl = l; +} + +static void omap2_mcspi_restore_ctx(struct omap2_mcspi *mcspi) +{ + struct spi_master *spi_cntrl; + struct omap2_mcspi_cs *cs; + spi_cntrl = mcspi->master; + + /* McSPI: context restore */ + mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_MODULCTRL, + omap2_mcspi_ctx[spi_cntrl->bus_num - 1].modulctrl); + + mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_WAKEUPENABLE, + omap2_mcspi_ctx[spi_cntrl->bus_num - 1].wakeupenable); + + list_for_each_entry(cs, &omap2_mcspi_ctx[spi_cntrl->bus_num - 1].cs, + node) + __raw_writel(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0); +} +static void omap2_mcspi_disable_clocks(struct omap2_mcspi *mcspi) +{ + pm_runtime_put_sync(mcspi->dev); +} + +static int omap2_mcspi_enable_clocks(struct omap2_mcspi *mcspi) +{ + return pm_runtime_get_sync(mcspi->dev); +} + +static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit) +{ + unsigned long timeout; + + timeout = jiffies + msecs_to_jiffies(1000); + while (!(__raw_readl(reg) & bit)) { + if (time_after(jiffies, timeout)) + return -1; + cpu_relax(); + } + return 0; +} + +static unsigned +omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer) +{ + struct omap2_mcspi *mcspi; + struct omap2_mcspi_cs *cs = spi->controller_state; + struct omap2_mcspi_dma *mcspi_dma; + unsigned int count, c; + unsigned long base, tx_reg, rx_reg; + int word_len, data_type, element_count; + int elements = 0; + u32 l; + u8 * rx; + const u8 * tx; + void __iomem *chstat_reg; + + mcspi = spi_master_get_devdata(spi->master); + mcspi_dma = &mcspi->dma_channels[spi->chip_select]; + l = mcspi_cached_chconf0(spi); + + chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0; + + count = xfer->len; + c = count; + word_len = cs->word_len; + + base = cs->phys; + tx_reg = base + OMAP2_MCSPI_TX0; + rx_reg = base + OMAP2_MCSPI_RX0; + rx = xfer->rx_buf; + tx = xfer->tx_buf; + + if (word_len <= 8) { + data_type = OMAP_DMA_DATA_TYPE_S8; + element_count = count; + } else if (word_len <= 16) { + data_type = OMAP_DMA_DATA_TYPE_S16; + element_count = count >> 1; + } else /* word_len <= 32 */ { + data_type = OMAP_DMA_DATA_TYPE_S32; + element_count = count >> 2; + } + + if (tx != NULL) { + omap_set_dma_transfer_params(mcspi_dma->dma_tx_channel, + data_type, element_count, 1, + OMAP_DMA_SYNC_ELEMENT, + mcspi_dma->dma_tx_sync_dev, 0); + + omap_set_dma_dest_params(mcspi_dma->dma_tx_channel, 0, + OMAP_DMA_AMODE_CONSTANT, + tx_reg, 0, 0); + + omap_set_dma_src_params(mcspi_dma->dma_tx_channel, 0, + OMAP_DMA_AMODE_POST_INC, + xfer->tx_dma, 0, 0); + } + + if (rx != NULL) { + elements = element_count - 1; + if (l & OMAP2_MCSPI_CHCONF_TURBO) + elements--; + + omap_set_dma_transfer_params(mcspi_dma->dma_rx_channel, + data_type,
elements, 1, + OMAP_DMA_SYNC_ELEMENT, + mcspi_dma->dma_rx_sync_dev, 1); + + omap_set_dma_src_params(mcspi_dma->dma_rx_channel, 0, + OMAP_DMA_AMODE_CONSTANT, + rx_reg, 0, 0); + + omap_set_dma_dest_params(mcspi_dma->dma_rx_channel, 0, + OMAP_DMA_AMODE_POST_INC, + xfer->rx_dma, 0, 0); + } + + if (tx != NULL) { + omap_start_dma(mcspi_dma->dma_tx_channel); + omap2_mcspi_set_dma_req(spi, 0, 1); + } + + if (rx != NULL) { + omap_start_dma(mcspi_dma->dma_rx_channel); + omap2_mcspi_set_dma_req(spi, 1, 1); + } + + if (tx != NULL) { + wait_for_completion(&mcspi_dma->dma_tx_completion); + dma_unmap_single(&spi->dev, xfer->tx_dma, count, DMA_TO_DEVICE); + + /* for TX_ONLY mode, be sure all words have shifted out */ + if (rx == NULL) { + if (mcspi_wait_for_reg_bit(chstat_reg, + OMAP2_MCSPI_CHSTAT_TXS) < 0) + dev_err(&spi->dev, "TXS timed out\n"); + else if (mcspi_wait_for_reg_bit(chstat_reg, + OMAP2_MCSPI_CHSTAT_EOT) < 0) + dev_err(&spi->dev, "EOT timed out\n"); + } + } + + if (rx != NULL) { + wait_for_completion(&mcspi_dma->dma_rx_completion); + dma_unmap_single(&spi->dev, xfer->rx_dma, count, DMA_FROM_DEVICE); + omap2_mcspi_set_enable(spi, 0); + + if (l & OMAP2_MCSPI_CHCONF_TURBO) { + + if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0) + & OMAP2_MCSPI_CHSTAT_RXS)) { + u32 w; + + w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0); + if (word_len <= 8) + ((u8 *)xfer->rx_buf)[elements++] = w; + else if (word_len <= 16) + ((u16 *)xfer->rx_buf)[elements++] = w; + else /* word_len <= 32 */ + ((u32 *)xfer->rx_buf)[elements++] = w; + } else { + dev_err(&spi->dev, + "DMA RX penultimate word empty"); + count -= (word_len <= 8) ? 2 : + (word_len <= 16) ? 4 : + /* word_len <= 32 */ 8; + omap2_mcspi_set_enable(spi, 1); + return count; + } + } + + if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0) + & OMAP2_MCSPI_CHSTAT_RXS)) { + u32 w; + + w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0); + if (word_len <= 8) + ((u8 *)xfer->rx_buf)[elements] = w; + else if (word_len <= 16) + ((u16 *)xfer->rx_buf)[elements] = w; + else /* word_len <= 32 */ + ((u32 *)xfer->rx_buf)[elements] = w; + } else { + dev_err(&spi->dev, "DMA RX last word empty"); + count -= (word_len <= 8) ? 1 : + (word_len <= 16) ? 2 : + /* word_len <= 32 */ 4; + } + omap2_mcspi_set_enable(spi, 1); + } + return count; +} + +static unsigned +omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) +{ + struct omap2_mcspi *mcspi; + struct omap2_mcspi_cs *cs = spi->controller_state; + unsigned int count, c; + u32 l; + void __iomem *base = cs->base; + void __iomem *tx_reg; + void __iomem *rx_reg; + void __iomem *chstat_reg; + int word_len; + + mcspi = spi_master_get_devdata(spi->master); + count = xfer->len; + c = count; + word_len = cs->word_len; + + l = mcspi_cached_chconf0(spi); + + /* We store the pre-calculated register addresses on stack to speed + * up the transfer loop. 
*/ + tx_reg = base + OMAP2_MCSPI_TX0; + rx_reg = base + OMAP2_MCSPI_RX0; + chstat_reg = base + OMAP2_MCSPI_CHSTAT0; + + if (c < (word_len>>3)) + return 0; + + if (word_len <= 8) { + u8 *rx; + const u8 *tx; + + rx = xfer->rx_buf; + tx = xfer->tx_buf; + + do { + c -= 1; + if (tx != NULL) { + if (mcspi_wait_for_reg_bit(chstat_reg, + OMAP2_MCSPI_CHSTAT_TXS) < 0) { + dev_err(&spi->dev, "TXS timed out\n"); + goto out; + } + dev_vdbg(&spi->dev, "write-%d %02x\n", + word_len, *tx); + __raw_writel(*tx++, tx_reg); + } + if (rx != NULL) { + if (mcspi_wait_for_reg_bit(chstat_reg, + OMAP2_MCSPI_CHSTAT_RXS) < 0) { + dev_err(&spi->dev, "RXS timed out\n"); + goto out; + } + + if (c == 1 && tx == NULL && + (l & OMAP2_MCSPI_CHCONF_TURBO)) { + omap2_mcspi_set_enable(spi, 0); + *rx++ = __raw_readl(rx_reg); + dev_vdbg(&spi->dev, "read-%d %02x\n", + word_len, *(rx - 1)); + if (mcspi_wait_for_reg_bit(chstat_reg, + OMAP2_MCSPI_CHSTAT_RXS) < 0) { + dev_err(&spi->dev, + "RXS timed out\n"); + goto out; + } + c = 0; + } else if (c == 0 && tx == NULL) { + omap2_mcspi_set_enable(spi, 0); + } + + *rx++ = __raw_readl(rx_reg); + dev_vdbg(&spi->dev, "read-%d %02x\n", + word_len, *(rx - 1)); + } + } while (c); + } else if (word_len <= 16) { + u16 *rx; + const u16 *tx; + + rx = xfer->rx_buf; + tx = xfer->tx_buf; + do { + c -= 2; + if (tx != NULL) { + if (mcspi_wait_for_reg_bit(chstat_reg, + OMAP2_MCSPI_CHSTAT_TXS) < 0) { + dev_err(&spi->dev, "TXS timed out\n"); + goto out; + } + dev_vdbg(&spi->dev, "write-%d %04x\n", + word_len, *tx); + __raw_writel(*tx++, tx_reg); + } + if (rx != NULL) { + if (mcspi_wait_for_reg_bit(chstat_reg, + OMAP2_MCSPI_CHSTAT_RXS) < 0) { + dev_err(&spi->dev, "RXS timed out\n"); + goto out; + } + + if (c == 2 && tx == NULL && + (l & OMAP2_MCSPI_CHCONF_TURBO)) { + omap2_mcspi_set_enable(spi, 0); + *rx++ = __raw_readl(rx_reg); + dev_vdbg(&spi->dev, "read-%d %04x\n", + word_len, *(rx - 1)); + if (mcspi_wait_for_reg_bit(chstat_reg, + OMAP2_MCSPI_CHSTAT_RXS) < 0) { + dev_err(&spi->dev, + "RXS timed out\n"); + goto out; + } + c = 0; + } else if (c == 0 && tx == NULL) { + omap2_mcspi_set_enable(spi, 0); + } + + *rx++ = __raw_readl(rx_reg); + dev_vdbg(&spi->dev, "read-%d %04x\n", + word_len, *(rx - 1)); + } + } while (c >= 2); + } else if (word_len <= 32) { + u32 *rx; + const u32 *tx; + + rx = xfer->rx_buf; + tx = xfer->tx_buf; + do { + c -= 4; + if (tx != NULL) { + if (mcspi_wait_for_reg_bit(chstat_reg, + OMAP2_MCSPI_CHSTAT_TXS) < 0) { + dev_err(&spi->dev, "TXS timed out\n"); + goto out; + } + dev_vdbg(&spi->dev, "write-%d %08x\n", + word_len, *tx); + __raw_writel(*tx++, tx_reg); + } + if (rx != NULL) { + if (mcspi_wait_for_reg_bit(chstat_reg, + OMAP2_MCSPI_CHSTAT_RXS) < 0) { + dev_err(&spi->dev, "RXS timed out\n"); + goto out; + } + + if (c == 4 && tx == NULL && + (l & OMAP2_MCSPI_CHCONF_TURBO)) { + omap2_mcspi_set_enable(spi, 0); + *rx++ = __raw_readl(rx_reg); + dev_vdbg(&spi->dev, "read-%d %08x\n", + word_len, *(rx - 1)); + if (mcspi_wait_for_reg_bit(chstat_reg, + OMAP2_MCSPI_CHSTAT_RXS) < 0) { + dev_err(&spi->dev, + "RXS timed out\n"); + goto out; + } + c = 0; + } else if (c == 0 && tx == NULL) { + omap2_mcspi_set_enable(spi, 0); + } + + *rx++ = __raw_readl(rx_reg); + dev_vdbg(&spi->dev, "read-%d %08x\n", + word_len, *(rx - 1)); + } + } while (c >= 4); + } + + /* for TX_ONLY mode, be sure all words have shifted out */ + if (xfer->rx_buf == NULL) { + if (mcspi_wait_for_reg_bit(chstat_reg, + OMAP2_MCSPI_CHSTAT_TXS) < 0) { + dev_err(&spi->dev, "TXS timed out\n"); + } else if 
(mcspi_wait_for_reg_bit(chstat_reg, + OMAP2_MCSPI_CHSTAT_EOT) < 0) + dev_err(&spi->dev, "EOT timed out\n"); + + /* disable the channel to purge rx data received during a TX_ONLY + * transfer; otherwise that stale rx data would corrupt the + * immediately following RX_ONLY transfer. + */ + omap2_mcspi_set_enable(spi, 0); + } +out: + omap2_mcspi_set_enable(spi, 1); + return count - c; +} + +static u32 omap2_mcspi_calc_divisor(u32 speed_hz) +{ + u32 div; + + for (div = 0; div < 15; div++) + if (speed_hz >= (OMAP2_MCSPI_MAX_FREQ >> div)) + return div; + + return 15; +} + +/* called only when no transfer is active to this device */ +static int omap2_mcspi_setup_transfer(struct spi_device *spi, + struct spi_transfer *t) +{ + struct omap2_mcspi_cs *cs = spi->controller_state; + struct omap2_mcspi *mcspi; + struct spi_master *spi_cntrl; + u32 l = 0, div = 0; + u8 word_len = spi->bits_per_word; + u32 speed_hz = spi->max_speed_hz; + + mcspi = spi_master_get_devdata(spi->master); + spi_cntrl = mcspi->master; + + if (t != NULL && t->bits_per_word) + word_len = t->bits_per_word; + + cs->word_len = word_len; + + if (t && t->speed_hz) + speed_hz = t->speed_hz; + + speed_hz = min_t(u32, speed_hz, OMAP2_MCSPI_MAX_FREQ); + div = omap2_mcspi_calc_divisor(speed_hz); + + l = mcspi_cached_chconf0(spi); + + /* standard 4-wire master mode: SCK, MOSI/out, MISO/in, nCS + * REVISIT: this controller could support SPI_3WIRE mode. + */ + l &= ~(OMAP2_MCSPI_CHCONF_IS|OMAP2_MCSPI_CHCONF_DPE1); + l |= OMAP2_MCSPI_CHCONF_DPE0; + + /* wordlength */ + l &= ~OMAP2_MCSPI_CHCONF_WL_MASK; + l |= (word_len - 1) << 7; + + /* set chipselect polarity; manage with FORCE */ + if (!(spi->mode & SPI_CS_HIGH)) + l |= OMAP2_MCSPI_CHCONF_EPOL; /* active-low; normal */ + else + l &= ~OMAP2_MCSPI_CHCONF_EPOL; + + /* set clock divisor */ + l &= ~OMAP2_MCSPI_CHCONF_CLKD_MASK; + l |= div << 2; + + /* set SPI mode 0..3 */ + if (spi->mode & SPI_CPOL) + l |= OMAP2_MCSPI_CHCONF_POL; + else + l &= ~OMAP2_MCSPI_CHCONF_POL; + if (spi->mode & SPI_CPHA) + l |= OMAP2_MCSPI_CHCONF_PHA; + else + l &= ~OMAP2_MCSPI_CHCONF_PHA; + + mcspi_write_chconf0(spi, l); + + dev_dbg(&spi->dev, "setup: speed %d, sample %s edge, clk %s\n", + OMAP2_MCSPI_MAX_FREQ >> div, + (spi->mode & SPI_CPHA) ? "trailing" : "leading", + (spi->mode & SPI_CPOL) ?
"inverted" : "normal"); + + return 0; +} + +static void omap2_mcspi_dma_rx_callback(int lch, u16 ch_status, void *data) +{ + struct spi_device *spi = data; + struct omap2_mcspi *mcspi; + struct omap2_mcspi_dma *mcspi_dma; + + mcspi = spi_master_get_devdata(spi->master); + mcspi_dma = &(mcspi->dma_channels[spi->chip_select]); + + complete(&mcspi_dma->dma_rx_completion); + + /* We must disable the DMA RX request */ + omap2_mcspi_set_dma_req(spi, 1, 0); +} + +static void omap2_mcspi_dma_tx_callback(int lch, u16 ch_status, void *data) +{ + struct spi_device *spi = data; + struct omap2_mcspi *mcspi; + struct omap2_mcspi_dma *mcspi_dma; + + mcspi = spi_master_get_devdata(spi->master); + mcspi_dma = &(mcspi->dma_channels[spi->chip_select]); + + complete(&mcspi_dma->dma_tx_completion); + + /* We must disable the DMA TX request */ + omap2_mcspi_set_dma_req(spi, 0, 0); +} + +static int omap2_mcspi_request_dma(struct spi_device *spi) +{ + struct spi_master *master = spi->master; + struct omap2_mcspi *mcspi; + struct omap2_mcspi_dma *mcspi_dma; + + mcspi = spi_master_get_devdata(master); + mcspi_dma = mcspi->dma_channels + spi->chip_select; + + if (omap_request_dma(mcspi_dma->dma_rx_sync_dev, "McSPI RX", + omap2_mcspi_dma_rx_callback, spi, + &mcspi_dma->dma_rx_channel)) { + dev_err(&spi->dev, "no RX DMA channel for McSPI\n"); + return -EAGAIN; + } + + if (omap_request_dma(mcspi_dma->dma_tx_sync_dev, "McSPI TX", + omap2_mcspi_dma_tx_callback, spi, + &mcspi_dma->dma_tx_channel)) { + omap_free_dma(mcspi_dma->dma_rx_channel); + mcspi_dma->dma_rx_channel = -1; + dev_err(&spi->dev, "no TX DMA channel for McSPI\n"); + return -EAGAIN; + } + + init_completion(&mcspi_dma->dma_rx_completion); + init_completion(&mcspi_dma->dma_tx_completion); + + return 0; +} + +static int omap2_mcspi_setup(struct spi_device *spi) +{ + int ret; + struct omap2_mcspi *mcspi; + struct omap2_mcspi_dma *mcspi_dma; + struct omap2_mcspi_cs *cs = spi->controller_state; + + if (spi->bits_per_word < 4 || spi->bits_per_word > 32) { + dev_dbg(&spi->dev, "setup: unsupported %d bit words\n", + spi->bits_per_word); + return -EINVAL; + } + + mcspi = spi_master_get_devdata(spi->master); + mcspi_dma = &mcspi->dma_channels[spi->chip_select]; + + if (!cs) { + cs = kzalloc(sizeof *cs, GFP_KERNEL); + if (!cs) + return -ENOMEM; + cs->base = mcspi->base + spi->chip_select * 0x14; + cs->phys = mcspi->phys + spi->chip_select * 0x14; + cs->chconf0 = 0; + spi->controller_state = cs; + /* Link this to context save list */ + list_add_tail(&cs->node, + &omap2_mcspi_ctx[mcspi->master->bus_num - 1].cs); + } + + if (mcspi_dma->dma_rx_channel == -1 + || mcspi_dma->dma_tx_channel == -1) { + ret = omap2_mcspi_request_dma(spi); + if (ret < 0) + return ret; + } + + ret = omap2_mcspi_enable_clocks(mcspi); + if (ret < 0) + return ret; + + ret = omap2_mcspi_setup_transfer(spi, NULL); + omap2_mcspi_disable_clocks(mcspi); + + return ret; +} + +static void omap2_mcspi_cleanup(struct spi_device *spi) +{ + struct omap2_mcspi *mcspi; + struct omap2_mcspi_dma *mcspi_dma; + struct omap2_mcspi_cs *cs; + + mcspi = spi_master_get_devdata(spi->master); + + if (spi->controller_state) { + /* Unlink controller state from context save list */ + cs = spi->controller_state; + list_del(&cs->node); + + kfree(spi->controller_state); + } + + if (spi->chip_select < spi->master->num_chipselect) { + mcspi_dma = &mcspi->dma_channels[spi->chip_select]; + + if (mcspi_dma->dma_rx_channel != -1) { + omap_free_dma(mcspi_dma->dma_rx_channel); + mcspi_dma->dma_rx_channel = -1; + } + if 
(mcspi_dma->dma_tx_channel != -1) { + omap_free_dma(mcspi_dma->dma_tx_channel); + mcspi_dma->dma_tx_channel = -1; + } + } +} + +static void omap2_mcspi_work(struct work_struct *work) +{ + struct omap2_mcspi *mcspi; + + mcspi = container_of(work, struct omap2_mcspi, work); + + if (omap2_mcspi_enable_clocks(mcspi) < 0) + return; + + spin_lock_irq(&mcspi->lock); + + /* We only enable one channel at a time -- the one whose message is + * at the head of the queue -- although this controller would gladly + * arbitrate among multiple channels. This corresponds to "single + * channel" master mode. As a side effect, we need to manage the + * chipselect with the FORCE bit ... CS != channel enable. + */ + while (!list_empty(&mcspi->msg_queue)) { + struct spi_message *m; + struct spi_device *spi; + struct spi_transfer *t = NULL; + int cs_active = 0; + struct omap2_mcspi_cs *cs; + struct omap2_mcspi_device_config *cd; + int par_override = 0; + int status = 0; + u32 chconf; + + m = container_of(mcspi->msg_queue.next, struct spi_message, + queue); + + list_del_init(&m->queue); + spin_unlock_irq(&mcspi->lock); + + spi = m->spi; + cs = spi->controller_state; + cd = spi->controller_data; + + omap2_mcspi_set_enable(spi, 1); + list_for_each_entry(t, &m->transfers, transfer_list) { + if (t->tx_buf == NULL && t->rx_buf == NULL && t->len) { + status = -EINVAL; + break; + } + if (par_override || t->speed_hz || t->bits_per_word) { + par_override = 1; + status = omap2_mcspi_setup_transfer(spi, t); + if (status < 0) + break; + if (!t->speed_hz && !t->bits_per_word) + par_override = 0; + } + + if (!cs_active) { + omap2_mcspi_force_cs(spi, 1); + cs_active = 1; + } + + chconf = mcspi_cached_chconf0(spi); + chconf &= ~OMAP2_MCSPI_CHCONF_TRM_MASK; + chconf &= ~OMAP2_MCSPI_CHCONF_TURBO; + + if (t->tx_buf == NULL) + chconf |= OMAP2_MCSPI_CHCONF_TRM_RX_ONLY; + else if (t->rx_buf == NULL) + chconf |= OMAP2_MCSPI_CHCONF_TRM_TX_ONLY; + + if (cd && cd->turbo_mode && t->tx_buf == NULL) { + /* Turbo mode is for more than one word */ + if (t->len > ((cs->word_len + 7) >> 3)) + chconf |= OMAP2_MCSPI_CHCONF_TURBO; + } + + mcspi_write_chconf0(spi, chconf); + + if (t->len) { + unsigned count; + + /* RX_ONLY mode needs dummy data in TX reg */ + if (t->tx_buf == NULL) + __raw_writel(0, cs->base + + OMAP2_MCSPI_TX0); + + if (m->is_dma_mapped || t->len >= DMA_MIN_BYTES) + count = omap2_mcspi_txrx_dma(spi, t); + else + count = omap2_mcspi_txrx_pio(spi, t); + m->actual_length += count; + + if (count != t->len) { + status = -EIO; + break; + } + } + + if (t->delay_usecs) + udelay(t->delay_usecs); + + /* ignore the "leave it on after last xfer" hint */ + if (t->cs_change) { + omap2_mcspi_force_cs(spi, 0); + cs_active = 0; + } + } + + /* Restore defaults if they were overriden */ + if (par_override) { + par_override = 0; + status = omap2_mcspi_setup_transfer(spi, NULL); + } + + if (cs_active) + omap2_mcspi_force_cs(spi, 0); + + omap2_mcspi_set_enable(spi, 0); + + m->status = status; + m->complete(m->context); + + spin_lock_irq(&mcspi->lock); + } + + spin_unlock_irq(&mcspi->lock); + + omap2_mcspi_disable_clocks(mcspi); +} + +static int omap2_mcspi_transfer(struct spi_device *spi, struct spi_message *m) +{ + struct omap2_mcspi *mcspi; + unsigned long flags; + struct spi_transfer *t; + + m->actual_length = 0; + m->status = 0; + + /* reject invalid messages and transfers */ + if (list_empty(&m->transfers) || !m->complete) + return -EINVAL; + list_for_each_entry(t, &m->transfers, transfer_list) { + const void *tx_buf = t->tx_buf; + void *rx_buf = 
t->rx_buf; + unsigned len = t->len; + + if (t->speed_hz > OMAP2_MCSPI_MAX_FREQ + || (len && !(rx_buf || tx_buf)) + || (t->bits_per_word && + ( t->bits_per_word < 4 + || t->bits_per_word > 32))) { + dev_dbg(&spi->dev, "transfer: %d Hz, %d %s%s, %d bpw\n", + t->speed_hz, + len, + tx_buf ? "tx" : "", + rx_buf ? "rx" : "", + t->bits_per_word); + return -EINVAL; + } + if (t->speed_hz && t->speed_hz < (OMAP2_MCSPI_MAX_FREQ >> 15)) { + dev_dbg(&spi->dev, "speed_hz %d below minimum %d Hz\n", + t->speed_hz, + OMAP2_MCSPI_MAX_FREQ >> 15); + return -EINVAL; + } + + if (m->is_dma_mapped || len < DMA_MIN_BYTES) + continue; + + if (tx_buf != NULL) { + t->tx_dma = dma_map_single(&spi->dev, (void *) tx_buf, + len, DMA_TO_DEVICE); + if (dma_mapping_error(&spi->dev, t->tx_dma)) { + dev_dbg(&spi->dev, "dma %cX %d bytes error\n", + 'T', len); + return -EINVAL; + } + } + if (rx_buf != NULL) { + t->rx_dma = dma_map_single(&spi->dev, rx_buf, t->len, + DMA_FROM_DEVICE); + if (dma_mapping_error(&spi->dev, t->rx_dma)) { + dev_dbg(&spi->dev, "dma %cX %d bytes error\n", + 'R', len); + if (tx_buf != NULL) + dma_unmap_single(&spi->dev, t->tx_dma, + len, DMA_TO_DEVICE); + return -EINVAL; + } + } + } + + mcspi = spi_master_get_devdata(spi->master); + + spin_lock_irqsave(&mcspi->lock, flags); + list_add_tail(&m->queue, &mcspi->msg_queue); + queue_work(omap2_mcspi_wq, &mcspi->work); + spin_unlock_irqrestore(&mcspi->lock, flags); + + return 0; +} + +static int __init omap2_mcspi_master_setup(struct omap2_mcspi *mcspi) +{ + struct spi_master *master = mcspi->master; + u32 tmp; + int ret = 0; + + ret = omap2_mcspi_enable_clocks(mcspi); + if (ret < 0) + return ret; + + tmp = OMAP2_MCSPI_WAKEUPENABLE_WKEN; + mcspi_write_reg(master, OMAP2_MCSPI_WAKEUPENABLE, tmp); + omap2_mcspi_ctx[master->bus_num - 1].wakeupenable = tmp; + + omap2_mcspi_set_master_mode(master); + omap2_mcspi_disable_clocks(mcspi); + return 0; +} + +static int omap_mcspi_runtime_resume(struct device *dev) +{ + struct omap2_mcspi *mcspi; + struct spi_master *master; + + master = dev_get_drvdata(dev); + mcspi = spi_master_get_devdata(master); + omap2_mcspi_restore_ctx(mcspi); + + return 0; +} + + +static int __init omap2_mcspi_probe(struct platform_device *pdev) +{ + struct spi_master *master; + struct omap2_mcspi_platform_config *pdata = pdev->dev.platform_data; + struct omap2_mcspi *mcspi; + struct resource *r; + int status = 0, i; + + master = spi_alloc_master(&pdev->dev, sizeof *mcspi); + if (master == NULL) { + dev_dbg(&pdev->dev, "master allocation failed\n"); + return -ENOMEM; + } + + /* the spi->mode bits understood by this driver: */ + master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; + + if (pdev->id != -1) + master->bus_num = pdev->id; + + master->setup = omap2_mcspi_setup; + master->transfer = omap2_mcspi_transfer; + master->cleanup = omap2_mcspi_cleanup; + master->num_chipselect = pdata->num_cs; + + dev_set_drvdata(&pdev->dev, master); + + mcspi = spi_master_get_devdata(master); + mcspi->master = master; + + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (r == NULL) { + status = -ENODEV; + goto err1; + } + if (!request_mem_region(r->start, (r->end - r->start) + 1, + dev_name(&pdev->dev))) { + status = -EBUSY; + goto err1; + } + + r->start += pdata->regs_offset; + r->end += pdata->regs_offset; + mcspi->phys = r->start; + mcspi->base = ioremap(r->start, r->end - r->start + 1); + if (!mcspi->base) { + dev_dbg(&pdev->dev, "can't ioremap MCSPI\n"); + status = -ENOMEM; + goto err2; + } + + mcspi->dev = &pdev->dev; + INIT_WORK(&mcspi->work, 
omap2_mcspi_work); + + spin_lock_init(&mcspi->lock); + INIT_LIST_HEAD(&mcspi->msg_queue); + INIT_LIST_HEAD(&omap2_mcspi_ctx[master->bus_num - 1].cs); + + mcspi->dma_channels = kcalloc(master->num_chipselect, + sizeof(struct omap2_mcspi_dma), + GFP_KERNEL); + + if (mcspi->dma_channels == NULL) + goto err2; + + for (i = 0; i < master->num_chipselect; i++) { + char dma_ch_name[14]; + struct resource *dma_res; + + sprintf(dma_ch_name, "rx%d", i); + dma_res = platform_get_resource_byname(pdev, IORESOURCE_DMA, + dma_ch_name); + if (!dma_res) { + dev_dbg(&pdev->dev, "cannot get DMA RX channel\n"); + status = -ENODEV; + break; + } + + mcspi->dma_channels[i].dma_rx_channel = -1; + mcspi->dma_channels[i].dma_rx_sync_dev = dma_res->start; + sprintf(dma_ch_name, "tx%d", i); + dma_res = platform_get_resource_byname(pdev, IORESOURCE_DMA, + dma_ch_name); + if (!dma_res) { + dev_dbg(&pdev->dev, "cannot get DMA TX channel\n"); + status = -ENODEV; + break; + } + + mcspi->dma_channels[i].dma_tx_channel = -1; + mcspi->dma_channels[i].dma_tx_sync_dev = dma_res->start; + } + + pm_runtime_enable(&pdev->dev); + + if (status || omap2_mcspi_master_setup(mcspi) < 0) + goto err3; + + status = spi_register_master(master); + if (status < 0) + goto err4; + + return status; + +err4: + spi_master_put(master); +err3: + kfree(mcspi->dma_channels); +err2: + release_mem_region(r->start, (r->end - r->start) + 1); + iounmap(mcspi->base); +err1: + return status; +} + +static int __exit omap2_mcspi_remove(struct platform_device *pdev) +{ + struct spi_master *master; + struct omap2_mcspi *mcspi; + struct omap2_mcspi_dma *dma_channels; + struct resource *r; + void __iomem *base; + + master = dev_get_drvdata(&pdev->dev); + mcspi = spi_master_get_devdata(master); + dma_channels = mcspi->dma_channels; + + omap2_mcspi_disable_clocks(mcspi); + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + release_mem_region(r->start, (r->end - r->start) + 1); + + base = mcspi->base; + spi_unregister_master(master); + iounmap(base); + kfree(dma_channels); + + return 0; +} + +/* work with hotplug and coldplug */ +MODULE_ALIAS("platform:omap2_mcspi"); + +#ifdef CONFIG_SUSPEND +/* + * When the SPI wakes up from off-mode, CS is in the active state. If it was + * in the inactive state when the driver was suspended, force it back to the + * inactive state at wake up. + */ +static int omap2_mcspi_resume(struct device *dev) +{ + struct spi_master *master = dev_get_drvdata(dev); + struct omap2_mcspi *mcspi = spi_master_get_devdata(master); + struct omap2_mcspi_cs *cs; + + omap2_mcspi_enable_clocks(mcspi); + list_for_each_entry(cs, &omap2_mcspi_ctx[master->bus_num - 1].cs, + node) { + if ((cs->chconf0 & OMAP2_MCSPI_CHCONF_FORCE) == 0) { + + /* + * We need to toggle the CS state for the OMAP to take + * this change into account.
+ */ + MOD_REG_BIT(cs->chconf0, OMAP2_MCSPI_CHCONF_FORCE, 1); + __raw_writel(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0); + MOD_REG_BIT(cs->chconf0, OMAP2_MCSPI_CHCONF_FORCE, 0); + __raw_writel(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0); + } + } + omap2_mcspi_disable_clocks(mcspi); + return 0; +} +#else +#define omap2_mcspi_resume NULL +#endif + +static const struct dev_pm_ops omap2_mcspi_pm_ops = { + .resume = omap2_mcspi_resume, + .runtime_resume = omap_mcspi_runtime_resume, +}; + +static struct platform_driver omap2_mcspi_driver = { + .driver = { + .name = "omap2_mcspi", + .owner = THIS_MODULE, + .pm = &omap2_mcspi_pm_ops + }, + .remove = __exit_p(omap2_mcspi_remove), +}; + + +static int __init omap2_mcspi_init(void) +{ + omap2_mcspi_wq = create_singlethread_workqueue( + omap2_mcspi_driver.driver.name); + if (omap2_mcspi_wq == NULL) + return -1; + return platform_driver_probe(&omap2_mcspi_driver, omap2_mcspi_probe); +} +subsys_initcall(omap2_mcspi_init); + +static void __exit omap2_mcspi_exit(void) +{ + platform_driver_unregister(&omap2_mcspi_driver); + + destroy_workqueue(omap2_mcspi_wq); +} +module_exit(omap2_mcspi_exit); + +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c new file mode 100644 index 0000000..d482628 --- /dev/null +++ b/drivers/spi/spi-orion.c @@ -0,0 +1,573 @@ +/* + * Marvell Orion SPI controller driver + * + * Author: Shadi Ammouri + * Copyright (C) 2007-2008 Marvell Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRIVER_NAME "orion_spi" + +#define ORION_NUM_CHIPSELECTS 1 /* only one slave is supported*/ +#define ORION_SPI_WAIT_RDY_MAX_LOOP 2000 /* in usec */ + +#define ORION_SPI_IF_CTRL_REG 0x00 +#define ORION_SPI_IF_CONFIG_REG 0x04 +#define ORION_SPI_DATA_OUT_REG 0x08 +#define ORION_SPI_DATA_IN_REG 0x0c +#define ORION_SPI_INT_CAUSE_REG 0x10 + +#define ORION_SPI_IF_8_16_BIT_MODE (1 << 5) +#define ORION_SPI_CLK_PRESCALE_MASK 0x1F + +struct orion_spi { + struct work_struct work; + + /* Lock access to transfer list. 
*/ + spinlock_t lock; + + struct list_head msg_queue; + struct spi_master *master; + void __iomem *base; + unsigned int max_speed; + unsigned int min_speed; + struct orion_spi_info *spi_info; +}; + +static struct workqueue_struct *orion_spi_wq; + +static inline void __iomem *spi_reg(struct orion_spi *orion_spi, u32 reg) +{ + return orion_spi->base + reg; +} + +static inline void +orion_spi_setbits(struct orion_spi *orion_spi, u32 reg, u32 mask) +{ + void __iomem *reg_addr = spi_reg(orion_spi, reg); + u32 val; + + val = readl(reg_addr); + val |= mask; + writel(val, reg_addr); +} + +static inline void +orion_spi_clrbits(struct orion_spi *orion_spi, u32 reg, u32 mask) +{ + void __iomem *reg_addr = spi_reg(orion_spi, reg); + u32 val; + + val = readl(reg_addr); + val &= ~mask; + writel(val, reg_addr); +} + +static int orion_spi_set_transfer_size(struct orion_spi *orion_spi, int size) +{ + if (size == 16) { + orion_spi_setbits(orion_spi, ORION_SPI_IF_CONFIG_REG, + ORION_SPI_IF_8_16_BIT_MODE); + } else if (size == 8) { + orion_spi_clrbits(orion_spi, ORION_SPI_IF_CONFIG_REG, + ORION_SPI_IF_8_16_BIT_MODE); + } else { + pr_debug("Bad bits per word value %d (only 8 or 16 are " + "allowed).\n", size); + return -EINVAL; + } + + return 0; +} + +static int orion_spi_baudrate_set(struct spi_device *spi, unsigned int speed) +{ + u32 tclk_hz; + u32 rate; + u32 prescale; + u32 reg; + struct orion_spi *orion_spi; + + orion_spi = spi_master_get_devdata(spi->master); + + tclk_hz = orion_spi->spi_info->tclk; + + /* + * the supported rates are: 4,6,8...30 + * round up as we look for equal or less speed + */ + rate = DIV_ROUND_UP(tclk_hz, speed); + rate = roundup(rate, 2); + + /* check if requested speed is too small */ + if (rate > 30) + return -EINVAL; + + if (rate < 4) + rate = 4; + + /* Convert the rate to SPI clock divisor value. 
*/ + prescale = 0x10 + rate/2; + + reg = readl(spi_reg(orion_spi, ORION_SPI_IF_CONFIG_REG)); + reg = ((reg & ~ORION_SPI_CLK_PRESCALE_MASK) | prescale); + writel(reg, spi_reg(orion_spi, ORION_SPI_IF_CONFIG_REG)); + + return 0; +} + +/* + * called only when no transfer is active on the bus + */ +static int +orion_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t) +{ + struct orion_spi *orion_spi; + unsigned int speed = spi->max_speed_hz; + unsigned int bits_per_word = spi->bits_per_word; + int rc; + + orion_spi = spi_master_get_devdata(spi->master); + + if ((t != NULL) && t->speed_hz) + speed = t->speed_hz; + + if ((t != NULL) && t->bits_per_word) + bits_per_word = t->bits_per_word; + + rc = orion_spi_baudrate_set(spi, speed); + if (rc) + return rc; + + return orion_spi_set_transfer_size(orion_spi, bits_per_word); +} + +static void orion_spi_set_cs(struct orion_spi *orion_spi, int enable) +{ + if (enable) + orion_spi_setbits(orion_spi, ORION_SPI_IF_CTRL_REG, 0x1); + else + orion_spi_clrbits(orion_spi, ORION_SPI_IF_CTRL_REG, 0x1); +} + +static inline int orion_spi_wait_till_ready(struct orion_spi *orion_spi) +{ + int i; + + for (i = 0; i < ORION_SPI_WAIT_RDY_MAX_LOOP; i++) { + if (readl(spi_reg(orion_spi, ORION_SPI_INT_CAUSE_REG))) + return 1; + else + udelay(1); + } + + return -1; +} + +static inline int +orion_spi_write_read_8bit(struct spi_device *spi, + const u8 **tx_buf, u8 **rx_buf) +{ + void __iomem *tx_reg, *rx_reg, *int_reg; + struct orion_spi *orion_spi; + + orion_spi = spi_master_get_devdata(spi->master); + tx_reg = spi_reg(orion_spi, ORION_SPI_DATA_OUT_REG); + rx_reg = spi_reg(orion_spi, ORION_SPI_DATA_IN_REG); + int_reg = spi_reg(orion_spi, ORION_SPI_INT_CAUSE_REG); + + /* clear the interrupt cause register */ + writel(0x0, int_reg); + + if (tx_buf && *tx_buf) + writel(*(*tx_buf)++, tx_reg); + else + writel(0, tx_reg); + + if (orion_spi_wait_till_ready(orion_spi) < 0) { + dev_err(&spi->dev, "TXS timed out\n"); + return -1; + } + + if (rx_buf && *rx_buf) + *(*rx_buf)++ = readl(rx_reg); + + return 1; +} + +static inline int +orion_spi_write_read_16bit(struct spi_device *spi, + const u16 **tx_buf, u16 **rx_buf) +{ + void __iomem *tx_reg, *rx_reg, *int_reg; + struct orion_spi *orion_spi; + + orion_spi = spi_master_get_devdata(spi->master); + tx_reg = spi_reg(orion_spi, ORION_SPI_DATA_OUT_REG); + rx_reg = spi_reg(orion_spi, ORION_SPI_DATA_IN_REG); + int_reg = spi_reg(orion_spi, ORION_SPI_INT_CAUSE_REG); + + /* clear the interrupt cause register */ + writel(0x0, int_reg); + + if (tx_buf && *tx_buf) + writel(__cpu_to_le16(get_unaligned((*tx_buf)++)), tx_reg); + else + writel(0, tx_reg); + + if (orion_spi_wait_till_ready(orion_spi) < 0) { + dev_err(&spi->dev, "TXS timed out\n"); + return -1; + } + + if (rx_buf && *rx_buf) + put_unaligned(__le16_to_cpu(readl(rx_reg)), (*rx_buf)++); + + return 1; +} + +static unsigned int +orion_spi_write_read(struct spi_device *spi, struct spi_transfer *xfer) +{ + struct orion_spi *orion_spi; + unsigned int count; + int word_len; + + orion_spi = spi_master_get_devdata(spi->master); + word_len = spi->bits_per_word; + count = xfer->len; + + if (word_len == 8) { + const u8 *tx = xfer->tx_buf; + u8 *rx = xfer->rx_buf; + + do { + if (orion_spi_write_read_8bit(spi, &tx, &rx) < 0) + goto out; + count--; + } while (count); + } else if (word_len == 16) { + const u16 *tx = xfer->tx_buf; + u16 *rx = xfer->rx_buf; + + do { + if (orion_spi_write_read_16bit(spi, &tx, &rx) < 0) + goto out; + count -= 2; + } while (count); + } + +out: + return xfer->len - 
count; +} + + +static void orion_spi_work(struct work_struct *work) +{ + struct orion_spi *orion_spi = + container_of(work, struct orion_spi, work); + + spin_lock_irq(&orion_spi->lock); + while (!list_empty(&orion_spi->msg_queue)) { + struct spi_message *m; + struct spi_device *spi; + struct spi_transfer *t = NULL; + int par_override = 0; + int status = 0; + int cs_active = 0; + + m = container_of(orion_spi->msg_queue.next, struct spi_message, + queue); + + list_del_init(&m->queue); + spin_unlock_irq(&orion_spi->lock); + + spi = m->spi; + + /* Load defaults */ + status = orion_spi_setup_transfer(spi, NULL); + + if (status < 0) + goto msg_done; + + list_for_each_entry(t, &m->transfers, transfer_list) { + if (par_override || t->speed_hz || t->bits_per_word) { + par_override = 1; + status = orion_spi_setup_transfer(spi, t); + if (status < 0) + break; + if (!t->speed_hz && !t->bits_per_word) + par_override = 0; + } + + if (!cs_active) { + orion_spi_set_cs(orion_spi, 1); + cs_active = 1; + } + + if (t->len) + m->actual_length += + orion_spi_write_read(spi, t); + + if (t->delay_usecs) + udelay(t->delay_usecs); + + if (t->cs_change) { + orion_spi_set_cs(orion_spi, 0); + cs_active = 0; + } + } + +msg_done: + if (cs_active) + orion_spi_set_cs(orion_spi, 0); + + m->status = status; + m->complete(m->context); + + spin_lock_irq(&orion_spi->lock); + } + + spin_unlock_irq(&orion_spi->lock); +} + +static int __init orion_spi_reset(struct orion_spi *orion_spi) +{ + /* Verify that the CS is deasserted */ + orion_spi_set_cs(orion_spi, 0); + + return 0; +} + +static int orion_spi_setup(struct spi_device *spi) +{ + struct orion_spi *orion_spi; + + orion_spi = spi_master_get_devdata(spi->master); + + /* Fix ac timing if required. */ + if (orion_spi->spi_info->enable_clock_fix) + orion_spi_setbits(orion_spi, ORION_SPI_IF_CONFIG_REG, + (1 << 14)); + + if ((spi->max_speed_hz == 0) + || (spi->max_speed_hz > orion_spi->max_speed)) + spi->max_speed_hz = orion_spi->max_speed; + + if (spi->max_speed_hz < orion_spi->min_speed) { + dev_err(&spi->dev, "setup: requested speed too low %d Hz\n", + spi->max_speed_hz); + return -EINVAL; + } + + /* + * baudrate & width will be set orion_spi_setup_transfer + */ + return 0; +} + +static int orion_spi_transfer(struct spi_device *spi, struct spi_message *m) +{ + struct orion_spi *orion_spi; + struct spi_transfer *t = NULL; + unsigned long flags; + + m->actual_length = 0; + m->status = 0; + + /* reject invalid messages and transfers */ + if (list_empty(&m->transfers) || !m->complete) + return -EINVAL; + + orion_spi = spi_master_get_devdata(spi->master); + + list_for_each_entry(t, &m->transfers, transfer_list) { + unsigned int bits_per_word = spi->bits_per_word; + + if (t->tx_buf == NULL && t->rx_buf == NULL && t->len) { + dev_err(&spi->dev, + "message rejected : " + "invalid transfer data buffers\n"); + goto msg_rejected; + } + + if (t->bits_per_word) + bits_per_word = t->bits_per_word; + + if ((bits_per_word != 8) && (bits_per_word != 16)) { + dev_err(&spi->dev, + "message rejected : " + "invalid transfer bits_per_word (%d bits)\n", + bits_per_word); + goto msg_rejected; + } + /*make sure buffer length is even when working in 16 bit mode*/ + if ((t->bits_per_word == 16) && (t->len & 1)) { + dev_err(&spi->dev, + "message rejected : " + "odd data length (%d) while in 16 bit mode\n", + t->len); + goto msg_rejected; + } + + if (t->speed_hz && t->speed_hz < orion_spi->min_speed) { + dev_err(&spi->dev, + "message rejected : " + "device min speed (%d Hz) exceeds " + "required transfer 
speed (%d Hz)\n", + orion_spi->min_speed, t->speed_hz); + goto msg_rejected; + } + } + + + spin_lock_irqsave(&orion_spi->lock, flags); + list_add_tail(&m->queue, &orion_spi->msg_queue); + queue_work(orion_spi_wq, &orion_spi->work); + spin_unlock_irqrestore(&orion_spi->lock, flags); + + return 0; +msg_rejected: + /* Message rejected and not queued */ + m->status = -EINVAL; + if (m->complete) + m->complete(m->context); + return -EINVAL; +} + +static int __init orion_spi_probe(struct platform_device *pdev) +{ + struct spi_master *master; + struct orion_spi *spi; + struct resource *r; + struct orion_spi_info *spi_info; + int status = 0; + + spi_info = pdev->dev.platform_data; + + master = spi_alloc_master(&pdev->dev, sizeof *spi); + if (master == NULL) { + dev_dbg(&pdev->dev, "master allocation failed\n"); + return -ENOMEM; + } + + if (pdev->id != -1) + master->bus_num = pdev->id; + + /* we support only mode 0, and no options */ + master->mode_bits = 0; + + master->setup = orion_spi_setup; + master->transfer = orion_spi_transfer; + master->num_chipselect = ORION_NUM_CHIPSELECTS; + + dev_set_drvdata(&pdev->dev, master); + + spi = spi_master_get_devdata(master); + spi->master = master; + spi->spi_info = spi_info; + + spi->max_speed = DIV_ROUND_UP(spi_info->tclk, 4); + spi->min_speed = DIV_ROUND_UP(spi_info->tclk, 30); + + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (r == NULL) { + status = -ENODEV; + goto out; + } + + if (!request_mem_region(r->start, (r->end - r->start) + 1, + dev_name(&pdev->dev))) { + status = -EBUSY; + goto out; + } + spi->base = ioremap(r->start, SZ_1K); + + INIT_WORK(&spi->work, orion_spi_work); + + spin_lock_init(&spi->lock); + INIT_LIST_HEAD(&spi->msg_queue); + + if (orion_spi_reset(spi) < 0) + goto out_rel_mem; + + status = spi_register_master(master); + if (status < 0) + goto out_rel_mem; + + return status; + +out_rel_mem: + release_mem_region(r->start, (r->end - r->start) + 1); + +out: + spi_master_put(master); + return status; +} + + +static int __exit orion_spi_remove(struct platform_device *pdev) +{ + struct spi_master *master; + struct orion_spi *spi; + struct resource *r; + + master = dev_get_drvdata(&pdev->dev); + spi = spi_master_get_devdata(master); + + cancel_work_sync(&spi->work); + + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + release_mem_region(r->start, (r->end - r->start) + 1); + + spi_unregister_master(master); + + return 0; +} + +MODULE_ALIAS("platform:" DRIVER_NAME); + +static struct platform_driver orion_spi_driver = { + .driver = { + .name = DRIVER_NAME, + .owner = THIS_MODULE, + }, + .remove = __exit_p(orion_spi_remove), +}; + +static int __init orion_spi_init(void) +{ + orion_spi_wq = create_singlethread_workqueue( + orion_spi_driver.driver.name); + if (orion_spi_wq == NULL) + return -ENOMEM; + + return platform_driver_probe(&orion_spi_driver, orion_spi_probe); +} +module_init(orion_spi_init); + +static void __exit orion_spi_exit(void) +{ + flush_workqueue(orion_spi_wq); + platform_driver_unregister(&orion_spi_driver); + + destroy_workqueue(orion_spi_wq); +} +module_exit(orion_spi_exit); + +MODULE_DESCRIPTION("Orion SPI driver"); +MODULE_AUTHOR("Shadi Ammouri "); +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c new file mode 100644 index 0000000..2541705 --- /dev/null +++ b/drivers/spi/spi-pl022.c @@ -0,0 +1,2342 @@ +/* + * A driver for the ARM PL022 PrimeCell SSP/SPI bus master. + * + * Copyright (C) 2008-2009 ST-Ericsson AB + * Copyright (C) 2006 STMicroelectronics Pvt. Ltd. 
+ * + * Author: Linus Walleij + * + * Initial version inspired by: + * linux-2.6.17-rc3-mm1/drivers/spi/pxa2xx_spi.c + * Initial adoption to PL022 by: + * Sachin Verma + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * This macro is used to define some register default values. + * reg is masked with mask, the OR:ed with an (again masked) + * val shifted sb steps to the left. + */ +#define SSP_WRITE_BITS(reg, val, mask, sb) \ + ((reg) = (((reg) & ~(mask)) | (((val)<<(sb)) & (mask)))) + +/* + * This macro is also used to define some default values. + * It will just shift val by sb steps to the left and mask + * the result with mask. + */ +#define GEN_MASK_BITS(val, mask, sb) \ + (((val)<<(sb)) & (mask)) + +#define DRIVE_TX 0 +#define DO_NOT_DRIVE_TX 1 + +#define DO_NOT_QUEUE_DMA 0 +#define QUEUE_DMA 1 + +#define RX_TRANSFER 1 +#define TX_TRANSFER 2 + +/* + * Macros to access SSP Registers with their offsets + */ +#define SSP_CR0(r) (r + 0x000) +#define SSP_CR1(r) (r + 0x004) +#define SSP_DR(r) (r + 0x008) +#define SSP_SR(r) (r + 0x00C) +#define SSP_CPSR(r) (r + 0x010) +#define SSP_IMSC(r) (r + 0x014) +#define SSP_RIS(r) (r + 0x018) +#define SSP_MIS(r) (r + 0x01C) +#define SSP_ICR(r) (r + 0x020) +#define SSP_DMACR(r) (r + 0x024) +#define SSP_ITCR(r) (r + 0x080) +#define SSP_ITIP(r) (r + 0x084) +#define SSP_ITOP(r) (r + 0x088) +#define SSP_TDR(r) (r + 0x08C) + +#define SSP_PID0(r) (r + 0xFE0) +#define SSP_PID1(r) (r + 0xFE4) +#define SSP_PID2(r) (r + 0xFE8) +#define SSP_PID3(r) (r + 0xFEC) + +#define SSP_CID0(r) (r + 0xFF0) +#define SSP_CID1(r) (r + 0xFF4) +#define SSP_CID2(r) (r + 0xFF8) +#define SSP_CID3(r) (r + 0xFFC) + +/* + * SSP Control Register 0 - SSP_CR0 + */ +#define SSP_CR0_MASK_DSS (0x0FUL << 0) +#define SSP_CR0_MASK_FRF (0x3UL << 4) +#define SSP_CR0_MASK_SPO (0x1UL << 6) +#define SSP_CR0_MASK_SPH (0x1UL << 7) +#define SSP_CR0_MASK_SCR (0xFFUL << 8) + +/* + * The ST version of this block moves som bits + * in SSP_CR0 and extends it to 32 bits + */ +#define SSP_CR0_MASK_DSS_ST (0x1FUL << 0) +#define SSP_CR0_MASK_HALFDUP_ST (0x1UL << 5) +#define SSP_CR0_MASK_CSS_ST (0x1FUL << 16) +#define SSP_CR0_MASK_FRF_ST (0x3UL << 21) + + +/* + * SSP Control Register 0 - SSP_CR1 + */ +#define SSP_CR1_MASK_LBM (0x1UL << 0) +#define SSP_CR1_MASK_SSE (0x1UL << 1) +#define SSP_CR1_MASK_MS (0x1UL << 2) +#define SSP_CR1_MASK_SOD (0x1UL << 3) + +/* + * The ST version of this block adds some bits + * in SSP_CR1 + */ +#define SSP_CR1_MASK_RENDN_ST (0x1UL << 4) +#define SSP_CR1_MASK_TENDN_ST (0x1UL << 5) +#define SSP_CR1_MASK_MWAIT_ST (0x1UL << 6) +#define SSP_CR1_MASK_RXIFLSEL_ST (0x7UL << 7) +#define SSP_CR1_MASK_TXIFLSEL_ST (0x7UL << 10) +/* This one is only in the PL023 variant */ +#define SSP_CR1_MASK_FBCLKDEL_ST (0x7UL << 13) + +/* + * SSP Status Register - SSP_SR + */ +#define SSP_SR_MASK_TFE (0x1UL << 0) /* Transmit FIFO empty */ +#define 
SSP_SR_MASK_TNF (0x1UL << 1) /* Transmit FIFO not full */ +#define SSP_SR_MASK_RNE (0x1UL << 2) /* Receive FIFO not empty */ +#define SSP_SR_MASK_RFF (0x1UL << 3) /* Receive FIFO full */ +#define SSP_SR_MASK_BSY (0x1UL << 4) /* Busy Flag */ + +/* + * SSP Clock Prescale Register - SSP_CPSR + */ +#define SSP_CPSR_MASK_CPSDVSR (0xFFUL << 0) + +/* + * SSP Interrupt Mask Set/Clear Register - SSP_IMSC + */ +#define SSP_IMSC_MASK_RORIM (0x1UL << 0) /* Receive Overrun Interrupt mask */ +#define SSP_IMSC_MASK_RTIM (0x1UL << 1) /* Receive timeout Interrupt mask */ +#define SSP_IMSC_MASK_RXIM (0x1UL << 2) /* Receive FIFO Interrupt mask */ +#define SSP_IMSC_MASK_TXIM (0x1UL << 3) /* Transmit FIFO Interrupt mask */ + +/* + * SSP Raw Interrupt Status Register - SSP_RIS + */ +/* Receive Overrun Raw Interrupt status */ +#define SSP_RIS_MASK_RORRIS (0x1UL << 0) +/* Receive Timeout Raw Interrupt status */ +#define SSP_RIS_MASK_RTRIS (0x1UL << 1) +/* Receive FIFO Raw Interrupt status */ +#define SSP_RIS_MASK_RXRIS (0x1UL << 2) +/* Transmit FIFO Raw Interrupt status */ +#define SSP_RIS_MASK_TXRIS (0x1UL << 3) + +/* + * SSP Masked Interrupt Status Register - SSP_MIS + */ +/* Receive Overrun Masked Interrupt status */ +#define SSP_MIS_MASK_RORMIS (0x1UL << 0) +/* Receive Timeout Masked Interrupt status */ +#define SSP_MIS_MASK_RTMIS (0x1UL << 1) +/* Receive FIFO Masked Interrupt status */ +#define SSP_MIS_MASK_RXMIS (0x1UL << 2) +/* Transmit FIFO Masked Interrupt status */ +#define SSP_MIS_MASK_TXMIS (0x1UL << 3) + +/* + * SSP Interrupt Clear Register - SSP_ICR + */ +/* Receive Overrun Raw Clear Interrupt bit */ +#define SSP_ICR_MASK_RORIC (0x1UL << 0) +/* Receive Timeout Clear Interrupt bit */ +#define SSP_ICR_MASK_RTIC (0x1UL << 1) + +/* + * SSP DMA Control Register - SSP_DMACR + */ +/* Receive DMA Enable bit */ +#define SSP_DMACR_MASK_RXDMAE (0x1UL << 0) +/* Transmit DMA Enable bit */ +#define SSP_DMACR_MASK_TXDMAE (0x1UL << 1) + +/* + * SSP Integration Test control Register - SSP_ITCR + */ +#define SSP_ITCR_MASK_ITEN (0x1UL << 0) +#define SSP_ITCR_MASK_TESTFIFO (0x1UL << 1) + +/* + * SSP Integration Test Input Register - SSP_ITIP + */ +#define ITIP_MASK_SSPRXD (0x1UL << 0) +#define ITIP_MASK_SSPFSSIN (0x1UL << 1) +#define ITIP_MASK_SSPCLKIN (0x1UL << 2) +#define ITIP_MASK_RXDMAC (0x1UL << 3) +#define ITIP_MASK_TXDMAC (0x1UL << 4) +#define ITIP_MASK_SSPTXDIN (0x1UL << 5) + +/* + * SSP Integration Test output Register - SSP_ITOP + */ +#define ITOP_MASK_SSPTXD (0x1UL << 0) +#define ITOP_MASK_SSPFSSOUT (0x1UL << 1) +#define ITOP_MASK_SSPCLKOUT (0x1UL << 2) +#define ITOP_MASK_SSPOEn (0x1UL << 3) +#define ITOP_MASK_SSPCTLOEn (0x1UL << 4) +#define ITOP_MASK_RORINTR (0x1UL << 5) +#define ITOP_MASK_RTINTR (0x1UL << 6) +#define ITOP_MASK_RXINTR (0x1UL << 7) +#define ITOP_MASK_TXINTR (0x1UL << 8) +#define ITOP_MASK_INTR (0x1UL << 9) +#define ITOP_MASK_RXDMABREQ (0x1UL << 10) +#define ITOP_MASK_RXDMASREQ (0x1UL << 11) +#define ITOP_MASK_TXDMABREQ (0x1UL << 12) +#define ITOP_MASK_TXDMASREQ (0x1UL << 13) + +/* + * SSP Test Data Register - SSP_TDR + */ +#define TDR_MASK_TESTDATA (0xFFFFFFFF) + +/* + * Message State + * we use the spi_message.state (void *) pointer to + * hold a single state value, that's why all this + * (void *) casting is done here. 
+ */ +#define STATE_START ((void *) 0) +#define STATE_RUNNING ((void *) 1) +#define STATE_DONE ((void *) 2) +#define STATE_ERROR ((void *) -1) + +/* + * SSP State - Whether Enabled or Disabled + */ +#define SSP_DISABLED (0) +#define SSP_ENABLED (1) + +/* + * SSP DMA State - Whether DMA Enabled or Disabled + */ +#define SSP_DMA_DISABLED (0) +#define SSP_DMA_ENABLED (1) + +/* + * SSP Clock Defaults + */ +#define SSP_DEFAULT_CLKRATE 0x2 +#define SSP_DEFAULT_PRESCALE 0x40 + +/* + * SSP Clock Parameter ranges + */ +#define CPSDVR_MIN 0x02 +#define CPSDVR_MAX 0xFE +#define SCR_MIN 0x00 +#define SCR_MAX 0xFF + +/* + * SSP Interrupt related Macros + */ +#define DEFAULT_SSP_REG_IMSC 0x0UL +#define DISABLE_ALL_INTERRUPTS DEFAULT_SSP_REG_IMSC +#define ENABLE_ALL_INTERRUPTS (~DEFAULT_SSP_REG_IMSC) + +#define CLEAR_ALL_INTERRUPTS 0x3 + +#define SPI_POLLING_TIMEOUT 1000 + + +/* + * The type of reading going on on this chip + */ +enum ssp_reading { + READING_NULL, + READING_U8, + READING_U16, + READING_U32 +}; + +/** + * The type of writing going on on this chip + */ +enum ssp_writing { + WRITING_NULL, + WRITING_U8, + WRITING_U16, + WRITING_U32 +}; + +/** + * struct vendor_data - vendor-specific config parameters + * for PL022 derivates + * @fifodepth: depth of FIFOs (both) + * @max_bpw: maximum number of bits per word + * @unidir: supports unidirection transfers + * @extended_cr: 32 bit wide control register 0 with extra + * features and extra features in CR1 as found in the ST variants + * @pl023: supports a subset of the ST extensions called "PL023" + */ +struct vendor_data { + int fifodepth; + int max_bpw; + bool unidir; + bool extended_cr; + bool pl023; + bool loopback; +}; + +/** + * struct pl022 - This is the private SSP driver data structure + * @adev: AMBA device model hookup + * @vendor: vendor data for the IP block + * @phybase: the physical memory where the SSP device resides + * @virtbase: the virtual memory where the SSP is mapped + * @clk: outgoing clock "SPICLK" for the SPI bus + * @master: SPI framework hookup + * @master_info: controller-specific data from machine setup + * @workqueue: a workqueue on which any spi_message request is queued + * @pump_messages: work struct for scheduling work to the workqueue + * @queue_lock: spinlock to syncronise access to message queue + * @queue: message queue + * @busy: workqueue is busy + * @running: workqueue is running + * @pump_transfers: Tasklet used in Interrupt Transfer mode + * @cur_msg: Pointer to current spi_message being processed + * @cur_transfer: Pointer to current spi_transfer + * @cur_chip: pointer to current clients chip(assigned from controller_state) + * @tx: current position in TX buffer to be read + * @tx_end: end position in TX buffer to be read + * @rx: current position in RX buffer to be written + * @rx_end: end position in RX buffer to be written + * @read: the type of read currently going on + * @write: the type of write currently going on + * @exp_fifo_level: expected FIFO level + * @dma_rx_channel: optional channel for RX DMA + * @dma_tx_channel: optional channel for TX DMA + * @sgt_rx: scattertable for the RX transfer + * @sgt_tx: scattertable for the TX transfer + * @dummypage: a dummy page used for driving data on the bus with DMA + */ +struct pl022 { + struct amba_device *adev; + struct vendor_data *vendor; + resource_size_t phybase; + void __iomem *virtbase; + struct clk *clk; + struct spi_master *master; + struct pl022_ssp_controller *master_info; + /* Driver message queue */ + struct workqueue_struct *workqueue; + 
struct work_struct pump_messages; + spinlock_t queue_lock; + struct list_head queue; + bool busy; + bool running; + /* Message transfer pump */ + struct tasklet_struct pump_transfers; + struct spi_message *cur_msg; + struct spi_transfer *cur_transfer; + struct chip_data *cur_chip; + void *tx; + void *tx_end; + void *rx; + void *rx_end; + enum ssp_reading read; + enum ssp_writing write; + u32 exp_fifo_level; + /* DMA settings */ +#ifdef CONFIG_DMA_ENGINE + struct dma_chan *dma_rx_channel; + struct dma_chan *dma_tx_channel; + struct sg_table sgt_rx; + struct sg_table sgt_tx; + char *dummypage; +#endif +}; + +/** + * struct chip_data - To maintain runtime state of SSP for each client chip + * @cr0: Value of control register CR0 of SSP - on later ST variants this + * register is 32 bits wide rather than just 16 + * @cr1: Value of control register CR1 of SSP + * @dmacr: Value of DMA control Register of SSP + * @cpsr: Value of Clock prescale register + * @n_bytes: how many bytes(power of 2) reqd for a given data width of client + * @enable_dma: Whether to enable DMA or not + * @read: function ptr to be used to read when doing xfer for this chip + * @write: function ptr to be used to write when doing xfer for this chip + * @cs_control: chip select callback provided by chip + * @xfer_type: polling/interrupt/DMA + * + * Runtime state of the SSP controller, maintained per chip, + * This would be set according to the current message that would be served + */ +struct chip_data { + u32 cr0; + u16 cr1; + u16 dmacr; + u16 cpsr; + u8 n_bytes; + bool enable_dma; + enum ssp_reading read; + enum ssp_writing write; + void (*cs_control) (u32 command); + int xfer_type; +}; + +/** + * null_cs_control - Dummy chip select function + * @command: select/delect the chip + * + * If no chip select function is provided by client this is used as dummy + * chip select + */ +static void null_cs_control(u32 command) +{ + pr_debug("pl022: dummy chip select control, CS=0x%x\n", command); +} + +/** + * giveback - current spi_message is over, schedule next message and call + * callback of this message. Assumes that caller already + * set message->status; dma and pio irqs are blocked + * @pl022: SSP driver private data structure + */ +static void giveback(struct pl022 *pl022) +{ + struct spi_transfer *last_transfer; + unsigned long flags; + struct spi_message *msg; + void (*curr_cs_control) (u32 command); + + /* + * This local reference to the chip select function + * is needed because we set curr_chip to NULL + * as a step toward termininating the message. + */ + curr_cs_control = pl022->cur_chip->cs_control; + spin_lock_irqsave(&pl022->queue_lock, flags); + msg = pl022->cur_msg; + pl022->cur_msg = NULL; + pl022->cur_transfer = NULL; + pl022->cur_chip = NULL; + queue_work(pl022->workqueue, &pl022->pump_messages); + spin_unlock_irqrestore(&pl022->queue_lock, flags); + + last_transfer = list_entry(msg->transfers.prev, + struct spi_transfer, + transfer_list); + + /* Delay if requested before any change in chip select */ + if (last_transfer->delay_usecs) + /* + * FIXME: This runs in interrupt context. + * Is this really smart? + */ + udelay(last_transfer->delay_usecs); + + /* + * Drop chip select UNLESS cs_change is true or we are returning + * a message with an error, or next message is for another chip + */ + if (!last_transfer->cs_change) + curr_cs_control(SSP_CHIP_DESELECT); + else { + struct spi_message *next_msg; + + /* Holding of cs was hinted, but we need to make sure + * the next message is for the same chip. 
Don't waste + * time with the following tests unless this was hinted. + * + * We cannot postpone this until pump_messages, because + * after calling msg->complete (below) the driver that + * sent the current message could be unloaded, which + * could invalidate the cs_control() callback... + */ + + /* get a pointer to the next message, if any */ + spin_lock_irqsave(&pl022->queue_lock, flags); + if (list_empty(&pl022->queue)) + next_msg = NULL; + else + next_msg = list_entry(pl022->queue.next, + struct spi_message, queue); + spin_unlock_irqrestore(&pl022->queue_lock, flags); + + /* see if the next and current messages point + * to the same chip + */ + if (next_msg && next_msg->spi != msg->spi) + next_msg = NULL; + if (!next_msg || msg->state == STATE_ERROR) + curr_cs_control(SSP_CHIP_DESELECT); + } + msg->state = NULL; + if (msg->complete) + msg->complete(msg->context); + /* This message is completed, so let's turn off the clocks & power */ + clk_disable(pl022->clk); + amba_pclk_disable(pl022->adev); + amba_vcore_disable(pl022->adev); +} + +/** + * flush - flush the FIFO to reach a clean state + * @pl022: SSP driver private data structure + */ +static int flush(struct pl022 *pl022) +{ + unsigned long limit = loops_per_jiffy << 1; + + dev_dbg(&pl022->adev->dev, "flush\n"); + do { + while (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE) + readw(SSP_DR(pl022->virtbase)); + } while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_BSY) && limit--); + + pl022->exp_fifo_level = 0; + + return limit; +} + +/** + * restore_state - Load configuration of current chip + * @pl022: SSP driver private data structure + */ +static void restore_state(struct pl022 *pl022) +{ + struct chip_data *chip = pl022->cur_chip; + + if (pl022->vendor->extended_cr) + writel(chip->cr0, SSP_CR0(pl022->virtbase)); + else + writew(chip->cr0, SSP_CR0(pl022->virtbase)); + writew(chip->cr1, SSP_CR1(pl022->virtbase)); + writew(chip->dmacr, SSP_DMACR(pl022->virtbase)); + writew(chip->cpsr, SSP_CPSR(pl022->virtbase)); + writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase)); + writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase)); +} + +/* + * Default SSP Register Values + */ +#define DEFAULT_SSP_REG_CR0 ( \ + GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS, 0) | \ + GEN_MASK_BITS(SSP_INTERFACE_MOTOROLA_SPI, SSP_CR0_MASK_FRF, 4) | \ + GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \ + GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \ + GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) \ +) + +/* ST versions have slightly different bit layout */ +#define DEFAULT_SSP_REG_CR0_ST ( \ + GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS_ST, 0) | \ + GEN_MASK_BITS(SSP_MICROWIRE_CHANNEL_FULL_DUPLEX, SSP_CR0_MASK_HALFDUP_ST, 5) | \ + GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \ + GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \ + GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) | \ + GEN_MASK_BITS(SSP_BITS_8, SSP_CR0_MASK_CSS_ST, 16) | \ + GEN_MASK_BITS(SSP_INTERFACE_MOTOROLA_SPI, SSP_CR0_MASK_FRF_ST, 21) \ +) + +/* The PL023 version is slightly different again */ +#define DEFAULT_SSP_REG_CR0_ST_PL023 ( \ + GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS_ST, 0) | \ + GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \ + GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \ + GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) \ +) + +#define DEFAULT_SSP_REG_CR1 ( \ + GEN_MASK_BITS(LOOPBACK_DISABLED, SSP_CR1_MASK_LBM, 0) | \ + GEN_MASK_BITS(SSP_DISABLED, 
SSP_CR1_MASK_SSE, 1) | \ + GEN_MASK_BITS(SSP_MASTER, SSP_CR1_MASK_MS, 2) | \ + GEN_MASK_BITS(DO_NOT_DRIVE_TX, SSP_CR1_MASK_SOD, 3) \ +) + +/* ST versions extend this register to use all 16 bits */ +#define DEFAULT_SSP_REG_CR1_ST ( \ + DEFAULT_SSP_REG_CR1 | \ + GEN_MASK_BITS(SSP_RX_MSB, SSP_CR1_MASK_RENDN_ST, 4) | \ + GEN_MASK_BITS(SSP_TX_MSB, SSP_CR1_MASK_TENDN_ST, 5) | \ + GEN_MASK_BITS(SSP_MWIRE_WAIT_ZERO, SSP_CR1_MASK_MWAIT_ST, 6) |\ + GEN_MASK_BITS(SSP_RX_1_OR_MORE_ELEM, SSP_CR1_MASK_RXIFLSEL_ST, 7) | \ + GEN_MASK_BITS(SSP_TX_1_OR_MORE_EMPTY_LOC, SSP_CR1_MASK_TXIFLSEL_ST, 10) \ +) + +/* + * The PL023 variant has further differences: no loopback mode, no microwire + * support, and a new clock feedback delay setting. + */ +#define DEFAULT_SSP_REG_CR1_ST_PL023 ( \ + GEN_MASK_BITS(SSP_DISABLED, SSP_CR1_MASK_SSE, 1) | \ + GEN_MASK_BITS(SSP_MASTER, SSP_CR1_MASK_MS, 2) | \ + GEN_MASK_BITS(DO_NOT_DRIVE_TX, SSP_CR1_MASK_SOD, 3) | \ + GEN_MASK_BITS(SSP_RX_MSB, SSP_CR1_MASK_RENDN_ST, 4) | \ + GEN_MASK_BITS(SSP_TX_MSB, SSP_CR1_MASK_TENDN_ST, 5) | \ + GEN_MASK_BITS(SSP_RX_1_OR_MORE_ELEM, SSP_CR1_MASK_RXIFLSEL_ST, 7) | \ + GEN_MASK_BITS(SSP_TX_1_OR_MORE_EMPTY_LOC, SSP_CR1_MASK_TXIFLSEL_ST, 10) | \ + GEN_MASK_BITS(SSP_FEEDBACK_CLK_DELAY_NONE, SSP_CR1_MASK_FBCLKDEL_ST, 13) \ +) + +#define DEFAULT_SSP_REG_CPSR ( \ + GEN_MASK_BITS(SSP_DEFAULT_PRESCALE, SSP_CPSR_MASK_CPSDVSR, 0) \ +) + +#define DEFAULT_SSP_REG_DMACR (\ + GEN_MASK_BITS(SSP_DMA_DISABLED, SSP_DMACR_MASK_RXDMAE, 0) | \ + GEN_MASK_BITS(SSP_DMA_DISABLED, SSP_DMACR_MASK_TXDMAE, 1) \ +) + +/** + * load_ssp_default_config - Load default configuration for SSP + * @pl022: SSP driver private data structure + */ +static void load_ssp_default_config(struct pl022 *pl022) +{ + if (pl022->vendor->pl023) { + writel(DEFAULT_SSP_REG_CR0_ST_PL023, SSP_CR0(pl022->virtbase)); + writew(DEFAULT_SSP_REG_CR1_ST_PL023, SSP_CR1(pl022->virtbase)); + } else if (pl022->vendor->extended_cr) { + writel(DEFAULT_SSP_REG_CR0_ST, SSP_CR0(pl022->virtbase)); + writew(DEFAULT_SSP_REG_CR1_ST, SSP_CR1(pl022->virtbase)); + } else { + writew(DEFAULT_SSP_REG_CR0, SSP_CR0(pl022->virtbase)); + writew(DEFAULT_SSP_REG_CR1, SSP_CR1(pl022->virtbase)); + } + writew(DEFAULT_SSP_REG_DMACR, SSP_DMACR(pl022->virtbase)); + writew(DEFAULT_SSP_REG_CPSR, SSP_CPSR(pl022->virtbase)); + writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase)); + writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase)); +} + +/** + * This will write to TX and read from RX according to the parameters + * set in pl022. + */ +static void readwriter(struct pl022 *pl022) +{ + + /* + * The FIFO depth is different between primecell variants. + * I believe filling in too much in the FIFO might cause + * errons in 8bit wide transfers on ARM variants (just 8 words + * FIFO, means only 8x8 = 64 bits in FIFO) at least. + * + * To prevent this issue, the TX FIFO is only filled to the + * unused RX FIFO fill length, regardless of what the TX + * FIFO status flag indicates. 
+ */ + dev_dbg(&pl022->adev->dev, + "%s, rx: %p, rxend: %p, tx: %p, txend: %p\n", + __func__, pl022->rx, pl022->rx_end, pl022->tx, pl022->tx_end); + + /* Read as much as you can */ + while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE) + && (pl022->rx < pl022->rx_end)) { + switch (pl022->read) { + case READING_NULL: + readw(SSP_DR(pl022->virtbase)); + break; + case READING_U8: + *(u8 *) (pl022->rx) = + readw(SSP_DR(pl022->virtbase)) & 0xFFU; + break; + case READING_U16: + *(u16 *) (pl022->rx) = + (u16) readw(SSP_DR(pl022->virtbase)); + break; + case READING_U32: + *(u32 *) (pl022->rx) = + readl(SSP_DR(pl022->virtbase)); + break; + } + pl022->rx += (pl022->cur_chip->n_bytes); + pl022->exp_fifo_level--; + } + /* + * Write as much as possible up to the RX FIFO size + */ + while ((pl022->exp_fifo_level < pl022->vendor->fifodepth) + && (pl022->tx < pl022->tx_end)) { + switch (pl022->write) { + case WRITING_NULL: + writew(0x0, SSP_DR(pl022->virtbase)); + break; + case WRITING_U8: + writew(*(u8 *) (pl022->tx), SSP_DR(pl022->virtbase)); + break; + case WRITING_U16: + writew((*(u16 *) (pl022->tx)), SSP_DR(pl022->virtbase)); + break; + case WRITING_U32: + writel(*(u32 *) (pl022->tx), SSP_DR(pl022->virtbase)); + break; + } + pl022->tx += (pl022->cur_chip->n_bytes); + pl022->exp_fifo_level++; + /* + * This inner reader takes care of things appearing in the RX + * FIFO as we're transmitting. This will happen a lot since the + * clock starts running when you put things into the TX FIFO, + * and then things are continuously clocked into the RX FIFO. + */ + while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE) + && (pl022->rx < pl022->rx_end)) { + switch (pl022->read) { + case READING_NULL: + readw(SSP_DR(pl022->virtbase)); + break; + case READING_U8: + *(u8 *) (pl022->rx) = + readw(SSP_DR(pl022->virtbase)) & 0xFFU; + break; + case READING_U16: + *(u16 *) (pl022->rx) = + (u16) readw(SSP_DR(pl022->virtbase)); + break; + case READING_U32: + *(u32 *) (pl022->rx) = + readl(SSP_DR(pl022->virtbase)); + break; + } + pl022->rx += (pl022->cur_chip->n_bytes); + pl022->exp_fifo_level--; + } + } + /* + * When we exit here the TX FIFO should be full and the RX FIFO + * should be empty + */ +} + + +/** + * next_transfer - Move to the Next transfer in the current spi message + * @pl022: SSP driver private data structure + * + * This function moves though the linked list of spi transfers in the + * current spi message and returns with the state of current spi + * message i.e whether its last transfer is done(STATE_DONE) or + * Next transfer is ready(STATE_RUNNING) + */ +static void *next_transfer(struct pl022 *pl022) +{ + struct spi_message *msg = pl022->cur_msg; + struct spi_transfer *trans = pl022->cur_transfer; + + /* Move to next transfer */ + if (trans->transfer_list.next != &msg->transfers) { + pl022->cur_transfer = + list_entry(trans->transfer_list.next, + struct spi_transfer, transfer_list); + return STATE_RUNNING; + } + return STATE_DONE; +} + +/* + * This DMA functionality is only compiled in if we have + * access to the generic DMA devices/DMA engine. 
+ */ +#ifdef CONFIG_DMA_ENGINE +static void unmap_free_dma_scatter(struct pl022 *pl022) +{ + /* Unmap and free the SG tables */ + dma_unmap_sg(pl022->dma_tx_channel->device->dev, pl022->sgt_tx.sgl, + pl022->sgt_tx.nents, DMA_TO_DEVICE); + dma_unmap_sg(pl022->dma_rx_channel->device->dev, pl022->sgt_rx.sgl, + pl022->sgt_rx.nents, DMA_FROM_DEVICE); + sg_free_table(&pl022->sgt_rx); + sg_free_table(&pl022->sgt_tx); +} + +static void dma_callback(void *data) +{ + struct pl022 *pl022 = data; + struct spi_message *msg = pl022->cur_msg; + + BUG_ON(!pl022->sgt_rx.sgl); + +#ifdef VERBOSE_DEBUG + /* + * Optionally dump out buffers to inspect contents, this is + * good if you want to convince yourself that the loopback + * read/write contents are the same, when adopting to a new + * DMA engine. + */ + { + struct scatterlist *sg; + unsigned int i; + + dma_sync_sg_for_cpu(&pl022->adev->dev, + pl022->sgt_rx.sgl, + pl022->sgt_rx.nents, + DMA_FROM_DEVICE); + + for_each_sg(pl022->sgt_rx.sgl, sg, pl022->sgt_rx.nents, i) { + dev_dbg(&pl022->adev->dev, "SPI RX SG ENTRY: %d", i); + print_hex_dump(KERN_ERR, "SPI RX: ", + DUMP_PREFIX_OFFSET, + 16, + 1, + sg_virt(sg), + sg_dma_len(sg), + 1); + } + for_each_sg(pl022->sgt_tx.sgl, sg, pl022->sgt_tx.nents, i) { + dev_dbg(&pl022->adev->dev, "SPI TX SG ENTRY: %d", i); + print_hex_dump(KERN_ERR, "SPI TX: ", + DUMP_PREFIX_OFFSET, + 16, + 1, + sg_virt(sg), + sg_dma_len(sg), + 1); + } + } +#endif + + unmap_free_dma_scatter(pl022); + + /* Update total bytes transferred */ + msg->actual_length += pl022->cur_transfer->len; + if (pl022->cur_transfer->cs_change) + pl022->cur_chip-> + cs_control(SSP_CHIP_DESELECT); + + /* Move to next transfer */ + msg->state = next_transfer(pl022); + tasklet_schedule(&pl022->pump_transfers); +} + +static void setup_dma_scatter(struct pl022 *pl022, + void *buffer, + unsigned int length, + struct sg_table *sgtab) +{ + struct scatterlist *sg; + int bytesleft = length; + void *bufp = buffer; + int mapbytes; + int i; + + if (buffer) { + for_each_sg(sgtab->sgl, sg, sgtab->nents, i) { + /* + * If there are less bytes left than what fits + * in the current page (plus page alignment offset) + * we just feed in this, else we stuff in as much + * as we can. 
+ */ + if (bytesleft < (PAGE_SIZE - offset_in_page(bufp))) + mapbytes = bytesleft; + else + mapbytes = PAGE_SIZE - offset_in_page(bufp); + sg_set_page(sg, virt_to_page(bufp), + mapbytes, offset_in_page(bufp)); + bufp += mapbytes; + bytesleft -= mapbytes; + dev_dbg(&pl022->adev->dev, + "set RX/TX target page @ %p, %d bytes, %d left\n", + bufp, mapbytes, bytesleft); + } + } else { + /* Map the dummy buffer on every page */ + for_each_sg(sgtab->sgl, sg, sgtab->nents, i) { + if (bytesleft < PAGE_SIZE) + mapbytes = bytesleft; + else + mapbytes = PAGE_SIZE; + sg_set_page(sg, virt_to_page(pl022->dummypage), + mapbytes, 0); + bytesleft -= mapbytes; + dev_dbg(&pl022->adev->dev, + "set RX/TX to dummy page %d bytes, %d left\n", + mapbytes, bytesleft); + + } + } + BUG_ON(bytesleft); +} + +/** + * configure_dma - configures the channels for the next transfer + * @pl022: SSP driver's private data structure + */ +static int configure_dma(struct pl022 *pl022) +{ + struct dma_slave_config rx_conf = { + .src_addr = SSP_DR(pl022->phybase), + .direction = DMA_FROM_DEVICE, + .src_maxburst = pl022->vendor->fifodepth >> 1, + }; + struct dma_slave_config tx_conf = { + .dst_addr = SSP_DR(pl022->phybase), + .direction = DMA_TO_DEVICE, + .dst_maxburst = pl022->vendor->fifodepth >> 1, + }; + unsigned int pages; + int ret; + int rx_sglen, tx_sglen; + struct dma_chan *rxchan = pl022->dma_rx_channel; + struct dma_chan *txchan = pl022->dma_tx_channel; + struct dma_async_tx_descriptor *rxdesc; + struct dma_async_tx_descriptor *txdesc; + + /* Check that the channels are available */ + if (!rxchan || !txchan) + return -ENODEV; + + switch (pl022->read) { + case READING_NULL: + /* Use the same as for writing */ + rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_UNDEFINED; + break; + case READING_U8: + rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; + break; + case READING_U16: + rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; + break; + case READING_U32: + rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + break; + } + + switch (pl022->write) { + case WRITING_NULL: + /* Use the same as for reading */ + tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_UNDEFINED; + break; + case WRITING_U8: + tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; + break; + case WRITING_U16: + tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; + break; + case WRITING_U32: + tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + break; + } + + /* SPI pecularity: we need to read and write the same width */ + if (rx_conf.src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) + rx_conf.src_addr_width = tx_conf.dst_addr_width; + if (tx_conf.dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) + tx_conf.dst_addr_width = rx_conf.src_addr_width; + BUG_ON(rx_conf.src_addr_width != tx_conf.dst_addr_width); + + dmaengine_slave_config(rxchan, &rx_conf); + dmaengine_slave_config(txchan, &tx_conf); + + /* Create sglists for the transfers */ + pages = (pl022->cur_transfer->len >> PAGE_SHIFT) + 1; + dev_dbg(&pl022->adev->dev, "using %d pages for transfer\n", pages); + + ret = sg_alloc_table(&pl022->sgt_rx, pages, GFP_KERNEL); + if (ret) + goto err_alloc_rx_sg; + + ret = sg_alloc_table(&pl022->sgt_tx, pages, GFP_KERNEL); + if (ret) + goto err_alloc_tx_sg; + + /* Fill in the scatterlists for the RX+TX buffers */ + setup_dma_scatter(pl022, pl022->rx, + pl022->cur_transfer->len, &pl022->sgt_rx); + setup_dma_scatter(pl022, pl022->tx, + pl022->cur_transfer->len, &pl022->sgt_tx); + + /* Map DMA buffers */ + rx_sglen = dma_map_sg(rxchan->device->dev, pl022->sgt_rx.sgl, + 
pl022->sgt_rx.nents, DMA_FROM_DEVICE); + if (!rx_sglen) + goto err_rx_sgmap; + + tx_sglen = dma_map_sg(txchan->device->dev, pl022->sgt_tx.sgl, + pl022->sgt_tx.nents, DMA_TO_DEVICE); + if (!tx_sglen) + goto err_tx_sgmap; + + /* Send both scatterlists */ + rxdesc = rxchan->device->device_prep_slave_sg(rxchan, + pl022->sgt_rx.sgl, + rx_sglen, + DMA_FROM_DEVICE, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!rxdesc) + goto err_rxdesc; + + txdesc = txchan->device->device_prep_slave_sg(txchan, + pl022->sgt_tx.sgl, + tx_sglen, + DMA_TO_DEVICE, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!txdesc) + goto err_txdesc; + + /* Put the callback on the RX transfer only, that should finish last */ + rxdesc->callback = dma_callback; + rxdesc->callback_param = pl022; + + /* Submit and fire RX and TX with TX last so we're ready to read! */ + dmaengine_submit(rxdesc); + dmaengine_submit(txdesc); + dma_async_issue_pending(rxchan); + dma_async_issue_pending(txchan); + + return 0; + +err_txdesc: + dmaengine_terminate_all(txchan); +err_rxdesc: + dmaengine_terminate_all(rxchan); + dma_unmap_sg(txchan->device->dev, pl022->sgt_tx.sgl, + pl022->sgt_tx.nents, DMA_TO_DEVICE); +err_tx_sgmap: + dma_unmap_sg(rxchan->device->dev, pl022->sgt_rx.sgl, + pl022->sgt_tx.nents, DMA_FROM_DEVICE); +err_rx_sgmap: + sg_free_table(&pl022->sgt_tx); +err_alloc_tx_sg: + sg_free_table(&pl022->sgt_rx); +err_alloc_rx_sg: + return -ENOMEM; +} + +static int __init pl022_dma_probe(struct pl022 *pl022) +{ + dma_cap_mask_t mask; + + /* Try to acquire a generic DMA engine slave channel */ + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + /* + * We need both RX and TX channels to do DMA, else do none + * of them. + */ + pl022->dma_rx_channel = dma_request_channel(mask, + pl022->master_info->dma_filter, + pl022->master_info->dma_rx_param); + if (!pl022->dma_rx_channel) { + dev_dbg(&pl022->adev->dev, "no RX DMA channel!\n"); + goto err_no_rxchan; + } + + pl022->dma_tx_channel = dma_request_channel(mask, + pl022->master_info->dma_filter, + pl022->master_info->dma_tx_param); + if (!pl022->dma_tx_channel) { + dev_dbg(&pl022->adev->dev, "no TX DMA channel!\n"); + goto err_no_txchan; + } + + pl022->dummypage = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!pl022->dummypage) { + dev_dbg(&pl022->adev->dev, "no DMA dummypage!\n"); + goto err_no_dummypage; + } + + dev_info(&pl022->adev->dev, "setup for DMA on RX %s, TX %s\n", + dma_chan_name(pl022->dma_rx_channel), + dma_chan_name(pl022->dma_tx_channel)); + + return 0; + +err_no_dummypage: + dma_release_channel(pl022->dma_tx_channel); +err_no_txchan: + dma_release_channel(pl022->dma_rx_channel); + pl022->dma_rx_channel = NULL; +err_no_rxchan: + dev_err(&pl022->adev->dev, + "Failed to work in dma mode, work without dma!\n"); + return -ENODEV; +} + +static void terminate_dma(struct pl022 *pl022) +{ + struct dma_chan *rxchan = pl022->dma_rx_channel; + struct dma_chan *txchan = pl022->dma_tx_channel; + + dmaengine_terminate_all(rxchan); + dmaengine_terminate_all(txchan); + unmap_free_dma_scatter(pl022); +} + +static void pl022_dma_remove(struct pl022 *pl022) +{ + if (pl022->busy) + terminate_dma(pl022); + if (pl022->dma_tx_channel) + dma_release_channel(pl022->dma_tx_channel); + if (pl022->dma_rx_channel) + dma_release_channel(pl022->dma_rx_channel); + kfree(pl022->dummypage); +} + +#else +static inline int configure_dma(struct pl022 *pl022) +{ + return -ENODEV; +} + +static inline int pl022_dma_probe(struct pl022 *pl022) +{ + return 0; +} + +static inline void pl022_dma_remove(struct pl022 *pl022) +{ +} +#endif + 
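For reference, the page-wise chunking that setup_dma_scatter() performs above can be illustrated with a minimal standalone sketch (plain C, assuming a 4096-byte page; the buffer address and length are hypothetical; not part of the patch):

	/*
	 * Sketch of setup_dma_scatter()'s chunking rule: each scatterlist
	 * entry covers at most the remainder of the current page, so no
	 * entry crosses a page boundary. PAGE_SIZE is assumed to be 4096.
	 */
	#include <stdio.h>
	#include <stddef.h>

	#define PAGE_SIZE 4096UL

	/* offset of a (virtual) address within its page */
	static size_t offset_in_page(size_t addr)
	{
		return addr & (PAGE_SIZE - 1);
	}

	int main(void)
	{
		size_t bufp = 0x1000f40;   /* hypothetical buffer start address */
		size_t bytesleft = 10000;  /* hypothetical transfer length */
		int entry = 0;

		while (bytesleft) {
			size_t room = PAGE_SIZE - offset_in_page(bufp);
			size_t mapbytes = bytesleft < room ? bytesleft : room;

			printf("sg entry %d: %zu bytes at page offset %zu\n",
			       entry++, mapbytes, offset_in_page(bufp));
			bufp += mapbytes;
			bytesleft -= mapbytes;
		}
		return 0;
	}

With these hypothetical values the sketch emits four entries (192, 4096, 4096 and 1616 bytes), mirroring how the driver splits a transfer buffer so that no scatterlist entry straddles a page.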
+/** + * pl022_interrupt_handler - Interrupt handler for SSP controller + * + * This function handles interrupts generated for an interrupt based transfer. + * If a receive overrun (ROR) interrupt is there then we disable SSP, flag the + * current message's state as STATE_ERROR and schedule the tasklet + * pump_transfers which will do the postprocessing of the current message by + * calling giveback(). Otherwise it reads data from RX FIFO till there is no + * more data, and writes data in TX FIFO till it is not full. If we complete + * the transfer we move to the next transfer and schedule the tasklet. + */ +static irqreturn_t pl022_interrupt_handler(int irq, void *dev_id) +{ + struct pl022 *pl022 = dev_id; + struct spi_message *msg = pl022->cur_msg; + u16 irq_status = 0; + u16 flag = 0; + + if (unlikely(!msg)) { + dev_err(&pl022->adev->dev, + "bad message state in interrupt handler"); + /* Never fail */ + return IRQ_HANDLED; + } + + /* Read the Interrupt Status Register */ + irq_status = readw(SSP_MIS(pl022->virtbase)); + + if (unlikely(!irq_status)) + return IRQ_NONE; + + /* + * This handles the FIFO interrupts, the timeout + * interrupts are flatly ignored, they cannot be + * trusted. + */ + if (unlikely(irq_status & SSP_MIS_MASK_RORMIS)) { + /* + * Overrun interrupt - bail out since our Data has been + * corrupted + */ + dev_err(&pl022->adev->dev, "FIFO overrun\n"); + if (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RFF) + dev_err(&pl022->adev->dev, + "RXFIFO is full\n"); + if (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_TNF) + dev_err(&pl022->adev->dev, + "TXFIFO is full\n"); + + /* + * Disable and clear interrupts, disable SSP, + * mark message with bad status so it can be + * retried. + */ + writew(DISABLE_ALL_INTERRUPTS, + SSP_IMSC(pl022->virtbase)); + writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase)); + writew((readw(SSP_CR1(pl022->virtbase)) & + (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase)); + msg->state = STATE_ERROR; + + /* Schedule message queue handler */ + tasklet_schedule(&pl022->pump_transfers); + return IRQ_HANDLED; + } + + readwriter(pl022); + + if ((pl022->tx == pl022->tx_end) && (flag == 0)) { + flag = 1; + /* Disable Transmit interrupt */ + writew(readw(SSP_IMSC(pl022->virtbase)) & + (~SSP_IMSC_MASK_TXIM), + SSP_IMSC(pl022->virtbase)); + } + + /* + * Since all transactions must write as much as shall be read, + * we can conclude the entire transaction once RX is complete. + * At this point, all TX will always be finished. + */ + if (pl022->rx >= pl022->rx_end) { + writew(DISABLE_ALL_INTERRUPTS, + SSP_IMSC(pl022->virtbase)); + writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase)); + if (unlikely(pl022->rx > pl022->rx_end)) { + dev_warn(&pl022->adev->dev, "read %u surplus " + "bytes (did you request an odd " + "number of bytes on a 16bit bus?)\n", + (u32) (pl022->rx - pl022->rx_end)); + } + /* Update total bytes transferred */ + msg->actual_length += pl022->cur_transfer->len; + if (pl022->cur_transfer->cs_change) + pl022->cur_chip-> + cs_control(SSP_CHIP_DESELECT); + /* Move to next transfer */ + msg->state = next_transfer(pl022); + tasklet_schedule(&pl022->pump_transfers); + return IRQ_HANDLED; + } + + return IRQ_HANDLED; +} + +/** + * This sets up the pointers to memory for the next message to + * send out on the SPI bus. 
+ */ +static int set_up_next_transfer(struct pl022 *pl022, + struct spi_transfer *transfer) +{ + int residue; + + /* Sanity check the message for this bus width */ + residue = pl022->cur_transfer->len % pl022->cur_chip->n_bytes; + if (unlikely(residue != 0)) { + dev_err(&pl022->adev->dev, + "message of %u bytes to transmit but the current " + "chip bus has a data width of %u bytes!\n", + pl022->cur_transfer->len, + pl022->cur_chip->n_bytes); + dev_err(&pl022->adev->dev, "skipping this message\n"); + return -EIO; + } + pl022->tx = (void *)transfer->tx_buf; + pl022->tx_end = pl022->tx + pl022->cur_transfer->len; + pl022->rx = (void *)transfer->rx_buf; + pl022->rx_end = pl022->rx + pl022->cur_transfer->len; + pl022->write = + pl022->tx ? pl022->cur_chip->write : WRITING_NULL; + pl022->read = pl022->rx ? pl022->cur_chip->read : READING_NULL; + return 0; +} + +/** + * pump_transfers - Tasklet function which schedules next transfer + * when running in interrupt or DMA transfer mode. + * @data: SSP driver private data structure + * + */ +static void pump_transfers(unsigned long data) +{ + struct pl022 *pl022 = (struct pl022 *) data; + struct spi_message *message = NULL; + struct spi_transfer *transfer = NULL; + struct spi_transfer *previous = NULL; + + /* Get current state information */ + message = pl022->cur_msg; + transfer = pl022->cur_transfer; + + /* Handle for abort */ + if (message->state == STATE_ERROR) { + message->status = -EIO; + giveback(pl022); + return; + } + + /* Handle end of message */ + if (message->state == STATE_DONE) { + message->status = 0; + giveback(pl022); + return; + } + + /* Delay if requested at end of transfer before CS change */ + if (message->state == STATE_RUNNING) { + previous = list_entry(transfer->transfer_list.prev, + struct spi_transfer, + transfer_list); + if (previous->delay_usecs) + /* + * FIXME: This runs in interrupt context. + * Is this really smart? + */ + udelay(previous->delay_usecs); + + /* Drop chip select only if cs_change is requested */ + if (previous->cs_change) + pl022->cur_chip->cs_control(SSP_CHIP_SELECT); + } else { + /* STATE_START */ + message->state = STATE_RUNNING; + } + + if (set_up_next_transfer(pl022, transfer)) { + message->state = STATE_ERROR; + message->status = -EIO; + giveback(pl022); + return; + } + /* Flush the FIFOs and let's go! 
*/ + flush(pl022); + + if (pl022->cur_chip->enable_dma) { + if (configure_dma(pl022)) { + dev_dbg(&pl022->adev->dev, + "configuration of DMA failed, fall back to interrupt mode\n"); + goto err_config_dma; + } + return; + } + +err_config_dma: + writew(ENABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase)); +} + +static void do_interrupt_dma_transfer(struct pl022 *pl022) +{ + u32 irqflags = ENABLE_ALL_INTERRUPTS; + + /* Enable target chip */ + pl022->cur_chip->cs_control(SSP_CHIP_SELECT); + if (set_up_next_transfer(pl022, pl022->cur_transfer)) { + /* Error path */ + pl022->cur_msg->state = STATE_ERROR; + pl022->cur_msg->status = -EIO; + giveback(pl022); + return; + } + /* If we're using DMA, set up DMA here */ + if (pl022->cur_chip->enable_dma) { + /* Configure DMA transfer */ + if (configure_dma(pl022)) { + dev_dbg(&pl022->adev->dev, + "configuration of DMA failed, fall back to interrupt mode\n"); + goto err_config_dma; + } + /* Disable interrupts in DMA mode, IRQ from DMA controller */ + irqflags = DISABLE_ALL_INTERRUPTS; + } +err_config_dma: + /* Enable SSP, turn on interrupts */ + writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE), + SSP_CR1(pl022->virtbase)); + writew(irqflags, SSP_IMSC(pl022->virtbase)); +} + +static void do_polling_transfer(struct pl022 *pl022) +{ + struct spi_message *message = NULL; + struct spi_transfer *transfer = NULL; + struct spi_transfer *previous = NULL; + struct chip_data *chip; + unsigned long time, timeout; + + chip = pl022->cur_chip; + message = pl022->cur_msg; + + while (message->state != STATE_DONE) { + /* Handle for abort */ + if (message->state == STATE_ERROR) + break; + transfer = pl022->cur_transfer; + + /* Delay if requested at end of transfer */ + if (message->state == STATE_RUNNING) { + previous = + list_entry(transfer->transfer_list.prev, + struct spi_transfer, transfer_list); + if (previous->delay_usecs) + udelay(previous->delay_usecs); + if (previous->cs_change) + pl022->cur_chip->cs_control(SSP_CHIP_SELECT); + } else { + /* STATE_START */ + message->state = STATE_RUNNING; + pl022->cur_chip->cs_control(SSP_CHIP_SELECT); + } + + /* Configuration Changing Per Transfer */ + if (set_up_next_transfer(pl022, transfer)) { + /* Error path */ + message->state = STATE_ERROR; + break; + } + /* Flush FIFOs and enable SSP */ + flush(pl022); + writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE), + SSP_CR1(pl022->virtbase)); + + dev_dbg(&pl022->adev->dev, "polling transfer ongoing ...\n"); + + timeout = jiffies + msecs_to_jiffies(SPI_POLLING_TIMEOUT); + while (pl022->tx < pl022->tx_end || pl022->rx < pl022->rx_end) { + time = jiffies; + readwriter(pl022); + if (time_after(time, timeout)) { + dev_warn(&pl022->adev->dev, + "%s: timeout!\n", __func__); + message->state = STATE_ERROR; + goto out; + } + cpu_relax(); + } + + /* Update total byte transferred */ + message->actual_length += pl022->cur_transfer->len; + if (pl022->cur_transfer->cs_change) + pl022->cur_chip->cs_control(SSP_CHIP_DESELECT); + /* Move to next transfer */ + message->state = next_transfer(pl022); + } +out: + /* Handle end of message */ + if (message->state == STATE_DONE) + message->status = 0; + else + message->status = -EIO; + + giveback(pl022); + return; +} + +/** + * pump_messages - Workqueue function which processes spi message queue + * @data: pointer to private data of SSP driver + * + * This function checks if there is any spi message in the queue that + * needs processing and delegate control to appropriate function + * do_polling_transfer()/do_interrupt_dma_transfer() + 
* based on the kind of the transfer + * + */ +static void pump_messages(struct work_struct *work) +{ + struct pl022 *pl022 = + container_of(work, struct pl022, pump_messages); + unsigned long flags; + + /* Lock queue and check for queue work */ + spin_lock_irqsave(&pl022->queue_lock, flags); + if (list_empty(&pl022->queue) || !pl022->running) { + pl022->busy = false; + spin_unlock_irqrestore(&pl022->queue_lock, flags); + return; + } + /* Make sure we are not already running a message */ + if (pl022->cur_msg) { + spin_unlock_irqrestore(&pl022->queue_lock, flags); + return; + } + /* Extract head of queue */ + pl022->cur_msg = + list_entry(pl022->queue.next, struct spi_message, queue); + + list_del_init(&pl022->cur_msg->queue); + pl022->busy = true; + spin_unlock_irqrestore(&pl022->queue_lock, flags); + + /* Initial message state */ + pl022->cur_msg->state = STATE_START; + pl022->cur_transfer = list_entry(pl022->cur_msg->transfers.next, + struct spi_transfer, + transfer_list); + + /* Setup the SPI using the per chip configuration */ + pl022->cur_chip = spi_get_ctldata(pl022->cur_msg->spi); + /* + * We enable the core voltage and clocks here, then the clocks + * and core will be disabled when giveback() is called in each method + * (poll/interrupt/DMA) + */ + amba_vcore_enable(pl022->adev); + amba_pclk_enable(pl022->adev); + clk_enable(pl022->clk); + restore_state(pl022); + flush(pl022); + + if (pl022->cur_chip->xfer_type == POLLING_TRANSFER) + do_polling_transfer(pl022); + else + do_interrupt_dma_transfer(pl022); +} + + +static int __init init_queue(struct pl022 *pl022) +{ + INIT_LIST_HEAD(&pl022->queue); + spin_lock_init(&pl022->queue_lock); + + pl022->running = false; + pl022->busy = false; + + tasklet_init(&pl022->pump_transfers, + pump_transfers, (unsigned long)pl022); + + INIT_WORK(&pl022->pump_messages, pump_messages); + pl022->workqueue = create_singlethread_workqueue( + dev_name(pl022->master->dev.parent)); + if (pl022->workqueue == NULL) + return -EBUSY; + + return 0; +} + + +static int start_queue(struct pl022 *pl022) +{ + unsigned long flags; + + spin_lock_irqsave(&pl022->queue_lock, flags); + + if (pl022->running || pl022->busy) { + spin_unlock_irqrestore(&pl022->queue_lock, flags); + return -EBUSY; + } + + pl022->running = true; + pl022->cur_msg = NULL; + pl022->cur_transfer = NULL; + pl022->cur_chip = NULL; + spin_unlock_irqrestore(&pl022->queue_lock, flags); + + queue_work(pl022->workqueue, &pl022->pump_messages); + + return 0; +} + + +static int stop_queue(struct pl022 *pl022) +{ + unsigned long flags; + unsigned limit = 500; + int status = 0; + + spin_lock_irqsave(&pl022->queue_lock, flags); + + /* This is a bit lame, but is optimized for the common execution path. + * A wait_queue on the pl022->busy could be used, but then the common + * execution path (pump_messages) would be required to call wake_up or + * friends on every SPI message. Do this instead */ + while ((!list_empty(&pl022->queue) || pl022->busy) && limit--) { + spin_unlock_irqrestore(&pl022->queue_lock, flags); + msleep(10); + spin_lock_irqsave(&pl022->queue_lock, flags); + } + + if (!list_empty(&pl022->queue) || pl022->busy) + status = -EBUSY; + else + pl022->running = false; + + spin_unlock_irqrestore(&pl022->queue_lock, flags); + + return status; +} + +static int destroy_queue(struct pl022 *pl022) +{ + int status; + + status = stop_queue(pl022); + /* we are unloading the module or failing to load (only two calls + * to this routine), and neither call can handle a return value. 
+ * However, destroy_workqueue calls flush_workqueue, and that will + * block until all work is done. If the reason that stop_queue + * timed out is that the work will never finish, then it does no + * good to call destroy_workqueue, so return anyway. */ + if (status != 0) + return status; + + destroy_workqueue(pl022->workqueue); + + return 0; +} + +static int verify_controller_parameters(struct pl022 *pl022, + struct pl022_config_chip const *chip_info) +{ + if ((chip_info->iface < SSP_INTERFACE_MOTOROLA_SPI) + || (chip_info->iface > SSP_INTERFACE_UNIDIRECTIONAL)) { + dev_err(&pl022->adev->dev, + "interface is configured incorrectly\n"); + return -EINVAL; + } + if ((chip_info->iface == SSP_INTERFACE_UNIDIRECTIONAL) && + (!pl022->vendor->unidir)) { + dev_err(&pl022->adev->dev, + "unidirectional mode not supported in this " + "hardware version\n"); + return -EINVAL; + } + if ((chip_info->hierarchy != SSP_MASTER) + && (chip_info->hierarchy != SSP_SLAVE)) { + dev_err(&pl022->adev->dev, + "hierarchy is configured incorrectly\n"); + return -EINVAL; + } + if ((chip_info->com_mode != INTERRUPT_TRANSFER) + && (chip_info->com_mode != DMA_TRANSFER) + && (chip_info->com_mode != POLLING_TRANSFER)) { + dev_err(&pl022->adev->dev, + "Communication mode is configured incorrectly\n"); + return -EINVAL; + } + if ((chip_info->rx_lev_trig < SSP_RX_1_OR_MORE_ELEM) + || (chip_info->rx_lev_trig > SSP_RX_32_OR_MORE_ELEM)) { + dev_err(&pl022->adev->dev, + "RX FIFO Trigger Level is configured incorrectly\n"); + return -EINVAL; + } + if ((chip_info->tx_lev_trig < SSP_TX_1_OR_MORE_EMPTY_LOC) + || (chip_info->tx_lev_trig > SSP_TX_32_OR_MORE_EMPTY_LOC)) { + dev_err(&pl022->adev->dev, + "TX FIFO Trigger Level is configured incorrectly\n"); + return -EINVAL; + } + if (chip_info->iface == SSP_INTERFACE_NATIONAL_MICROWIRE) { + if ((chip_info->ctrl_len < SSP_BITS_4) + || (chip_info->ctrl_len > SSP_BITS_32)) { + dev_err(&pl022->adev->dev, + "CTRL LEN is configured incorrectly\n"); + return -EINVAL; + } + if ((chip_info->wait_state != SSP_MWIRE_WAIT_ZERO) + && (chip_info->wait_state != SSP_MWIRE_WAIT_ONE)) { + dev_err(&pl022->adev->dev, + "Wait State is configured incorrectly\n"); + return -EINVAL; + } + /* Half duplex is only available in the ST Micro version */ + if (pl022->vendor->extended_cr) { + if ((chip_info->duplex != + SSP_MICROWIRE_CHANNEL_FULL_DUPLEX) + && (chip_info->duplex != + SSP_MICROWIRE_CHANNEL_HALF_DUPLEX)) { + dev_err(&pl022->adev->dev, + "Microwire duplex mode is configured incorrectly\n"); + return -EINVAL; + } + } else { + if (chip_info->duplex != SSP_MICROWIRE_CHANNEL_FULL_DUPLEX) + dev_err(&pl022->adev->dev, + "Microwire half duplex mode requested," + " but this is only available in the" + " ST version of PL022\n"); + return -EINVAL; + } + } + return 0; +} + +/** + * pl022_transfer - transfer function registered to SPI master framework + * @spi: spi device which is requesting transfer + * @msg: spi message which is to handled is queued to driver queue + * + * This function is registered to the SPI framework for this SPI master + * controller. It will queue the spi_message in the queue of driver if + * the queue is not stopped and return. 
+ */ +static int pl022_transfer(struct spi_device *spi, struct spi_message *msg) +{ + struct pl022 *pl022 = spi_master_get_devdata(spi->master); + unsigned long flags; + + spin_lock_irqsave(&pl022->queue_lock, flags); + + if (!pl022->running) { + spin_unlock_irqrestore(&pl022->queue_lock, flags); + return -ESHUTDOWN; + } + msg->actual_length = 0; + msg->status = -EINPROGRESS; + msg->state = STATE_START; + + list_add_tail(&msg->queue, &pl022->queue); + if (pl022->running && !pl022->busy) + queue_work(pl022->workqueue, &pl022->pump_messages); + + spin_unlock_irqrestore(&pl022->queue_lock, flags); + return 0; +} + +static int calculate_effective_freq(struct pl022 *pl022, + int freq, + struct ssp_clock_params *clk_freq) +{ + /* Lets calculate the frequency parameters */ + u16 cpsdvsr = 2; + u16 scr = 0; + bool freq_found = false; + u32 rate; + u32 max_tclk; + u32 min_tclk; + + rate = clk_get_rate(pl022->clk); + /* cpsdvscr = 2 & scr 0 */ + max_tclk = (rate / (CPSDVR_MIN * (1 + SCR_MIN))); + /* cpsdvsr = 254 & scr = 255 */ + min_tclk = (rate / (CPSDVR_MAX * (1 + SCR_MAX))); + + if ((freq <= max_tclk) && (freq >= min_tclk)) { + while (cpsdvsr <= CPSDVR_MAX && !freq_found) { + while (scr <= SCR_MAX && !freq_found) { + if ((rate / + (cpsdvsr * (1 + scr))) > freq) + scr += 1; + else { + /* + * This bool is made true when + * effective frequency >= + * target frequency is found + */ + freq_found = true; + if ((rate / + (cpsdvsr * (1 + scr))) != freq) { + if (scr == SCR_MIN) { + cpsdvsr -= 2; + scr = SCR_MAX; + } else + scr -= 1; + } + } + } + if (!freq_found) { + cpsdvsr += 2; + scr = SCR_MIN; + } + } + if (cpsdvsr != 0) { + dev_dbg(&pl022->adev->dev, + "SSP Effective Frequency is %u\n", + (rate / (cpsdvsr * (1 + scr)))); + clk_freq->cpsdvsr = (u8) (cpsdvsr & 0xFF); + clk_freq->scr = (u8) (scr & 0xFF); + dev_dbg(&pl022->adev->dev, + "SSP cpsdvsr = %d, scr = %d\n", + clk_freq->cpsdvsr, clk_freq->scr); + } + } else { + dev_err(&pl022->adev->dev, + "controller data is incorrect: out of range frequency"); + return -EINVAL; + } + return 0; +} + + +/* + * A piece of default chip info unless the platform + * supplies it. + */ +static const struct pl022_config_chip pl022_default_chip_info = { + .com_mode = POLLING_TRANSFER, + .iface = SSP_INTERFACE_MOTOROLA_SPI, + .hierarchy = SSP_SLAVE, + .slave_tx_disable = DO_NOT_DRIVE_TX, + .rx_lev_trig = SSP_RX_1_OR_MORE_ELEM, + .tx_lev_trig = SSP_TX_1_OR_MORE_EMPTY_LOC, + .ctrl_len = SSP_BITS_8, + .wait_state = SSP_MWIRE_WAIT_ZERO, + .duplex = SSP_MICROWIRE_CHANNEL_FULL_DUPLEX, + .cs_control = null_cs_control, +}; + + +/** + * pl022_setup - setup function registered to SPI master framework + * @spi: spi device which is requesting setup + * + * This function is registered to the SPI framework for this SPI master + * controller. If it is the first time when setup is called by this device, + * this function will initialize the runtime state for this chip and save + * the same in the device structure. Else it will update the runtime info + * with the updated chip info. Nothing is really being written to the + * controller hardware here, that is not done until the actual transfer + * commence. 
+ */ +static int pl022_setup(struct spi_device *spi) +{ + struct pl022_config_chip const *chip_info; + struct chip_data *chip; + struct ssp_clock_params clk_freq = {0, }; + int status = 0; + struct pl022 *pl022 = spi_master_get_devdata(spi->master); + unsigned int bits = spi->bits_per_word; + u32 tmp; + + if (!spi->max_speed_hz) + return -EINVAL; + + /* Get controller_state if one is supplied */ + chip = spi_get_ctldata(spi); + + if (chip == NULL) { + chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL); + if (!chip) { + dev_err(&spi->dev, + "cannot allocate controller state\n"); + return -ENOMEM; + } + dev_dbg(&spi->dev, + "allocated memory for controller's runtime state\n"); + } + + /* Get controller data if one is supplied */ + chip_info = spi->controller_data; + + if (chip_info == NULL) { + chip_info = &pl022_default_chip_info; + /* spi_board_info.controller_data not is supplied */ + dev_dbg(&spi->dev, + "using default controller_data settings\n"); + } else + dev_dbg(&spi->dev, + "using user supplied controller_data settings\n"); + + /* + * We can override with custom divisors, else we use the board + * frequency setting + */ + if ((0 == chip_info->clk_freq.cpsdvsr) + && (0 == chip_info->clk_freq.scr)) { + status = calculate_effective_freq(pl022, + spi->max_speed_hz, + &clk_freq); + if (status < 0) + goto err_config_params; + } else { + memcpy(&clk_freq, &chip_info->clk_freq, sizeof(clk_freq)); + if ((clk_freq.cpsdvsr % 2) != 0) + clk_freq.cpsdvsr = + clk_freq.cpsdvsr - 1; + } + if ((clk_freq.cpsdvsr < CPSDVR_MIN) + || (clk_freq.cpsdvsr > CPSDVR_MAX)) { + dev_err(&spi->dev, + "cpsdvsr is configured incorrectly\n"); + goto err_config_params; + } + + + status = verify_controller_parameters(pl022, chip_info); + if (status) { + dev_err(&spi->dev, "controller data is incorrect"); + goto err_config_params; + } + + /* Now set controller state based on controller data */ + chip->xfer_type = chip_info->com_mode; + if (!chip_info->cs_control) { + chip->cs_control = null_cs_control; + dev_warn(&spi->dev, + "chip select function is NULL for this chip\n"); + } else + chip->cs_control = chip_info->cs_control; + + if (bits <= 3) { + /* PL022 doesn't support less than 4-bits */ + status = -ENOTSUPP; + goto err_config_params; + } else if (bits <= 8) { + dev_dbg(&spi->dev, "4 <= n <=8 bits per word\n"); + chip->n_bytes = 1; + chip->read = READING_U8; + chip->write = WRITING_U8; + } else if (bits <= 16) { + dev_dbg(&spi->dev, "9 <= n <= 16 bits per word\n"); + chip->n_bytes = 2; + chip->read = READING_U16; + chip->write = WRITING_U16; + } else { + if (pl022->vendor->max_bpw >= 32) { + dev_dbg(&spi->dev, "17 <= n <= 32 bits per word\n"); + chip->n_bytes = 4; + chip->read = READING_U32; + chip->write = WRITING_U32; + } else { + dev_err(&spi->dev, + "illegal data size for this controller!\n"); + dev_err(&spi->dev, + "a standard pl022 can only handle " + "1 <= n <= 16 bit words\n"); + status = -ENOTSUPP; + goto err_config_params; + } + } + + /* Now Initialize all register settings required for this chip */ + chip->cr0 = 0; + chip->cr1 = 0; + chip->dmacr = 0; + chip->cpsr = 0; + if ((chip_info->com_mode == DMA_TRANSFER) + && ((pl022->master_info)->enable_dma)) { + chip->enable_dma = true; + dev_dbg(&spi->dev, "DMA mode set in controller state\n"); + SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED, + SSP_DMACR_MASK_RXDMAE, 0); + SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED, + SSP_DMACR_MASK_TXDMAE, 1); + } else { + chip->enable_dma = false; + dev_dbg(&spi->dev, "DMA mode NOT set in controller state\n"); + 
SSP_WRITE_BITS(chip->dmacr, SSP_DMA_DISABLED, + SSP_DMACR_MASK_RXDMAE, 0); + SSP_WRITE_BITS(chip->dmacr, SSP_DMA_DISABLED, + SSP_DMACR_MASK_TXDMAE, 1); + } + + chip->cpsr = clk_freq.cpsdvsr; + + /* Special setup for the ST micro extended control registers */ + if (pl022->vendor->extended_cr) { + u32 etx; + + if (pl022->vendor->pl023) { + /* These bits are only in the PL023 */ + SSP_WRITE_BITS(chip->cr1, chip_info->clkdelay, + SSP_CR1_MASK_FBCLKDEL_ST, 13); + } else { + /* These bits are in the PL022 but not PL023 */ + SSP_WRITE_BITS(chip->cr0, chip_info->duplex, + SSP_CR0_MASK_HALFDUP_ST, 5); + SSP_WRITE_BITS(chip->cr0, chip_info->ctrl_len, + SSP_CR0_MASK_CSS_ST, 16); + SSP_WRITE_BITS(chip->cr0, chip_info->iface, + SSP_CR0_MASK_FRF_ST, 21); + SSP_WRITE_BITS(chip->cr1, chip_info->wait_state, + SSP_CR1_MASK_MWAIT_ST, 6); + } + SSP_WRITE_BITS(chip->cr0, bits - 1, + SSP_CR0_MASK_DSS_ST, 0); + + if (spi->mode & SPI_LSB_FIRST) { + tmp = SSP_RX_LSB; + etx = SSP_TX_LSB; + } else { + tmp = SSP_RX_MSB; + etx = SSP_TX_MSB; + } + SSP_WRITE_BITS(chip->cr1, tmp, SSP_CR1_MASK_RENDN_ST, 4); + SSP_WRITE_BITS(chip->cr1, etx, SSP_CR1_MASK_TENDN_ST, 5); + SSP_WRITE_BITS(chip->cr1, chip_info->rx_lev_trig, + SSP_CR1_MASK_RXIFLSEL_ST, 7); + SSP_WRITE_BITS(chip->cr1, chip_info->tx_lev_trig, + SSP_CR1_MASK_TXIFLSEL_ST, 10); + } else { + SSP_WRITE_BITS(chip->cr0, bits - 1, + SSP_CR0_MASK_DSS, 0); + SSP_WRITE_BITS(chip->cr0, chip_info->iface, + SSP_CR0_MASK_FRF, 4); + } + + /* Stuff that is common for all versions */ + if (spi->mode & SPI_CPOL) + tmp = SSP_CLK_POL_IDLE_HIGH; + else + tmp = SSP_CLK_POL_IDLE_LOW; + SSP_WRITE_BITS(chip->cr0, tmp, SSP_CR0_MASK_SPO, 6); + + if (spi->mode & SPI_CPHA) + tmp = SSP_CLK_SECOND_EDGE; + else + tmp = SSP_CLK_FIRST_EDGE; + SSP_WRITE_BITS(chip->cr0, tmp, SSP_CR0_MASK_SPH, 7); + + SSP_WRITE_BITS(chip->cr0, clk_freq.scr, SSP_CR0_MASK_SCR, 8); + /* Loopback is available on all versions except PL023 */ + if (pl022->vendor->loopback) { + if (spi->mode & SPI_LOOP) + tmp = LOOPBACK_ENABLED; + else + tmp = LOOPBACK_DISABLED; + SSP_WRITE_BITS(chip->cr1, tmp, SSP_CR1_MASK_LBM, 0); + } + SSP_WRITE_BITS(chip->cr1, SSP_DISABLED, SSP_CR1_MASK_SSE, 1); + SSP_WRITE_BITS(chip->cr1, chip_info->hierarchy, SSP_CR1_MASK_MS, 2); + SSP_WRITE_BITS(chip->cr1, chip_info->slave_tx_disable, SSP_CR1_MASK_SOD, 3); + + /* Save controller_state */ + spi_set_ctldata(spi, chip); + return status; + err_config_params: + spi_set_ctldata(spi, NULL); + kfree(chip); + return status; +} + +/** + * pl022_cleanup - cleanup function registered to SPI master framework + * @spi: spi device which is requesting cleanup + * + * This function is registered to the SPI framework for this SPI master + * controller. It will free the runtime state of chip. 
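For the plain ARM PL022 path just above, CR0 is assembled from a few fields at fixed positions: data size select at bit 0, frame format at bit 4, clock polarity (SPO) at bit 6, clock phase (SPH) at bit 7, and the serial clock rate at bit 8. A compact sketch of that packing, assuming the usual 0/1 encodings for the polarity/phase values and 0 for the Motorola frame format (those constants are not spelled out in this hunk):

#include <stdio.h>
#include <stdint.h>

/* Local stand-ins for the SPI mode flags; values match the SPI core. */
#define SPI_CPHA    0x01
#define SPI_CPOL    0x02

/*
 * Sketch of the non-ST CR0 layout used in the setup code above:
 * DSS at bit 0, FRF at bit 4, SPO at bit 6, SPH at bit 7, SCR at bit 8.
 * Idle-high and second-edge are assumed to encode as 1.
 */
static uint32_t sketch_cr0(unsigned int mode, unsigned int bits,
                           unsigned int scr)
{
    uint32_t cr0 = 0;

    cr0 |= (uint32_t)(bits - 1) << 0;                   /* DSS: word size - 1 */
    cr0 |= (uint32_t)0 << 4;                            /* FRF: Motorola SPI  */
    cr0 |= (uint32_t)((mode & SPI_CPOL) ? 1 : 0) << 6;  /* SPO: idle level    */
    cr0 |= (uint32_t)((mode & SPI_CPHA) ? 1 : 0) << 7;  /* SPH: sample edge   */
    cr0 |= (uint32_t)scr << 8;                          /* SCR: clock rate    */
    return cr0;
}

int main(void)
{
    /* SPI mode 3, 8-bit words, SCR of 7 - illustrative values only */
    printf("CR0 = 0x%08x\n",
           (unsigned int)sketch_cr0(SPI_CPOL | SPI_CPHA, 8, 7));
    return 0;
}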
+ */ +static void pl022_cleanup(struct spi_device *spi) +{ + struct chip_data *chip = spi_get_ctldata(spi); + + spi_set_ctldata(spi, NULL); + kfree(chip); +} + + +static int __devinit +pl022_probe(struct amba_device *adev, const struct amba_id *id) +{ + struct device *dev = &adev->dev; + struct pl022_ssp_controller *platform_info = adev->dev.platform_data; + struct spi_master *master; + struct pl022 *pl022 = NULL; /*Data for this driver */ + int status = 0; + + dev_info(&adev->dev, + "ARM PL022 driver, device ID: 0x%08x\n", adev->periphid); + if (platform_info == NULL) { + dev_err(&adev->dev, "probe - no platform data supplied\n"); + status = -ENODEV; + goto err_no_pdata; + } + + /* Allocate master with space for data */ + master = spi_alloc_master(dev, sizeof(struct pl022)); + if (master == NULL) { + dev_err(&adev->dev, "probe - cannot alloc SPI master\n"); + status = -ENOMEM; + goto err_no_master; + } + + pl022 = spi_master_get_devdata(master); + pl022->master = master; + pl022->master_info = platform_info; + pl022->adev = adev; + pl022->vendor = id->data; + + /* + * Bus Number Which has been Assigned to this SSP controller + * on this board + */ + master->bus_num = platform_info->bus_id; + master->num_chipselect = platform_info->num_chipselect; + master->cleanup = pl022_cleanup; + master->setup = pl022_setup; + master->transfer = pl022_transfer; + + /* + * Supports mode 0-3, loopback, and active low CS. Transfers are + * always MS bit first on the original pl022. + */ + master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP; + if (pl022->vendor->extended_cr) + master->mode_bits |= SPI_LSB_FIRST; + + dev_dbg(&adev->dev, "BUSNO: %d\n", master->bus_num); + + status = amba_request_regions(adev, NULL); + if (status) + goto err_no_ioregion; + + pl022->phybase = adev->res.start; + pl022->virtbase = ioremap(adev->res.start, resource_size(&adev->res)); + if (pl022->virtbase == NULL) { + status = -ENOMEM; + goto err_no_ioremap; + } + printk(KERN_INFO "pl022: mapped registers from 0x%08x to %p\n", + adev->res.start, pl022->virtbase); + + pl022->clk = clk_get(&adev->dev, NULL); + if (IS_ERR(pl022->clk)) { + status = PTR_ERR(pl022->clk); + dev_err(&adev->dev, "could not retrieve SSP/SPI bus clock\n"); + goto err_no_clk; + } + + /* Disable SSP */ + writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)), + SSP_CR1(pl022->virtbase)); + load_ssp_default_config(pl022); + + status = request_irq(adev->irq[0], pl022_interrupt_handler, 0, "pl022", + pl022); + if (status < 0) { + dev_err(&adev->dev, "probe - cannot get IRQ (%d)\n", status); + goto err_no_irq; + } + + /* Get DMA channels */ + if (platform_info->enable_dma) { + status = pl022_dma_probe(pl022); + if (status != 0) + platform_info->enable_dma = 0; + } + + /* Initialize and start queue */ + status = init_queue(pl022); + if (status != 0) { + dev_err(&adev->dev, "probe - problem initializing queue\n"); + goto err_init_queue; + } + status = start_queue(pl022); + if (status != 0) { + dev_err(&adev->dev, "probe - problem starting queue\n"); + goto err_start_queue; + } + /* Register with the SPI framework */ + amba_set_drvdata(adev, pl022); + status = spi_register_master(master); + if (status != 0) { + dev_err(&adev->dev, + "probe - problem registering spi master\n"); + goto err_spi_register; + } + dev_dbg(dev, "probe succeeded\n"); + /* + * Disable the silicon block pclk and any voltage domain and just + * power it up and clock it when it's needed + */ + amba_pclk_disable(adev); + amba_vcore_disable(adev); + return 0; + + 
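pl022_probe() above acquires its resources in order and relies on the stacked error labels that follow to release them in reverse when anything fails. The shape of that unwind, reduced to a stand-alone toy in which the resources are placeholders rather than the driver's:

#include <stdio.h>
#include <stdlib.h>

/* Toy version of the goto-unwind pattern used by the probe above. */
static int sketch_probe(void)
{
    void *regs, *clk;
    int status;

    regs = malloc(16);          /* stands in for ioremap() */
    if (!regs) {
        status = -1;
        goto err_no_ioremap;
    }
    clk = malloc(16);           /* stands in for clk_get() */
    if (!clk) {
        status = -2;
        goto err_no_clk;
    }
    /* success: a real probe keeps these held until remove() */
    free(clk);
    free(regs);
    return 0;

err_no_clk:
    free(regs);
err_no_ioremap:
    return status;
}

int main(void)
{
    printf("sketch_probe() = %d\n", sketch_probe());
    return 0;
}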
err_spi_register: + err_start_queue: + err_init_queue: + destroy_queue(pl022); + pl022_dma_remove(pl022); + free_irq(adev->irq[0], pl022); + err_no_irq: + clk_put(pl022->clk); + err_no_clk: + iounmap(pl022->virtbase); + err_no_ioremap: + amba_release_regions(adev); + err_no_ioregion: + spi_master_put(master); + err_no_master: + err_no_pdata: + return status; +} + +static int __devexit +pl022_remove(struct amba_device *adev) +{ + struct pl022 *pl022 = amba_get_drvdata(adev); + int status = 0; + if (!pl022) + return 0; + + /* Remove the queue */ + status = destroy_queue(pl022); + if (status != 0) { + dev_err(&adev->dev, + "queue remove failed (%d)\n", status); + return status; + } + load_ssp_default_config(pl022); + pl022_dma_remove(pl022); + free_irq(adev->irq[0], pl022); + clk_disable(pl022->clk); + clk_put(pl022->clk); + iounmap(pl022->virtbase); + amba_release_regions(adev); + tasklet_disable(&pl022->pump_transfers); + spi_unregister_master(pl022->master); + spi_master_put(pl022->master); + amba_set_drvdata(adev, NULL); + dev_dbg(&adev->dev, "remove succeeded\n"); + return 0; +} + +#ifdef CONFIG_PM +static int pl022_suspend(struct amba_device *adev, pm_message_t state) +{ + struct pl022 *pl022 = amba_get_drvdata(adev); + int status = 0; + + status = stop_queue(pl022); + if (status) { + dev_warn(&adev->dev, "suspend cannot stop queue\n"); + return status; + } + + amba_vcore_enable(adev); + amba_pclk_enable(adev); + load_ssp_default_config(pl022); + amba_pclk_disable(adev); + amba_vcore_disable(adev); + dev_dbg(&adev->dev, "suspended\n"); + return 0; +} + +static int pl022_resume(struct amba_device *adev) +{ + struct pl022 *pl022 = amba_get_drvdata(adev); + int status = 0; + + /* Start the queue running */ + status = start_queue(pl022); + if (status) + dev_err(&adev->dev, "problem starting queue (%d)\n", status); + else + dev_dbg(&adev->dev, "resumed\n"); + + return status; +} +#else +#define pl022_suspend NULL +#define pl022_resume NULL +#endif /* CONFIG_PM */ + +static struct vendor_data vendor_arm = { + .fifodepth = 8, + .max_bpw = 16, + .unidir = false, + .extended_cr = false, + .pl023 = false, + .loopback = true, +}; + + +static struct vendor_data vendor_st = { + .fifodepth = 32, + .max_bpw = 32, + .unidir = false, + .extended_cr = true, + .pl023 = false, + .loopback = true, +}; + +static struct vendor_data vendor_st_pl023 = { + .fifodepth = 32, + .max_bpw = 32, + .unidir = false, + .extended_cr = true, + .pl023 = true, + .loopback = false, +}; + +static struct vendor_data vendor_db5500_pl023 = { + .fifodepth = 32, + .max_bpw = 32, + .unidir = false, + .extended_cr = true, + .pl023 = true, + .loopback = true, +}; + +static struct amba_id pl022_ids[] = { + { + /* + * ARM PL022 variant, this has a 16bit wide + * and 8 locations deep TX/RX FIFO + */ + .id = 0x00041022, + .mask = 0x000fffff, + .data = &vendor_arm, + }, + { + /* + * ST Micro derivative, this has 32bit wide + * and 32 locations deep TX/RX FIFO + */ + .id = 0x01080022, + .mask = 0xffffffff, + .data = &vendor_st, + }, + { + /* + * ST-Ericsson derivative "PL023" (this is not + * an official ARM number), this is a PL022 SSP block + * stripped to SPI mode only, it has 32bit wide + * and 32 locations deep TX/RX FIFO but no extended + * CR0/CR1 register + */ + .id = 0x00080023, + .mask = 0xffffffff, + .data = &vendor_st_pl023, + }, + { + .id = 0x10080023, + .mask = 0xffffffff, + .data = &vendor_db5500_pl023, + }, + { 0, 0 }, +}; + +static struct amba_driver pl022_driver = { + .drv = { + .name = "ssp-pl022", + }, + .id_table = 
pl022_ids, + .probe = pl022_probe, + .remove = __devexit_p(pl022_remove), + .suspend = pl022_suspend, + .resume = pl022_resume, +}; + + +static int __init pl022_init(void) +{ + return amba_driver_register(&pl022_driver); +} + +subsys_initcall(pl022_init); + +static void __exit pl022_exit(void) +{ + amba_driver_unregister(&pl022_driver); +} + +module_exit(pl022_exit); + +MODULE_AUTHOR("Linus Walleij "); +MODULE_DESCRIPTION("PL022 SSP Controller Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi-ppc4xx.c b/drivers/spi/spi-ppc4xx.c new file mode 100644 index 0000000..2a298c0 --- /dev/null +++ b/drivers/spi/spi-ppc4xx.c @@ -0,0 +1,612 @@ +/* + * SPI_PPC4XX SPI controller driver. + * + * Copyright (C) 2007 Gary Jennejohn + * Copyright 2008 Stefan Roese , DENX Software Engineering + * Copyright 2009 Harris Corporation, Steven A. Falco + * + * Based in part on drivers/spi/spi_s3c24xx.c + * + * Copyright (c) 2006 Ben Dooks + * Copyright (c) 2006 Simtec Electronics + * Ben Dooks + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + */ + +/* + * The PPC4xx SPI controller has no FIFO so each sent/received byte will + * generate an interrupt to the CPU. This can cause high CPU utilization. + * This driver allows platforms to reduce the interrupt load on the CPU + * during SPI transfers by setting max_speed_hz via the device tree. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include + +/* bits in mode register - bit 0 is MSb */ + +/* + * SPI_PPC4XX_MODE_SCP = 0 means "data latched on trailing edge of clock" + * SPI_PPC4XX_MODE_SCP = 1 means "data latched on leading edge of clock" + * Note: This is the inverse of CPHA. + */ +#define SPI_PPC4XX_MODE_SCP (0x80 >> 3) + +/* SPI_PPC4XX_MODE_SPE = 1 means "port enabled" */ +#define SPI_PPC4XX_MODE_SPE (0x80 >> 4) + +/* + * SPI_PPC4XX_MODE_RD = 0 means "MSB first" - this is the normal mode + * SPI_PPC4XX_MODE_RD = 1 means "LSB first" - this is bit-reversed mode + * Note: This is identical to SPI_LSB_FIRST. + */ +#define SPI_PPC4XX_MODE_RD (0x80 >> 5) + +/* + * SPI_PPC4XX_MODE_CI = 0 means "clock idles low" + * SPI_PPC4XX_MODE_CI = 1 means "clock idles high" + * Note: This is identical to CPOL. + */ +#define SPI_PPC4XX_MODE_CI (0x80 >> 6) + +/* + * SPI_PPC4XX_MODE_IL = 0 means "loopback disable" + * SPI_PPC4XX_MODE_IL = 1 means "loopback enable" + */ +#define SPI_PPC4XX_MODE_IL (0x80 >> 7) + +/* bits in control register */ +/* starts a transfer when set */ +#define SPI_PPC4XX_CR_STR (0x80 >> 7) + +/* bits in status register */ +/* port is busy with a transfer */ +#define SPI_PPC4XX_SR_BSY (0x80 >> 6) +/* RxD ready */ +#define SPI_PPC4XX_SR_RBR (0x80 >> 7) + +/* clock settings (SCP and CI) for various SPI modes */ +#define SPI_CLK_MODE0 (SPI_PPC4XX_MODE_SCP | 0) +#define SPI_CLK_MODE1 (0 | 0) +#define SPI_CLK_MODE2 (SPI_PPC4XX_MODE_SCP | SPI_PPC4XX_MODE_CI) +#define SPI_CLK_MODE3 (0 | SPI_PPC4XX_MODE_CI) + +#define DRIVER_NAME "spi_ppc4xx_of" + +struct spi_ppc4xx_regs { + u8 mode; + u8 rxd; + u8 txd; + u8 cr; + u8 sr; + u8 dummy; + /* + * Clock divisor modulus register + * This uses the follwing formula: + * SCPClkOut = OPBCLK/(4(CDM + 1)) + * or + * CDM = (OPBCLK/4*SCPClkOut) - 1 + * bit 0 is the MSb! + */ + u8 cdm; +}; + +/* SPI Controller driver's private data. 
*/ +struct ppc4xx_spi { + /* bitbang has to be first */ + struct spi_bitbang bitbang; + struct completion done; + + u64 mapbase; + u64 mapsize; + int irqnum; + /* need this to set the SPI clock */ + unsigned int opb_freq; + + /* for transfers */ + int len; + int count; + /* data buffers */ + const unsigned char *tx; + unsigned char *rx; + + int *gpios; + + struct spi_ppc4xx_regs __iomem *regs; /* pointer to the registers */ + struct spi_master *master; + struct device *dev; +}; + +/* need this so we can set the clock in the chipselect routine */ +struct spi_ppc4xx_cs { + u8 mode; +}; + +static int spi_ppc4xx_txrx(struct spi_device *spi, struct spi_transfer *t) +{ + struct ppc4xx_spi *hw; + u8 data; + + dev_dbg(&spi->dev, "txrx: tx %p, rx %p, len %d\n", + t->tx_buf, t->rx_buf, t->len); + + hw = spi_master_get_devdata(spi->master); + + hw->tx = t->tx_buf; + hw->rx = t->rx_buf; + hw->len = t->len; + hw->count = 0; + + /* send the first byte */ + data = hw->tx ? hw->tx[0] : 0; + out_8(&hw->regs->txd, data); + out_8(&hw->regs->cr, SPI_PPC4XX_CR_STR); + wait_for_completion(&hw->done); + + return hw->count; +} + +static int spi_ppc4xx_setupxfer(struct spi_device *spi, struct spi_transfer *t) +{ + struct ppc4xx_spi *hw = spi_master_get_devdata(spi->master); + struct spi_ppc4xx_cs *cs = spi->controller_state; + int scr; + u8 cdm = 0; + u32 speed; + u8 bits_per_word; + + /* Start with the generic configuration for this device. */ + bits_per_word = spi->bits_per_word; + speed = spi->max_speed_hz; + + /* + * Modify the configuration if the transfer overrides it. Do not allow + * the transfer to overwrite the generic configuration with zeros. + */ + if (t) { + if (t->bits_per_word) + bits_per_word = t->bits_per_word; + + if (t->speed_hz) + speed = min(t->speed_hz, spi->max_speed_hz); + } + + if (bits_per_word != 8) { + dev_err(&spi->dev, "invalid bits-per-word (%d)\n", + bits_per_word); + return -EINVAL; + } + + if (!speed || (speed > spi->max_speed_hz)) { + dev_err(&spi->dev, "invalid speed_hz (%d)\n", speed); + return -EINVAL; + } + + /* Write new configration */ + out_8(&hw->regs->mode, cs->mode); + + /* Set the clock */ + /* opb_freq was already divided by 4 */ + scr = (hw->opb_freq / speed) - 1; + if (scr > 0) + cdm = min(scr, 0xff); + + dev_dbg(&spi->dev, "setting pre-scaler to %d (hz %d)\n", cdm, speed); + + if (in_8(&hw->regs->cdm) != cdm) + out_8(&hw->regs->cdm, cdm); + + spin_lock(&hw->bitbang.lock); + if (!hw->bitbang.busy) { + hw->bitbang.chipselect(spi, BITBANG_CS_INACTIVE); + /* Need to ndelay here? 
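The divider math in spi_ppc4xx_setupxfer() above follows the CDM formula quoted with the register layout: the stored opb_freq is already OPBCLK/4, so CDM = opb_freq / speed - 1, clamped to the 8-bit register, and the resulting bit clock is opb_freq / (CDM + 1). Worked through with assumed clock numbers:

#include <stdio.h>
#include <stdint.h>

/* Stand-alone rerun of the CDM arithmetic; clock values are assumptions. */
int main(void)
{
    unsigned int opb_clk = 64000000;        /* assumed OPB clock (Hz)   */
    unsigned int opb_freq = opb_clk >> 2;   /* driver stores OPBCLK / 4 */
    unsigned int speed = 1000000;           /* assumed max_speed_hz     */
    int scr = (int)(opb_freq / speed) - 1;
    uint8_t cdm = 0;

    if (scr > 0)
        cdm = scr > 0xff ? 0xff : (uint8_t)scr;

    printf("CDM = %u -> SPI clock = %u Hz\n",
           (unsigned int)cdm, opb_freq / (cdm + 1u));
    return 0;
}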
*/ + } + spin_unlock(&hw->bitbang.lock); + + return 0; +} + +static int spi_ppc4xx_setup(struct spi_device *spi) +{ + struct spi_ppc4xx_cs *cs = spi->controller_state; + + if (spi->bits_per_word != 8) { + dev_err(&spi->dev, "invalid bits-per-word (%d)\n", + spi->bits_per_word); + return -EINVAL; + } + + if (!spi->max_speed_hz) { + dev_err(&spi->dev, "invalid max_speed_hz (must be non-zero)\n"); + return -EINVAL; + } + + if (cs == NULL) { + cs = kzalloc(sizeof *cs, GFP_KERNEL); + if (!cs) + return -ENOMEM; + spi->controller_state = cs; + } + + /* + * We set all bits of the SPI0_MODE register, so, + * no need to read-modify-write + */ + cs->mode = SPI_PPC4XX_MODE_SPE; + + switch (spi->mode & (SPI_CPHA | SPI_CPOL)) { + case SPI_MODE_0: + cs->mode |= SPI_CLK_MODE0; + break; + case SPI_MODE_1: + cs->mode |= SPI_CLK_MODE1; + break; + case SPI_MODE_2: + cs->mode |= SPI_CLK_MODE2; + break; + case SPI_MODE_3: + cs->mode |= SPI_CLK_MODE3; + break; + } + + if (spi->mode & SPI_LSB_FIRST) + cs->mode |= SPI_PPC4XX_MODE_RD; + + return 0; +} + +static void spi_ppc4xx_chipsel(struct spi_device *spi, int value) +{ + struct ppc4xx_spi *hw = spi_master_get_devdata(spi->master); + unsigned int cs = spi->chip_select; + unsigned int cspol; + + /* + * If there are no chip selects at all, or if this is the special + * case of a non-existent (dummy) chip select, do nothing. + */ + + if (!hw->master->num_chipselect || hw->gpios[cs] == -EEXIST) + return; + + cspol = spi->mode & SPI_CS_HIGH ? 1 : 0; + if (value == BITBANG_CS_INACTIVE) + cspol = !cspol; + + gpio_set_value(hw->gpios[cs], cspol); +} + +static irqreturn_t spi_ppc4xx_int(int irq, void *dev_id) +{ + struct ppc4xx_spi *hw; + u8 status; + u8 data; + unsigned int count; + + hw = (struct ppc4xx_spi *)dev_id; + + status = in_8(&hw->regs->sr); + if (!status) + return IRQ_NONE; + + /* + * BSY de-asserts one cycle after the transfer is complete. The + * interrupt is asserted after the transfer is complete. The exact + * relationship is not documented, hence this code. + */ + + if (unlikely(status & SPI_PPC4XX_SR_BSY)) { + u8 lstatus; + int cnt = 0; + + dev_dbg(hw->dev, "got interrupt but spi still busy?\n"); + do { + ndelay(10); + lstatus = in_8(&hw->regs->sr); + } while (++cnt < 100 && lstatus & SPI_PPC4XX_SR_BSY); + + if (cnt >= 100) { + dev_err(hw->dev, "busywait: too many loops!\n"); + complete(&hw->done); + return IRQ_HANDLED; + } else { + /* status is always 1 (RBR) here */ + status = in_8(&hw->regs->sr); + dev_dbg(hw->dev, "loops %d status %x\n", cnt, status); + } + } + + count = hw->count; + hw->count++; + + /* RBR triggered this interrupt. Therefore, data must be ready. */ + data = in_8(&hw->regs->rxd); + if (hw->rx) + hw->rx[count] = data; + + count++; + + if (count < hw->len) { + data = hw->tx ? hw->tx[count] : 0; + out_8(&hw->regs->txd, data); + out_8(&hw->regs->cr, SPI_PPC4XX_CR_STR); + } else { + complete(&hw->done); + } + + return IRQ_HANDLED; +} + +static void spi_ppc4xx_cleanup(struct spi_device *spi) +{ + kfree(spi->controller_state); +} + +static void spi_ppc4xx_enable(struct ppc4xx_spi *hw) +{ + /* + * On all 4xx PPC's the SPI bus is shared/multiplexed with + * the 2nd I2C bus. We need to enable the the SPI bus before + * using it. 
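The setup routine above maps the four SPI modes onto the SPI0_MODE bits defined earlier in this file, where SCP is the inverse of CPHA and CI tracks CPOL. That inversion can be restated directly; the bit values are copied from the definitions above and the helper name is only for illustration:

#include <stdio.h>
#include <stdint.h>

#define MODE_SCP    (0x80 >> 3)     /* inverse of CPHA       */
#define MODE_SPE    (0x80 >> 4)     /* port enable           */
#define MODE_CI     (0x80 >> 6)     /* same polarity as CPOL */

/* Restates the switch in spi_ppc4xx_setup(): SPI mode -> SPI0_MODE bits. */
static uint8_t sketch_mode_reg(int cpol, int cpha)
{
    uint8_t mode = MODE_SPE;

    if (!cpha)      /* CPHA = 0: latch on leading edge, so SCP = 1 */
        mode |= MODE_SCP;
    if (cpol)       /* CPOL = 1: clock idles high, so CI = 1 */
        mode |= MODE_CI;
    return mode;
}

int main(void)
{
    int m;

    for (m = 0; m < 4; m++)
        printf("SPI mode %d -> SPI0_MODE = 0x%02x\n", m,
               (unsigned int)sketch_mode_reg((m >> 1) & 1, m & 1));
    return 0;
}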
+ */ + + /* need to clear bit 14 to enable SPC */ + dcri_clrset(SDR0, SDR0_PFC1, 0x80000000 >> 14, 0); +} + +static void free_gpios(struct ppc4xx_spi *hw) +{ + if (hw->master->num_chipselect) { + int i; + for (i = 0; i < hw->master->num_chipselect; i++) + if (gpio_is_valid(hw->gpios[i])) + gpio_free(hw->gpios[i]); + + kfree(hw->gpios); + hw->gpios = NULL; + } +} + +/* + * platform_device layer stuff... + */ +static int __init spi_ppc4xx_of_probe(struct platform_device *op) +{ + struct ppc4xx_spi *hw; + struct spi_master *master; + struct spi_bitbang *bbp; + struct resource resource; + struct device_node *np = op->dev.of_node; + struct device *dev = &op->dev; + struct device_node *opbnp; + int ret; + int num_gpios; + const unsigned int *clk; + + master = spi_alloc_master(dev, sizeof *hw); + if (master == NULL) + return -ENOMEM; + master->dev.of_node = np; + dev_set_drvdata(dev, master); + hw = spi_master_get_devdata(master); + hw->master = spi_master_get(master); + hw->dev = dev; + + init_completion(&hw->done); + + /* + * A count of zero implies a single SPI device without any chip-select. + * Note that of_gpio_count counts all gpios assigned to this spi master. + * This includes both "null" gpio's and real ones. + */ + num_gpios = of_gpio_count(np); + if (num_gpios) { + int i; + + hw->gpios = kzalloc(sizeof(int) * num_gpios, GFP_KERNEL); + if (!hw->gpios) { + ret = -ENOMEM; + goto free_master; + } + + for (i = 0; i < num_gpios; i++) { + int gpio; + enum of_gpio_flags flags; + + gpio = of_get_gpio_flags(np, i, &flags); + hw->gpios[i] = gpio; + + if (gpio_is_valid(gpio)) { + /* Real CS - set the initial state. */ + ret = gpio_request(gpio, np->name); + if (ret < 0) { + dev_err(dev, "can't request gpio " + "#%d: %d\n", i, ret); + goto free_gpios; + } + + gpio_direction_output(gpio, + !!(flags & OF_GPIO_ACTIVE_LOW)); + } else if (gpio == -EEXIST) { + ; /* No CS, but that's OK. */ + } else { + dev_err(dev, "invalid gpio #%d: %d\n", i, gpio); + ret = -EINVAL; + goto free_gpios; + } + } + } + + /* Setup the state for the bitbang driver */ + bbp = &hw->bitbang; + bbp->master = hw->master; + bbp->setup_transfer = spi_ppc4xx_setupxfer; + bbp->chipselect = spi_ppc4xx_chipsel; + bbp->txrx_bufs = spi_ppc4xx_txrx; + bbp->use_dma = 0; + bbp->master->setup = spi_ppc4xx_setup; + bbp->master->cleanup = spi_ppc4xx_cleanup; + + /* Allocate bus num dynamically. 
*/ + bbp->master->bus_num = -1; + + /* the spi->mode bits understood by this driver: */ + bbp->master->mode_bits = + SPI_CPHA | SPI_CPOL | SPI_CS_HIGH | SPI_LSB_FIRST; + + /* this many pins in all GPIO controllers */ + bbp->master->num_chipselect = num_gpios; + + /* Get the clock for the OPB */ + opbnp = of_find_compatible_node(NULL, NULL, "ibm,opb"); + if (opbnp == NULL) { + dev_err(dev, "OPB: cannot find node\n"); + ret = -ENODEV; + goto free_gpios; + } + /* Get the clock (Hz) for the OPB */ + clk = of_get_property(opbnp, "clock-frequency", NULL); + if (clk == NULL) { + dev_err(dev, "OPB: no clock-frequency property set\n"); + of_node_put(opbnp); + ret = -ENODEV; + goto free_gpios; + } + hw->opb_freq = *clk; + hw->opb_freq >>= 2; + of_node_put(opbnp); + + ret = of_address_to_resource(np, 0, &resource); + if (ret) { + dev_err(dev, "error while parsing device node resource\n"); + goto free_gpios; + } + hw->mapbase = resource.start; + hw->mapsize = resource.end - resource.start + 1; + + /* Sanity check */ + if (hw->mapsize < sizeof(struct spi_ppc4xx_regs)) { + dev_err(dev, "too small to map registers\n"); + ret = -EINVAL; + goto free_gpios; + } + + /* Request IRQ */ + hw->irqnum = irq_of_parse_and_map(np, 0); + ret = request_irq(hw->irqnum, spi_ppc4xx_int, + IRQF_DISABLED, "spi_ppc4xx_of", (void *)hw); + if (ret) { + dev_err(dev, "unable to allocate interrupt\n"); + goto free_gpios; + } + + if (!request_mem_region(hw->mapbase, hw->mapsize, DRIVER_NAME)) { + dev_err(dev, "resource unavailable\n"); + ret = -EBUSY; + goto request_mem_error; + } + + hw->regs = ioremap(hw->mapbase, sizeof(struct spi_ppc4xx_regs)); + + if (!hw->regs) { + dev_err(dev, "unable to memory map registers\n"); + ret = -ENXIO; + goto map_io_error; + } + + spi_ppc4xx_enable(hw); + + /* Finally register our spi controller */ + dev->dma_mask = 0; + ret = spi_bitbang_start(bbp); + if (ret) { + dev_err(dev, "failed to register SPI master\n"); + goto unmap_regs; + } + + dev_info(dev, "driver initialized\n"); + + return 0; + +unmap_regs: + iounmap(hw->regs); +map_io_error: + release_mem_region(hw->mapbase, hw->mapsize); +request_mem_error: + free_irq(hw->irqnum, hw); +free_gpios: + free_gpios(hw); +free_master: + dev_set_drvdata(dev, NULL); + spi_master_put(master); + + dev_err(dev, "initialization failed\n"); + return ret; +} + +static int __exit spi_ppc4xx_of_remove(struct platform_device *op) +{ + struct spi_master *master = dev_get_drvdata(&op->dev); + struct ppc4xx_spi *hw = spi_master_get_devdata(master); + + spi_bitbang_stop(&hw->bitbang); + dev_set_drvdata(&op->dev, NULL); + release_mem_region(hw->mapbase, hw->mapsize); + free_irq(hw->irqnum, hw); + iounmap(hw->regs); + free_gpios(hw); + return 0; +} + +static const struct of_device_id spi_ppc4xx_of_match[] = { + { .compatible = "ibm,ppc4xx-spi", }, + {}, +}; + +MODULE_DEVICE_TABLE(of, spi_ppc4xx_of_match); + +static struct platform_driver spi_ppc4xx_of_driver = { + .probe = spi_ppc4xx_of_probe, + .remove = __exit_p(spi_ppc4xx_of_remove), + .driver = { + .name = DRIVER_NAME, + .owner = THIS_MODULE, + .of_match_table = spi_ppc4xx_of_match, + }, +}; + +static int __init spi_ppc4xx_init(void) +{ + return platform_driver_register(&spi_ppc4xx_of_driver); +} +module_init(spi_ppc4xx_init); + +static void __exit spi_ppc4xx_exit(void) +{ + platform_driver_unregister(&spi_ppc4xx_of_driver); +} +module_exit(spi_ppc4xx_exit); + +MODULE_AUTHOR("Gary Jennejohn & Stefan Roese"); +MODULE_DESCRIPTION("Simple PPC4xx SPI Driver"); +MODULE_LICENSE("GPL"); diff --git 
a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c new file mode 100644 index 0000000..378e504 --- /dev/null +++ b/drivers/spi/spi-pxa2xx-pci.c @@ -0,0 +1,180 @@ +/* + * CE4100's SPI device is more or less the same one as found on PXA + * + */ +#include +#include +#include +#include + +struct ce4100_info { + struct ssp_device ssp; + struct platform_device *spi_pdev; +}; + +static DEFINE_MUTEX(ssp_lock); +static LIST_HEAD(ssp_list); + +struct ssp_device *pxa_ssp_request(int port, const char *label) +{ + struct ssp_device *ssp = NULL; + + mutex_lock(&ssp_lock); + + list_for_each_entry(ssp, &ssp_list, node) { + if (ssp->port_id == port && ssp->use_count == 0) { + ssp->use_count++; + ssp->label = label; + break; + } + } + + mutex_unlock(&ssp_lock); + + if (&ssp->node == &ssp_list) + return NULL; + + return ssp; +} +EXPORT_SYMBOL_GPL(pxa_ssp_request); + +void pxa_ssp_free(struct ssp_device *ssp) +{ + mutex_lock(&ssp_lock); + if (ssp->use_count) { + ssp->use_count--; + ssp->label = NULL; + } else + dev_err(&ssp->pdev->dev, "device already free\n"); + mutex_unlock(&ssp_lock); +} +EXPORT_SYMBOL_GPL(pxa_ssp_free); + +static int __devinit ce4100_spi_probe(struct pci_dev *dev, + const struct pci_device_id *ent) +{ + int ret; + resource_size_t phys_beg; + resource_size_t phys_len; + struct ce4100_info *spi_info; + struct platform_device *pdev; + struct pxa2xx_spi_master spi_pdata; + struct ssp_device *ssp; + + ret = pci_enable_device(dev); + if (ret) + return ret; + + phys_beg = pci_resource_start(dev, 0); + phys_len = pci_resource_len(dev, 0); + + if (!request_mem_region(phys_beg, phys_len, + "CE4100 SPI")) { + dev_err(&dev->dev, "Can't request register space.\n"); + ret = -EBUSY; + return ret; + } + + pdev = platform_device_alloc("pxa2xx-spi", dev->devfn); + spi_info = kzalloc(sizeof(*spi_info), GFP_KERNEL); + if (!pdev || !spi_info ) { + ret = -ENOMEM; + goto err_nomem; + } + memset(&spi_pdata, 0, sizeof(spi_pdata)); + spi_pdata.num_chipselect = dev->devfn; + + ret = platform_device_add_data(pdev, &spi_pdata, sizeof(spi_pdata)); + if (ret) + goto err_nomem; + + pdev->dev.parent = &dev->dev; + pdev->dev.of_node = dev->dev.of_node; + ssp = &spi_info->ssp; + ssp->phys_base = pci_resource_start(dev, 0); + ssp->mmio_base = ioremap(phys_beg, phys_len); + if (!ssp->mmio_base) { + dev_err(&pdev->dev, "failed to ioremap() registers\n"); + ret = -EIO; + goto err_nomem; + } + ssp->irq = dev->irq; + ssp->port_id = pdev->id; + ssp->type = PXA25x_SSP; + + mutex_lock(&ssp_lock); + list_add(&ssp->node, &ssp_list); + mutex_unlock(&ssp_lock); + + pci_set_drvdata(dev, spi_info); + + ret = platform_device_add(pdev); + if (ret) + goto err_dev_add; + + return ret; + +err_dev_add: + pci_set_drvdata(dev, NULL); + mutex_lock(&ssp_lock); + list_del(&ssp->node); + mutex_unlock(&ssp_lock); + iounmap(ssp->mmio_base); + +err_nomem: + release_mem_region(phys_beg, phys_len); + platform_device_put(pdev); + kfree(spi_info); + return ret; +} + +static void __devexit ce4100_spi_remove(struct pci_dev *dev) +{ + struct ce4100_info *spi_info; + struct ssp_device *ssp; + + spi_info = pci_get_drvdata(dev); + ssp = &spi_info->ssp; + platform_device_unregister(spi_info->spi_pdev); + + iounmap(ssp->mmio_base); + release_mem_region(pci_resource_start(dev, 0), + pci_resource_len(dev, 0)); + + mutex_lock(&ssp_lock); + list_del(&ssp->node); + mutex_unlock(&ssp_lock); + + pci_set_drvdata(dev, NULL); + pci_disable_device(dev); + kfree(spi_info); +} + +static struct pci_device_id ce4100_spi_devices[] __devinitdata = { + { 
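Aside from the PCI glue, the hunk above carries local copies of pxa_ssp_request() and pxa_ssp_free(): a port can be claimed once, tagged with the requester's label, and released later. The claim/release bookkeeping on its own, with the locking and kernel list plumbing left out and every name invented for the illustration:

#include <stdio.h>
#include <stddef.h>

struct sketch_ssp {
    int port_id;
    int use_count;
    const char *label;
};

static struct sketch_ssp ports[] = {
    { .port_id = 0 }, { .port_id = 1 },
};

/* Hand out a free port at most once, remembering who asked for it. */
static struct sketch_ssp *sketch_request(int port, const char *label)
{
    size_t i;

    for (i = 0; i < sizeof(ports) / sizeof(ports[0]); i++)
        if (ports[i].port_id == port && ports[i].use_count == 0) {
            ports[i].use_count++;
            ports[i].label = label;
            return &ports[i];
        }
    return NULL;
}

static void sketch_free(struct sketch_ssp *ssp)
{
    if (ssp && ssp->use_count) {
        ssp->use_count--;
        ssp->label = NULL;
    }
}

int main(void)
{
    struct sketch_ssp *a = sketch_request(0, "spi");
    struct sketch_ssp *b = sketch_request(0, "again");  /* already claimed */

    printf("first claim: %s, second claim: %s\n",
           a ? "ok" : "busy", b ? "ok" : "busy");
    sketch_free(a);
    return 0;
}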
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2e6a) }, + { }, +}; +MODULE_DEVICE_TABLE(pci, ce4100_spi_devices); + +static struct pci_driver ce4100_spi_driver = { + .name = "ce4100_spi", + .id_table = ce4100_spi_devices, + .probe = ce4100_spi_probe, + .remove = __devexit_p(ce4100_spi_remove), +}; + +static int __init ce4100_spi_init(void) +{ + return pci_register_driver(&ce4100_spi_driver); +} +module_init(ce4100_spi_init); + +static void __exit ce4100_spi_exit(void) +{ + pci_unregister_driver(&ce4100_spi_driver); +} +module_exit(ce4100_spi_exit); + +MODULE_DESCRIPTION("CE4100 PCI-SPI glue code for PXA's driver"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Sebastian Andrzej Siewior "); diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c new file mode 100644 index 0000000..dc25bee --- /dev/null +++ b/drivers/spi/spi-pxa2xx.c @@ -0,0 +1,1816 @@ +/* + * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + + +MODULE_AUTHOR("Stephen Street"); +MODULE_DESCRIPTION("PXA2xx SSP SPI Controller"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:pxa2xx-spi"); + +#define MAX_BUSES 3 + +#define TIMOUT_DFLT 1000 + +#define DMA_INT_MASK (DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR) +#define RESET_DMA_CHANNEL (DCSR_NODESC | DMA_INT_MASK) +#define IS_DMA_ALIGNED(x) ((((u32)(x)) & 0x07) == 0) +#define MAX_DMA_LEN 8191 +#define DMA_ALIGNMENT 8 + +/* + * for testing SSCR1 changes that require SSP restart, basically + * everything except the service and interrupt enables, the pxa270 developer + * manual says only SSCR1_SCFR, SSCR1_SPH, SSCR1_SPO need to be in this + * list, but the PXA255 dev man says all bits without really meaning the + * service and interrupt enables + */ +#define SSCR1_CHANGE_MASK (SSCR1_TTELP | SSCR1_TTE | SSCR1_SCFR \ + | SSCR1_ECRA | SSCR1_ECRB | SSCR1_SCLKDIR \ + | SSCR1_SFRMDIR | SSCR1_RWOT | SSCR1_TRAIL \ + | SSCR1_IFS | SSCR1_STRF | SSCR1_EFWR \ + | SSCR1_RFT | SSCR1_TFT | SSCR1_MWDS \ + | SSCR1_SPH | SSCR1_SPO | SSCR1_LBM) + +#define DEFINE_SSP_REG(reg, off) \ +static inline u32 read_##reg(void const __iomem *p) \ +{ return __raw_readl(p + (off)); } \ +\ +static inline void write_##reg(u32 v, void __iomem *p) \ +{ __raw_writel(v, p + (off)); } + +DEFINE_SSP_REG(SSCR0, 0x00) +DEFINE_SSP_REG(SSCR1, 0x04) +DEFINE_SSP_REG(SSSR, 0x08) +DEFINE_SSP_REG(SSITR, 0x0c) +DEFINE_SSP_REG(SSDR, 0x10) +DEFINE_SSP_REG(SSTO, 0x28) +DEFINE_SSP_REG(SSPSP, 0x2c) + +#define START_STATE ((void*)0) +#define RUNNING_STATE ((void*)1) +#define DONE_STATE ((void*)2) +#define ERROR_STATE ((void*)-1) + +#define QUEUE_RUNNING 0 +#define QUEUE_STOPPED 1 + +struct driver_data { + /* Driver model hookup */ + struct platform_device 
*pdev; + + /* SSP Info */ + struct ssp_device *ssp; + + /* SPI framework hookup */ + enum pxa_ssp_type ssp_type; + struct spi_master *master; + + /* PXA hookup */ + struct pxa2xx_spi_master *master_info; + + /* DMA setup stuff */ + int rx_channel; + int tx_channel; + u32 *null_dma_buf; + + /* SSP register addresses */ + void __iomem *ioaddr; + u32 ssdr_physical; + + /* SSP masks*/ + u32 dma_cr1; + u32 int_cr1; + u32 clear_sr; + u32 mask_sr; + + /* Driver message queue */ + struct workqueue_struct *workqueue; + struct work_struct pump_messages; + spinlock_t lock; + struct list_head queue; + int busy; + int run; + + /* Message Transfer pump */ + struct tasklet_struct pump_transfers; + + /* Current message transfer state info */ + struct spi_message* cur_msg; + struct spi_transfer* cur_transfer; + struct chip_data *cur_chip; + size_t len; + void *tx; + void *tx_end; + void *rx; + void *rx_end; + int dma_mapped; + dma_addr_t rx_dma; + dma_addr_t tx_dma; + size_t rx_map_len; + size_t tx_map_len; + u8 n_bytes; + u32 dma_width; + int (*write)(struct driver_data *drv_data); + int (*read)(struct driver_data *drv_data); + irqreturn_t (*transfer_handler)(struct driver_data *drv_data); + void (*cs_control)(u32 command); +}; + +struct chip_data { + u32 cr0; + u32 cr1; + u32 psp; + u32 timeout; + u8 n_bytes; + u32 dma_width; + u32 dma_burst_size; + u32 threshold; + u32 dma_threshold; + u8 enable_dma; + u8 bits_per_word; + u32 speed_hz; + union { + int gpio_cs; + unsigned int frm; + }; + int gpio_cs_inverted; + int (*write)(struct driver_data *drv_data); + int (*read)(struct driver_data *drv_data); + void (*cs_control)(u32 command); +}; + +static void pump_messages(struct work_struct *work); + +static void cs_assert(struct driver_data *drv_data) +{ + struct chip_data *chip = drv_data->cur_chip; + + if (drv_data->ssp_type == CE4100_SSP) { + write_SSSR(drv_data->cur_chip->frm, drv_data->ioaddr); + return; + } + + if (chip->cs_control) { + chip->cs_control(PXA2XX_CS_ASSERT); + return; + } + + if (gpio_is_valid(chip->gpio_cs)) + gpio_set_value(chip->gpio_cs, chip->gpio_cs_inverted); +} + +static void cs_deassert(struct driver_data *drv_data) +{ + struct chip_data *chip = drv_data->cur_chip; + + if (drv_data->ssp_type == CE4100_SSP) + return; + + if (chip->cs_control) { + chip->cs_control(PXA2XX_CS_DEASSERT); + return; + } + + if (gpio_is_valid(chip->gpio_cs)) + gpio_set_value(chip->gpio_cs, !chip->gpio_cs_inverted); +} + +static void write_SSSR_CS(struct driver_data *drv_data, u32 val) +{ + void __iomem *reg = drv_data->ioaddr; + + if (drv_data->ssp_type == CE4100_SSP) + val |= read_SSSR(reg) & SSSR_ALT_FRM_MASK; + + write_SSSR(val, reg); +} + +static int pxa25x_ssp_comp(struct driver_data *drv_data) +{ + if (drv_data->ssp_type == PXA25x_SSP) + return 1; + if (drv_data->ssp_type == CE4100_SSP) + return 1; + return 0; +} + +static int flush(struct driver_data *drv_data) +{ + unsigned long limit = loops_per_jiffy << 1; + + void __iomem *reg = drv_data->ioaddr; + + do { + while (read_SSSR(reg) & SSSR_RNE) { + read_SSDR(reg); + } + } while ((read_SSSR(reg) & SSSR_BSY) && --limit); + write_SSSR_CS(drv_data, SSSR_ROR); + + return limit; +} + +static int null_writer(struct driver_data *drv_data) +{ + void __iomem *reg = drv_data->ioaddr; + u8 n_bytes = drv_data->n_bytes; + + if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK) + || (drv_data->tx == drv_data->tx_end)) + return 0; + + write_SSDR(0, reg); + drv_data->tx += n_bytes; + + return 1; +} + +static int null_reader(struct driver_data *drv_data) +{ + void 
__iomem *reg = drv_data->ioaddr; + u8 n_bytes = drv_data->n_bytes; + + while ((read_SSSR(reg) & SSSR_RNE) + && (drv_data->rx < drv_data->rx_end)) { + read_SSDR(reg); + drv_data->rx += n_bytes; + } + + return drv_data->rx == drv_data->rx_end; +} + +static int u8_writer(struct driver_data *drv_data) +{ + void __iomem *reg = drv_data->ioaddr; + + if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK) + || (drv_data->tx == drv_data->tx_end)) + return 0; + + write_SSDR(*(u8 *)(drv_data->tx), reg); + ++drv_data->tx; + + return 1; +} + +static int u8_reader(struct driver_data *drv_data) +{ + void __iomem *reg = drv_data->ioaddr; + + while ((read_SSSR(reg) & SSSR_RNE) + && (drv_data->rx < drv_data->rx_end)) { + *(u8 *)(drv_data->rx) = read_SSDR(reg); + ++drv_data->rx; + } + + return drv_data->rx == drv_data->rx_end; +} + +static int u16_writer(struct driver_data *drv_data) +{ + void __iomem *reg = drv_data->ioaddr; + + if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK) + || (drv_data->tx == drv_data->tx_end)) + return 0; + + write_SSDR(*(u16 *)(drv_data->tx), reg); + drv_data->tx += 2; + + return 1; +} + +static int u16_reader(struct driver_data *drv_data) +{ + void __iomem *reg = drv_data->ioaddr; + + while ((read_SSSR(reg) & SSSR_RNE) + && (drv_data->rx < drv_data->rx_end)) { + *(u16 *)(drv_data->rx) = read_SSDR(reg); + drv_data->rx += 2; + } + + return drv_data->rx == drv_data->rx_end; +} + +static int u32_writer(struct driver_data *drv_data) +{ + void __iomem *reg = drv_data->ioaddr; + + if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK) + || (drv_data->tx == drv_data->tx_end)) + return 0; + + write_SSDR(*(u32 *)(drv_data->tx), reg); + drv_data->tx += 4; + + return 1; +} + +static int u32_reader(struct driver_data *drv_data) +{ + void __iomem *reg = drv_data->ioaddr; + + while ((read_SSSR(reg) & SSSR_RNE) + && (drv_data->rx < drv_data->rx_end)) { + *(u32 *)(drv_data->rx) = read_SSDR(reg); + drv_data->rx += 4; + } + + return drv_data->rx == drv_data->rx_end; +} + +static void *next_transfer(struct driver_data *drv_data) +{ + struct spi_message *msg = drv_data->cur_msg; + struct spi_transfer *trans = drv_data->cur_transfer; + + /* Move to next transfer */ + if (trans->transfer_list.next != &msg->transfers) { + drv_data->cur_transfer = + list_entry(trans->transfer_list.next, + struct spi_transfer, + transfer_list); + return RUNNING_STATE; + } else + return DONE_STATE; +} + +static int map_dma_buffers(struct driver_data *drv_data) +{ + struct spi_message *msg = drv_data->cur_msg; + struct device *dev = &msg->spi->dev; + + if (!drv_data->cur_chip->enable_dma) + return 0; + + if (msg->is_dma_mapped) + return drv_data->rx_dma && drv_data->tx_dma; + + if (!IS_DMA_ALIGNED(drv_data->rx) || !IS_DMA_ALIGNED(drv_data->tx)) + return 0; + + /* Modify setup if rx buffer is null */ + if (drv_data->rx == NULL) { + *drv_data->null_dma_buf = 0; + drv_data->rx = drv_data->null_dma_buf; + drv_data->rx_map_len = 4; + } else + drv_data->rx_map_len = drv_data->len; + + + /* Modify setup if tx buffer is null */ + if (drv_data->tx == NULL) { + *drv_data->null_dma_buf = 0; + drv_data->tx = drv_data->null_dma_buf; + drv_data->tx_map_len = 4; + } else + drv_data->tx_map_len = drv_data->len; + + /* Stream map the tx buffer. Always do DMA_TO_DEVICE first + * so we flush the cache *before* invalidating it, in case + * the tx and rx buffers overlap. 
+ */ + drv_data->tx_dma = dma_map_single(dev, drv_data->tx, + drv_data->tx_map_len, DMA_TO_DEVICE); + if (dma_mapping_error(dev, drv_data->tx_dma)) + return 0; + + /* Stream map the rx buffer */ + drv_data->rx_dma = dma_map_single(dev, drv_data->rx, + drv_data->rx_map_len, DMA_FROM_DEVICE); + if (dma_mapping_error(dev, drv_data->rx_dma)) { + dma_unmap_single(dev, drv_data->tx_dma, + drv_data->tx_map_len, DMA_TO_DEVICE); + return 0; + } + + return 1; +} + +static void unmap_dma_buffers(struct driver_data *drv_data) +{ + struct device *dev; + + if (!drv_data->dma_mapped) + return; + + if (!drv_data->cur_msg->is_dma_mapped) { + dev = &drv_data->cur_msg->spi->dev; + dma_unmap_single(dev, drv_data->rx_dma, + drv_data->rx_map_len, DMA_FROM_DEVICE); + dma_unmap_single(dev, drv_data->tx_dma, + drv_data->tx_map_len, DMA_TO_DEVICE); + } + + drv_data->dma_mapped = 0; +} + +/* caller already set message->status; dma and pio irqs are blocked */ +static void giveback(struct driver_data *drv_data) +{ + struct spi_transfer* last_transfer; + unsigned long flags; + struct spi_message *msg; + + spin_lock_irqsave(&drv_data->lock, flags); + msg = drv_data->cur_msg; + drv_data->cur_msg = NULL; + drv_data->cur_transfer = NULL; + queue_work(drv_data->workqueue, &drv_data->pump_messages); + spin_unlock_irqrestore(&drv_data->lock, flags); + + last_transfer = list_entry(msg->transfers.prev, + struct spi_transfer, + transfer_list); + + /* Delay if requested before any change in chip select */ + if (last_transfer->delay_usecs) + udelay(last_transfer->delay_usecs); + + /* Drop chip select UNLESS cs_change is true or we are returning + * a message with an error, or next message is for another chip + */ + if (!last_transfer->cs_change) + cs_deassert(drv_data); + else { + struct spi_message *next_msg; + + /* Holding of cs was hinted, but we need to make sure + * the next message is for the same chip. Don't waste + * time with the following tests unless this was hinted. + * + * We cannot postpone this until pump_messages, because + * after calling msg->complete (below) the driver that + * sent the current message could be unloaded, which + * could invalidate the cs_control() callback... 
+ */ + + /* get a pointer to the next message, if any */ + spin_lock_irqsave(&drv_data->lock, flags); + if (list_empty(&drv_data->queue)) + next_msg = NULL; + else + next_msg = list_entry(drv_data->queue.next, + struct spi_message, queue); + spin_unlock_irqrestore(&drv_data->lock, flags); + + /* see if the next and current messages point + * to the same chip + */ + if (next_msg && next_msg->spi != msg->spi) + next_msg = NULL; + if (!next_msg || msg->state == ERROR_STATE) + cs_deassert(drv_data); + } + + msg->state = NULL; + if (msg->complete) + msg->complete(msg->context); + + drv_data->cur_chip = NULL; +} + +static int wait_ssp_rx_stall(void const __iomem *ioaddr) +{ + unsigned long limit = loops_per_jiffy << 1; + + while ((read_SSSR(ioaddr) & SSSR_BSY) && --limit) + cpu_relax(); + + return limit; +} + +static int wait_dma_channel_stop(int channel) +{ + unsigned long limit = loops_per_jiffy << 1; + + while (!(DCSR(channel) & DCSR_STOPSTATE) && --limit) + cpu_relax(); + + return limit; +} + +static void dma_error_stop(struct driver_data *drv_data, const char *msg) +{ + void __iomem *reg = drv_data->ioaddr; + + /* Stop and reset */ + DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; + DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; + write_SSSR_CS(drv_data, drv_data->clear_sr); + write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg); + if (!pxa25x_ssp_comp(drv_data)) + write_SSTO(0, reg); + flush(drv_data); + write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); + + unmap_dma_buffers(drv_data); + + dev_err(&drv_data->pdev->dev, "%s\n", msg); + + drv_data->cur_msg->state = ERROR_STATE; + tasklet_schedule(&drv_data->pump_transfers); +} + +static void dma_transfer_complete(struct driver_data *drv_data) +{ + void __iomem *reg = drv_data->ioaddr; + struct spi_message *msg = drv_data->cur_msg; + + /* Clear and disable interrupts on SSP and DMA channels*/ + write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg); + write_SSSR_CS(drv_data, drv_data->clear_sr); + DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; + DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; + + if (wait_dma_channel_stop(drv_data->rx_channel) == 0) + dev_err(&drv_data->pdev->dev, + "dma_handler: dma rx channel stop failed\n"); + + if (wait_ssp_rx_stall(drv_data->ioaddr) == 0) + dev_err(&drv_data->pdev->dev, + "dma_transfer: ssp rx stall failed\n"); + + unmap_dma_buffers(drv_data); + + /* update the buffer pointer for the amount completed in dma */ + drv_data->rx += drv_data->len - + (DCMD(drv_data->rx_channel) & DCMD_LENGTH); + + /* read trailing data from fifo, it does not matter how many + * bytes are in the fifo just read until buffer is full + * or fifo is empty, which ever occurs first */ + drv_data->read(drv_data); + + /* return count of what was actually read */ + msg->actual_length += drv_data->len - + (drv_data->rx_end - drv_data->rx); + + /* Transfer delays and chip select release are + * handled in pump_transfers or giveback + */ + + /* Move to next transfer */ + msg->state = next_transfer(drv_data); + + /* Schedule transfer tasklet */ + tasklet_schedule(&drv_data->pump_transfers); +} + +static void dma_handler(int channel, void *data) +{ + struct driver_data *drv_data = data; + u32 irq_status = DCSR(channel) & DMA_INT_MASK; + + if (irq_status & DCSR_BUSERR) { + + if (channel == drv_data->tx_channel) + dma_error_stop(drv_data, + "dma_handler: " + "bad bus address on tx channel"); + else + dma_error_stop(drv_data, + "dma_handler: " + "bad bus address on rx channel"); + return; + } + + /* PXA255x_SSP has no timeout interrupt, wait 
for tailing bytes */ + if ((channel == drv_data->tx_channel) + && (irq_status & DCSR_ENDINTR) + && (drv_data->ssp_type == PXA25x_SSP)) { + + /* Wait for rx to stall */ + if (wait_ssp_rx_stall(drv_data->ioaddr) == 0) + dev_err(&drv_data->pdev->dev, + "dma_handler: ssp rx stall failed\n"); + + /* finish this transfer, start the next */ + dma_transfer_complete(drv_data); + } +} + +static irqreturn_t dma_transfer(struct driver_data *drv_data) +{ + u32 irq_status; + void __iomem *reg = drv_data->ioaddr; + + irq_status = read_SSSR(reg) & drv_data->mask_sr; + if (irq_status & SSSR_ROR) { + dma_error_stop(drv_data, "dma_transfer: fifo overrun"); + return IRQ_HANDLED; + } + + /* Check for false positive timeout */ + if ((irq_status & SSSR_TINT) + && (DCSR(drv_data->tx_channel) & DCSR_RUN)) { + write_SSSR(SSSR_TINT, reg); + return IRQ_HANDLED; + } + + if (irq_status & SSSR_TINT || drv_data->rx == drv_data->rx_end) { + + /* Clear and disable timeout interrupt, do the rest in + * dma_transfer_complete */ + if (!pxa25x_ssp_comp(drv_data)) + write_SSTO(0, reg); + + /* finish this transfer, start the next */ + dma_transfer_complete(drv_data); + + return IRQ_HANDLED; + } + + /* Opps problem detected */ + return IRQ_NONE; +} + +static void reset_sccr1(struct driver_data *drv_data) +{ + void __iomem *reg = drv_data->ioaddr; + struct chip_data *chip = drv_data->cur_chip; + u32 sccr1_reg; + + sccr1_reg = read_SSCR1(reg) & ~drv_data->int_cr1; + sccr1_reg &= ~SSCR1_RFT; + sccr1_reg |= chip->threshold; + write_SSCR1(sccr1_reg, reg); +} + +static void int_error_stop(struct driver_data *drv_data, const char* msg) +{ + void __iomem *reg = drv_data->ioaddr; + + /* Stop and reset SSP */ + write_SSSR_CS(drv_data, drv_data->clear_sr); + reset_sccr1(drv_data); + if (!pxa25x_ssp_comp(drv_data)) + write_SSTO(0, reg); + flush(drv_data); + write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); + + dev_err(&drv_data->pdev->dev, "%s\n", msg); + + drv_data->cur_msg->state = ERROR_STATE; + tasklet_schedule(&drv_data->pump_transfers); +} + +static void int_transfer_complete(struct driver_data *drv_data) +{ + void __iomem *reg = drv_data->ioaddr; + + /* Stop SSP */ + write_SSSR_CS(drv_data, drv_data->clear_sr); + reset_sccr1(drv_data); + if (!pxa25x_ssp_comp(drv_data)) + write_SSTO(0, reg); + + /* Update total byte transferred return count actual bytes read */ + drv_data->cur_msg->actual_length += drv_data->len - + (drv_data->rx_end - drv_data->rx); + + /* Transfer delays and chip select release are + * handled in pump_transfers or giveback + */ + + /* Move to next transfer */ + drv_data->cur_msg->state = next_transfer(drv_data); + + /* Schedule transfer tasklet */ + tasklet_schedule(&drv_data->pump_transfers); +} + +static irqreturn_t interrupt_transfer(struct driver_data *drv_data) +{ + void __iomem *reg = drv_data->ioaddr; + + u32 irq_mask = (read_SSCR1(reg) & SSCR1_TIE) ? 
+ drv_data->mask_sr : drv_data->mask_sr & ~SSSR_TFS; + + u32 irq_status = read_SSSR(reg) & irq_mask; + + if (irq_status & SSSR_ROR) { + int_error_stop(drv_data, "interrupt_transfer: fifo overrun"); + return IRQ_HANDLED; + } + + if (irq_status & SSSR_TINT) { + write_SSSR(SSSR_TINT, reg); + if (drv_data->read(drv_data)) { + int_transfer_complete(drv_data); + return IRQ_HANDLED; + } + } + + /* Drain rx fifo, Fill tx fifo and prevent overruns */ + do { + if (drv_data->read(drv_data)) { + int_transfer_complete(drv_data); + return IRQ_HANDLED; + } + } while (drv_data->write(drv_data)); + + if (drv_data->read(drv_data)) { + int_transfer_complete(drv_data); + return IRQ_HANDLED; + } + + if (drv_data->tx == drv_data->tx_end) { + u32 bytes_left; + u32 sccr1_reg; + + sccr1_reg = read_SSCR1(reg); + sccr1_reg &= ~SSCR1_TIE; + + /* + * PXA25x_SSP has no timeout, set up rx threshould for the + * remaining RX bytes. + */ + if (pxa25x_ssp_comp(drv_data)) { + + sccr1_reg &= ~SSCR1_RFT; + + bytes_left = drv_data->rx_end - drv_data->rx; + switch (drv_data->n_bytes) { + case 4: + bytes_left >>= 1; + case 2: + bytes_left >>= 1; + } + + if (bytes_left > RX_THRESH_DFLT) + bytes_left = RX_THRESH_DFLT; + + sccr1_reg |= SSCR1_RxTresh(bytes_left); + } + write_SSCR1(sccr1_reg, reg); + } + + /* We did something */ + return IRQ_HANDLED; +} + +static irqreturn_t ssp_int(int irq, void *dev_id) +{ + struct driver_data *drv_data = dev_id; + void __iomem *reg = drv_data->ioaddr; + u32 sccr1_reg = read_SSCR1(reg); + u32 mask = drv_data->mask_sr; + u32 status; + + status = read_SSSR(reg); + + /* Ignore possible writes if we don't need to write */ + if (!(sccr1_reg & SSCR1_TIE)) + mask &= ~SSSR_TFS; + + if (!(status & mask)) + return IRQ_NONE; + + if (!drv_data->cur_msg) { + + write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); + write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg); + if (!pxa25x_ssp_comp(drv_data)) + write_SSTO(0, reg); + write_SSSR_CS(drv_data, drv_data->clear_sr); + + dev_err(&drv_data->pdev->dev, "bad message state " + "in interrupt handler\n"); + + /* Never fail */ + return IRQ_HANDLED; + } + + return drv_data->transfer_handler(drv_data); +} + +static int set_dma_burst_and_threshold(struct chip_data *chip, + struct spi_device *spi, + u8 bits_per_word, u32 *burst_code, + u32 *threshold) +{ + struct pxa2xx_spi_chip *chip_info = + (struct pxa2xx_spi_chip *)spi->controller_data; + int bytes_per_word; + int burst_bytes; + int thresh_words; + int req_burst_size; + int retval = 0; + + /* Set the threshold (in registers) to equal the same amount of data + * as represented by burst size (in bytes). The computation below + * is (burst_size rounded up to nearest 8 byte, word or long word) + * divided by (bytes/register); the tx threshold is the inverse of + * the rx, so that there will always be enough data in the rx fifo + * to satisfy a burst, and there will always be enough space in the + * tx fifo to accept a burst (a tx burst will overwrite the fifo if + * there is not enough space), there must always remain enough empty + * space in the rx fifo for any data loaded to the tx fifo. + * Whenever burst_size (in bytes) equals bits/word, the fifo threshold + * will be 8, or half the fifo; + * The threshold can only be set to 2, 4 or 8, but not 16, because + * to burst 16 to the tx fifo, the fifo would have to be empty; + * however, the minimum fifo trigger level is 1, and the tx will + * request service when the fifo is at this level, with only 15 spaces. 
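The long threshold comment above boils down to a small calculation: convert bits per word to bytes, pick a DMA burst size, then set the RX trigger to one burst worth of FIFO entries and the TX trigger to the complement of the 16-entry FIFO, so a burst always fits. In numbers, with example inputs and the register field encodings left out:

#include <stdio.h>

int main(void)
{
    unsigned int bits_per_word = 16;    /* example transfer width */
    unsigned int burst_bytes = 16;      /* example DMA burst size */
    unsigned int bytes_per_word =
        bits_per_word <= 8 ? 1 : (bits_per_word <= 16 ? 2 : 4);
    unsigned int thresh_words = burst_bytes / bytes_per_word;

    /* RX triggers when a full burst is waiting; TX when a burst fits. */
    printf("rx trigger = %u entries, tx trigger = %u free entries\n",
           thresh_words, 16 - thresh_words);
    return 0;
}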
+ */ + + /* find bytes/word */ + if (bits_per_word <= 8) + bytes_per_word = 1; + else if (bits_per_word <= 16) + bytes_per_word = 2; + else + bytes_per_word = 4; + + /* use struct pxa2xx_spi_chip->dma_burst_size if available */ + if (chip_info) + req_burst_size = chip_info->dma_burst_size; + else { + switch (chip->dma_burst_size) { + default: + /* if the default burst size is not set, + * do it now */ + chip->dma_burst_size = DCMD_BURST8; + case DCMD_BURST8: + req_burst_size = 8; + break; + case DCMD_BURST16: + req_burst_size = 16; + break; + case DCMD_BURST32: + req_burst_size = 32; + break; + } + } + if (req_burst_size <= 8) { + *burst_code = DCMD_BURST8; + burst_bytes = 8; + } else if (req_burst_size <= 16) { + if (bytes_per_word == 1) { + /* don't burst more than 1/2 the fifo */ + *burst_code = DCMD_BURST8; + burst_bytes = 8; + retval = 1; + } else { + *burst_code = DCMD_BURST16; + burst_bytes = 16; + } + } else { + if (bytes_per_word == 1) { + /* don't burst more than 1/2 the fifo */ + *burst_code = DCMD_BURST8; + burst_bytes = 8; + retval = 1; + } else if (bytes_per_word == 2) { + /* don't burst more than 1/2 the fifo */ + *burst_code = DCMD_BURST16; + burst_bytes = 16; + retval = 1; + } else { + *burst_code = DCMD_BURST32; + burst_bytes = 32; + } + } + + thresh_words = burst_bytes / bytes_per_word; + + /* thresh_words will be between 2 and 8 */ + *threshold = (SSCR1_RxTresh(thresh_words) & SSCR1_RFT) + | (SSCR1_TxTresh(16-thresh_words) & SSCR1_TFT); + + return retval; +} + +static unsigned int ssp_get_clk_div(struct ssp_device *ssp, int rate) +{ + unsigned long ssp_clk = clk_get_rate(ssp->clk); + + if (ssp->type == PXA25x_SSP || ssp->type == CE4100_SSP) + return ((ssp_clk / (2 * rate) - 1) & 0xff) << 8; + else + return ((ssp_clk / rate - 1) & 0xfff) << 8; +} + +static void pump_transfers(unsigned long data) +{ + struct driver_data *drv_data = (struct driver_data *)data; + struct spi_message *message = NULL; + struct spi_transfer *transfer = NULL; + struct spi_transfer *previous = NULL; + struct chip_data *chip = NULL; + struct ssp_device *ssp = drv_data->ssp; + void __iomem *reg = drv_data->ioaddr; + u32 clk_div = 0; + u8 bits = 0; + u32 speed = 0; + u32 cr0; + u32 cr1; + u32 dma_thresh = drv_data->cur_chip->dma_threshold; + u32 dma_burst = drv_data->cur_chip->dma_burst_size; + + /* Get current state information */ + message = drv_data->cur_msg; + transfer = drv_data->cur_transfer; + chip = drv_data->cur_chip; + + /* Handle for abort */ + if (message->state == ERROR_STATE) { + message->status = -EIO; + giveback(drv_data); + return; + } + + /* Handle end of message */ + if (message->state == DONE_STATE) { + message->status = 0; + giveback(drv_data); + return; + } + + /* Delay if requested at end of transfer before CS change */ + if (message->state == RUNNING_STATE) { + previous = list_entry(transfer->transfer_list.prev, + struct spi_transfer, + transfer_list); + if (previous->delay_usecs) + udelay(previous->delay_usecs); + + /* Drop chip select only if cs_change is requested */ + if (previous->cs_change) + cs_deassert(drv_data); + } + + /* Check for transfers that need multiple DMA segments */ + if (transfer->len > MAX_DMA_LEN && chip->enable_dma) { + + /* reject already-mapped transfers; PIO won't always work */ + if (message->is_dma_mapped + || transfer->rx_dma || transfer->tx_dma) { + dev_err(&drv_data->pdev->dev, + "pump_transfers: mapped transfer length " + "of %u is greater than %d\n", + transfer->len, MAX_DMA_LEN); + message->status = -EINVAL; + giveback(drv_data); + return; + 
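ssp_get_clk_div() above encodes the requested rate into the SSCR0 divider field in one of two ways depending on the SSP generation. Both forms side by side, with assumed clock numbers and illustrative helper names:

#include <stdio.h>
#include <stdint.h>

/* The two SSCR0 divider encodings used by ssp_get_clk_div() above. */
static uint32_t clk_div_pxa25x(uint32_t ssp_clk, uint32_t rate)
{
    /* serial clock = ssp_clk / (2 * (div + 1)); 8-bit field at bit 8 */
    return ((ssp_clk / (2 * rate) - 1) & 0xff) << 8;
}

static uint32_t clk_div_later_ssp(uint32_t ssp_clk, uint32_t rate)
{
    /* serial clock = ssp_clk / (div + 1); 12-bit field at bit 8 */
    return ((ssp_clk / rate - 1) & 0xfff) << 8;
}

int main(void)
{
    uint32_t ssp_clk = 13000000;    /* assumed SSP input clock */
    uint32_t rate = 1000000;        /* assumed requested rate  */

    printf("PXA25x/CE4100 divider field: 0x%08x\n",
           (unsigned int)clk_div_pxa25x(ssp_clk, rate));
    printf("later SSP divider field:     0x%08x\n",
           (unsigned int)clk_div_later_ssp(ssp_clk, rate));
    return 0;
}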
} + + /* warn ... we force this to PIO mode */ + if (printk_ratelimit()) + dev_warn(&message->spi->dev, "pump_transfers: " + "DMA disabled for transfer length %ld " + "greater than %d\n", + (long)drv_data->len, MAX_DMA_LEN); + } + + /* Setup the transfer state based on the type of transfer */ + if (flush(drv_data) == 0) { + dev_err(&drv_data->pdev->dev, "pump_transfers: flush failed\n"); + message->status = -EIO; + giveback(drv_data); + return; + } + drv_data->n_bytes = chip->n_bytes; + drv_data->dma_width = chip->dma_width; + drv_data->tx = (void *)transfer->tx_buf; + drv_data->tx_end = drv_data->tx + transfer->len; + drv_data->rx = transfer->rx_buf; + drv_data->rx_end = drv_data->rx + transfer->len; + drv_data->rx_dma = transfer->rx_dma; + drv_data->tx_dma = transfer->tx_dma; + drv_data->len = transfer->len & DCMD_LENGTH; + drv_data->write = drv_data->tx ? chip->write : null_writer; + drv_data->read = drv_data->rx ? chip->read : null_reader; + + /* Change speed and bit per word on a per transfer */ + cr0 = chip->cr0; + if (transfer->speed_hz || transfer->bits_per_word) { + + bits = chip->bits_per_word; + speed = chip->speed_hz; + + if (transfer->speed_hz) + speed = transfer->speed_hz; + + if (transfer->bits_per_word) + bits = transfer->bits_per_word; + + clk_div = ssp_get_clk_div(ssp, speed); + + if (bits <= 8) { + drv_data->n_bytes = 1; + drv_data->dma_width = DCMD_WIDTH1; + drv_data->read = drv_data->read != null_reader ? + u8_reader : null_reader; + drv_data->write = drv_data->write != null_writer ? + u8_writer : null_writer; + } else if (bits <= 16) { + drv_data->n_bytes = 2; + drv_data->dma_width = DCMD_WIDTH2; + drv_data->read = drv_data->read != null_reader ? + u16_reader : null_reader; + drv_data->write = drv_data->write != null_writer ? + u16_writer : null_writer; + } else if (bits <= 32) { + drv_data->n_bytes = 4; + drv_data->dma_width = DCMD_WIDTH4; + drv_data->read = drv_data->read != null_reader ? + u32_reader : null_reader; + drv_data->write = drv_data->write != null_writer ? + u32_writer : null_writer; + } + /* if bits/word is changed in dma mode, then must check the + * thresholds and burst also */ + if (chip->enable_dma) { + if (set_dma_burst_and_threshold(chip, message->spi, + bits, &dma_burst, + &dma_thresh)) + if (printk_ratelimit()) + dev_warn(&message->spi->dev, + "pump_transfers: " + "DMA burst size reduced to " + "match bits_per_word\n"); + } + + cr0 = clk_div + | SSCR0_Motorola + | SSCR0_DataSize(bits > 16 ? bits - 16 : bits) + | SSCR0_SSE + | (bits > 16 ? SSCR0_EDSS : 0); + } + + message->state = RUNNING_STATE; + + /* Try to map dma buffer and do a dma transfer if successful, but + * only if the length is non-zero and less than MAX_DMA_LEN. + * + * Zero-length non-descriptor DMA is illegal on PXA2xx; force use + * of PIO instead. Care is needed above because the transfer may + * have have been passed with buffers that are already dma mapped. + * A zero-length transfer in PIO mode will not try to write/read + * to/from the buffers + * + * REVISIT large transfers are exactly where we most want to be + * using DMA. If this happens much, split those transfers into + * multiple DMA segments rather than forcing PIO. 
+ */ + drv_data->dma_mapped = 0; + if (drv_data->len > 0 && drv_data->len <= MAX_DMA_LEN) + drv_data->dma_mapped = map_dma_buffers(drv_data); + if (drv_data->dma_mapped) { + + /* Ensure we have the correct interrupt handler */ + drv_data->transfer_handler = dma_transfer; + + /* Setup rx DMA Channel */ + DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; + DSADR(drv_data->rx_channel) = drv_data->ssdr_physical; + DTADR(drv_data->rx_channel) = drv_data->rx_dma; + if (drv_data->rx == drv_data->null_dma_buf) + /* No target address increment */ + DCMD(drv_data->rx_channel) = DCMD_FLOWSRC + | drv_data->dma_width + | dma_burst + | drv_data->len; + else + DCMD(drv_data->rx_channel) = DCMD_INCTRGADDR + | DCMD_FLOWSRC + | drv_data->dma_width + | dma_burst + | drv_data->len; + + /* Setup tx DMA Channel */ + DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; + DSADR(drv_data->tx_channel) = drv_data->tx_dma; + DTADR(drv_data->tx_channel) = drv_data->ssdr_physical; + if (drv_data->tx == drv_data->null_dma_buf) + /* No source address increment */ + DCMD(drv_data->tx_channel) = DCMD_FLOWTRG + | drv_data->dma_width + | dma_burst + | drv_data->len; + else + DCMD(drv_data->tx_channel) = DCMD_INCSRCADDR + | DCMD_FLOWTRG + | drv_data->dma_width + | dma_burst + | drv_data->len; + + /* Enable dma end irqs on SSP to detect end of transfer */ + if (drv_data->ssp_type == PXA25x_SSP) + DCMD(drv_data->tx_channel) |= DCMD_ENDIRQEN; + + /* Clear status and start DMA engine */ + cr1 = chip->cr1 | dma_thresh | drv_data->dma_cr1; + write_SSSR(drv_data->clear_sr, reg); + DCSR(drv_data->rx_channel) |= DCSR_RUN; + DCSR(drv_data->tx_channel) |= DCSR_RUN; + } else { + /* Ensure we have the correct interrupt handler */ + drv_data->transfer_handler = interrupt_transfer; + + /* Clear status */ + cr1 = chip->cr1 | chip->threshold | drv_data->int_cr1; + write_SSSR_CS(drv_data, drv_data->clear_sr); + } + + /* see if we need to reload the config registers */ + if ((read_SSCR0(reg) != cr0) + || (read_SSCR1(reg) & SSCR1_CHANGE_MASK) != + (cr1 & SSCR1_CHANGE_MASK)) { + + /* stop the SSP, and update the other bits */ + write_SSCR0(cr0 & ~SSCR0_SSE, reg); + if (!pxa25x_ssp_comp(drv_data)) + write_SSTO(chip->timeout, reg); + /* first set CR1 without interrupt and service enables */ + write_SSCR1(cr1 & SSCR1_CHANGE_MASK, reg); + /* restart the SSP */ + write_SSCR0(cr0, reg); + + } else { + if (!pxa25x_ssp_comp(drv_data)) + write_SSTO(chip->timeout, reg); + } + + cs_assert(drv_data); + + /* after chip select, release the data by enabling service + * requests and interrupts, without changing any mode bits */ + write_SSCR1(cr1, reg); +} + +static void pump_messages(struct work_struct *work) +{ + struct driver_data *drv_data = + container_of(work, struct driver_data, pump_messages); + unsigned long flags; + + /* Lock queue and check for queue work */ + spin_lock_irqsave(&drv_data->lock, flags); + if (list_empty(&drv_data->queue) || drv_data->run == QUEUE_STOPPED) { + drv_data->busy = 0; + spin_unlock_irqrestore(&drv_data->lock, flags); + return; + } + + /* Make sure we are not already running a message */ + if (drv_data->cur_msg) { + spin_unlock_irqrestore(&drv_data->lock, flags); + return; + } + + /* Extract head of queue */ + drv_data->cur_msg = list_entry(drv_data->queue.next, + struct spi_message, queue); + list_del_init(&drv_data->cur_msg->queue); + + /* Initial message state*/ + drv_data->cur_msg->state = START_STATE; + drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next, + struct spi_transfer, + transfer_list); + + /* prepare 
to setup the SSP, in pump_transfers, using the per + * chip configuration */ + drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi); + + /* Mark as busy and launch transfers */ + tasklet_schedule(&drv_data->pump_transfers); + + drv_data->busy = 1; + spin_unlock_irqrestore(&drv_data->lock, flags); +} + +static int transfer(struct spi_device *spi, struct spi_message *msg) +{ + struct driver_data *drv_data = spi_master_get_devdata(spi->master); + unsigned long flags; + + spin_lock_irqsave(&drv_data->lock, flags); + + if (drv_data->run == QUEUE_STOPPED) { + spin_unlock_irqrestore(&drv_data->lock, flags); + return -ESHUTDOWN; + } + + msg->actual_length = 0; + msg->status = -EINPROGRESS; + msg->state = START_STATE; + + list_add_tail(&msg->queue, &drv_data->queue); + + if (drv_data->run == QUEUE_RUNNING && !drv_data->busy) + queue_work(drv_data->workqueue, &drv_data->pump_messages); + + spin_unlock_irqrestore(&drv_data->lock, flags); + + return 0; +} + +static int setup_cs(struct spi_device *spi, struct chip_data *chip, + struct pxa2xx_spi_chip *chip_info) +{ + int err = 0; + + if (chip == NULL || chip_info == NULL) + return 0; + + /* NOTE: setup() can be called multiple times, possibly with + * different chip_info, release previously requested GPIO + */ + if (gpio_is_valid(chip->gpio_cs)) + gpio_free(chip->gpio_cs); + + /* If (*cs_control) is provided, ignore GPIO chip select */ + if (chip_info->cs_control) { + chip->cs_control = chip_info->cs_control; + return 0; + } + + if (gpio_is_valid(chip_info->gpio_cs)) { + err = gpio_request(chip_info->gpio_cs, "SPI_CS"); + if (err) { + dev_err(&spi->dev, "failed to request chip select " + "GPIO%d\n", chip_info->gpio_cs); + return err; + } + + chip->gpio_cs = chip_info->gpio_cs; + chip->gpio_cs_inverted = spi->mode & SPI_CS_HIGH; + + err = gpio_direction_output(chip->gpio_cs, + !chip->gpio_cs_inverted); + } + + return err; +} + +static int setup(struct spi_device *spi) +{ + struct pxa2xx_spi_chip *chip_info = NULL; + struct chip_data *chip; + struct driver_data *drv_data = spi_master_get_devdata(spi->master); + struct ssp_device *ssp = drv_data->ssp; + unsigned int clk_div; + uint tx_thres = TX_THRESH_DFLT; + uint rx_thres = RX_THRESH_DFLT; + + if (!pxa25x_ssp_comp(drv_data) + && (spi->bits_per_word < 4 || spi->bits_per_word > 32)) { + dev_err(&spi->dev, "failed setup: ssp_type=%d, bits/wrd=%d " + "b/w not 4-32 for type non-PXA25x_SSP\n", + drv_data->ssp_type, spi->bits_per_word); + return -EINVAL; + } else if (pxa25x_ssp_comp(drv_data) + && (spi->bits_per_word < 4 + || spi->bits_per_word > 16)) { + dev_err(&spi->dev, "failed setup: ssp_type=%d, bits/wrd=%d " + "b/w not 4-16 for type PXA25x_SSP\n", + drv_data->ssp_type, spi->bits_per_word); + return -EINVAL; + } + + /* Only alloc on first setup */ + chip = spi_get_ctldata(spi); + if (!chip) { + chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL); + if (!chip) { + dev_err(&spi->dev, + "failed setup: can't allocate chip data\n"); + return -ENOMEM; + } + + if (drv_data->ssp_type == CE4100_SSP) { + if (spi->chip_select > 4) { + dev_err(&spi->dev, "failed setup: " + "cs number must not be > 4.\n"); + kfree(chip); + return -EINVAL; + } + + chip->frm = spi->chip_select; + } else + chip->gpio_cs = -1; + chip->enable_dma = 0; + chip->timeout = TIMOUT_DFLT; + chip->dma_burst_size = drv_data->master_info->enable_dma ? + DCMD_BURST8 : 0; + } + + /* protocol drivers may change the chip settings, so... 
+ * if chip_info exists, use it */ + chip_info = spi->controller_data; + + /* chip_info isn't always needed */ + chip->cr1 = 0; + if (chip_info) { + if (chip_info->timeout) + chip->timeout = chip_info->timeout; + if (chip_info->tx_threshold) + tx_thres = chip_info->tx_threshold; + if (chip_info->rx_threshold) + rx_thres = chip_info->rx_threshold; + chip->enable_dma = drv_data->master_info->enable_dma; + chip->dma_threshold = 0; + if (chip_info->enable_loopback) + chip->cr1 = SSCR1_LBM; + } + + chip->threshold = (SSCR1_RxTresh(rx_thres) & SSCR1_RFT) | + (SSCR1_TxTresh(tx_thres) & SSCR1_TFT); + + /* set dma burst and threshold outside of chip_info path so that if + * chip_info goes away after setting chip->enable_dma, the + * burst and threshold can still respond to changes in bits_per_word */ + if (chip->enable_dma) { + /* set up legal burst and threshold for dma */ + if (set_dma_burst_and_threshold(chip, spi, spi->bits_per_word, + &chip->dma_burst_size, + &chip->dma_threshold)) { + dev_warn(&spi->dev, "in setup: DMA burst size reduced " + "to match bits_per_word\n"); + } + } + + clk_div = ssp_get_clk_div(ssp, spi->max_speed_hz); + chip->speed_hz = spi->max_speed_hz; + + chip->cr0 = clk_div + | SSCR0_Motorola + | SSCR0_DataSize(spi->bits_per_word > 16 ? + spi->bits_per_word - 16 : spi->bits_per_word) + | SSCR0_SSE + | (spi->bits_per_word > 16 ? SSCR0_EDSS : 0); + chip->cr1 &= ~(SSCR1_SPO | SSCR1_SPH); + chip->cr1 |= (((spi->mode & SPI_CPHA) != 0) ? SSCR1_SPH : 0) + | (((spi->mode & SPI_CPOL) != 0) ? SSCR1_SPO : 0); + + /* NOTE: PXA25x_SSP _could_ use external clocking ... */ + if (!pxa25x_ssp_comp(drv_data)) + dev_dbg(&spi->dev, "%ld Hz actual, %s\n", + clk_get_rate(ssp->clk) + / (1 + ((chip->cr0 & SSCR0_SCR(0xfff)) >> 8)), + chip->enable_dma ? "DMA" : "PIO"); + else + dev_dbg(&spi->dev, "%ld Hz actual, %s\n", + clk_get_rate(ssp->clk) / 2 + / (1 + ((chip->cr0 & SSCR0_SCR(0x0ff)) >> 8)), + chip->enable_dma ? 
"DMA" : "PIO"); + + if (spi->bits_per_word <= 8) { + chip->n_bytes = 1; + chip->dma_width = DCMD_WIDTH1; + chip->read = u8_reader; + chip->write = u8_writer; + } else if (spi->bits_per_word <= 16) { + chip->n_bytes = 2; + chip->dma_width = DCMD_WIDTH2; + chip->read = u16_reader; + chip->write = u16_writer; + } else if (spi->bits_per_word <= 32) { + chip->cr0 |= SSCR0_EDSS; + chip->n_bytes = 4; + chip->dma_width = DCMD_WIDTH4; + chip->read = u32_reader; + chip->write = u32_writer; + } else { + dev_err(&spi->dev, "invalid wordsize\n"); + return -ENODEV; + } + chip->bits_per_word = spi->bits_per_word; + + spi_set_ctldata(spi, chip); + + if (drv_data->ssp_type == CE4100_SSP) + return 0; + + return setup_cs(spi, chip, chip_info); +} + +static void cleanup(struct spi_device *spi) +{ + struct chip_data *chip = spi_get_ctldata(spi); + struct driver_data *drv_data = spi_master_get_devdata(spi->master); + + if (!chip) + return; + + if (drv_data->ssp_type != CE4100_SSP && gpio_is_valid(chip->gpio_cs)) + gpio_free(chip->gpio_cs); + + kfree(chip); +} + +static int __devinit init_queue(struct driver_data *drv_data) +{ + INIT_LIST_HEAD(&drv_data->queue); + spin_lock_init(&drv_data->lock); + + drv_data->run = QUEUE_STOPPED; + drv_data->busy = 0; + + tasklet_init(&drv_data->pump_transfers, + pump_transfers, (unsigned long)drv_data); + + INIT_WORK(&drv_data->pump_messages, pump_messages); + drv_data->workqueue = create_singlethread_workqueue( + dev_name(drv_data->master->dev.parent)); + if (drv_data->workqueue == NULL) + return -EBUSY; + + return 0; +} + +static int start_queue(struct driver_data *drv_data) +{ + unsigned long flags; + + spin_lock_irqsave(&drv_data->lock, flags); + + if (drv_data->run == QUEUE_RUNNING || drv_data->busy) { + spin_unlock_irqrestore(&drv_data->lock, flags); + return -EBUSY; + } + + drv_data->run = QUEUE_RUNNING; + drv_data->cur_msg = NULL; + drv_data->cur_transfer = NULL; + drv_data->cur_chip = NULL; + spin_unlock_irqrestore(&drv_data->lock, flags); + + queue_work(drv_data->workqueue, &drv_data->pump_messages); + + return 0; +} + +static int stop_queue(struct driver_data *drv_data) +{ + unsigned long flags; + unsigned limit = 500; + int status = 0; + + spin_lock_irqsave(&drv_data->lock, flags); + + /* This is a bit lame, but is optimized for the common execution path. + * A wait_queue on the drv_data->busy could be used, but then the common + * execution path (pump_messages) would be required to call wake_up or + * friends on every SPI message. Do this instead */ + drv_data->run = QUEUE_STOPPED; + while ((!list_empty(&drv_data->queue) || drv_data->busy) && limit--) { + spin_unlock_irqrestore(&drv_data->lock, flags); + msleep(10); + spin_lock_irqsave(&drv_data->lock, flags); + } + + if (!list_empty(&drv_data->queue) || drv_data->busy) + status = -EBUSY; + + spin_unlock_irqrestore(&drv_data->lock, flags); + + return status; +} + +static int destroy_queue(struct driver_data *drv_data) +{ + int status; + + status = stop_queue(drv_data); + /* we are unloading the module or failing to load (only two calls + * to this routine), and neither call can handle a return value. + * However, destroy_workqueue calls flush_workqueue, and that will + * block until all work is done. If the reason that stop_queue + * timed out is that the work will never finish, then it does no + * good to call destroy_workqueue, so return anyway. 
*/ + if (status != 0) + return status; + + destroy_workqueue(drv_data->workqueue); + + return 0; +} + +static int __devinit pxa2xx_spi_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct pxa2xx_spi_master *platform_info; + struct spi_master *master; + struct driver_data *drv_data; + struct ssp_device *ssp; + int status; + + platform_info = dev->platform_data; + + ssp = pxa_ssp_request(pdev->id, pdev->name); + if (ssp == NULL) { + dev_err(&pdev->dev, "failed to request SSP%d\n", pdev->id); + return -ENODEV; + } + + /* Allocate master with space for drv_data and null dma buffer */ + master = spi_alloc_master(dev, sizeof(struct driver_data) + 16); + if (!master) { + dev_err(&pdev->dev, "cannot alloc spi_master\n"); + pxa_ssp_free(ssp); + return -ENOMEM; + } + drv_data = spi_master_get_devdata(master); + drv_data->master = master; + drv_data->master_info = platform_info; + drv_data->pdev = pdev; + drv_data->ssp = ssp; + + master->dev.parent = &pdev->dev; + master->dev.of_node = pdev->dev.of_node; + /* the spi->mode bits understood by this driver: */ + master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; + + master->bus_num = pdev->id; + master->num_chipselect = platform_info->num_chipselect; + master->dma_alignment = DMA_ALIGNMENT; + master->cleanup = cleanup; + master->setup = setup; + master->transfer = transfer; + + drv_data->ssp_type = ssp->type; + drv_data->null_dma_buf = (u32 *)ALIGN((u32)(drv_data + + sizeof(struct driver_data)), 8); + + drv_data->ioaddr = ssp->mmio_base; + drv_data->ssdr_physical = ssp->phys_base + SSDR; + if (pxa25x_ssp_comp(drv_data)) { + drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE; + drv_data->dma_cr1 = 0; + drv_data->clear_sr = SSSR_ROR; + drv_data->mask_sr = SSSR_RFS | SSSR_TFS | SSSR_ROR; + } else { + drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE | SSCR1_TINTE; + drv_data->dma_cr1 = SSCR1_TSRE | SSCR1_RSRE | SSCR1_TINTE; + drv_data->clear_sr = SSSR_ROR | SSSR_TINT; + drv_data->mask_sr = SSSR_TINT | SSSR_RFS | SSSR_TFS | SSSR_ROR; + } + + status = request_irq(ssp->irq, ssp_int, IRQF_SHARED, dev_name(dev), + drv_data); + if (status < 0) { + dev_err(&pdev->dev, "cannot get IRQ %d\n", ssp->irq); + goto out_error_master_alloc; + } + + /* Setup DMA if requested */ + drv_data->tx_channel = -1; + drv_data->rx_channel = -1; + if (platform_info->enable_dma) { + + /* Get two DMA channels (rx and tx) */ + drv_data->rx_channel = pxa_request_dma("pxa2xx_spi_ssp_rx", + DMA_PRIO_HIGH, + dma_handler, + drv_data); + if (drv_data->rx_channel < 0) { + dev_err(dev, "problem (%d) requesting rx channel\n", + drv_data->rx_channel); + status = -ENODEV; + goto out_error_irq_alloc; + } + drv_data->tx_channel = pxa_request_dma("pxa2xx_spi_ssp_tx", + DMA_PRIO_MEDIUM, + dma_handler, + drv_data); + if (drv_data->tx_channel < 0) { + dev_err(dev, "problem (%d) requesting tx channel\n", + drv_data->tx_channel); + status = -ENODEV; + goto out_error_dma_alloc; + } + + DRCMR(ssp->drcmr_rx) = DRCMR_MAPVLD | drv_data->rx_channel; + DRCMR(ssp->drcmr_tx) = DRCMR_MAPVLD | drv_data->tx_channel; + } + + /* Enable SOC clock */ + clk_enable(ssp->clk); + + /* Load default SSP configuration */ + write_SSCR0(0, drv_data->ioaddr); + write_SSCR1(SSCR1_RxTresh(RX_THRESH_DFLT) | + SSCR1_TxTresh(TX_THRESH_DFLT), + drv_data->ioaddr); + write_SSCR0(SSCR0_SCR(2) + | SSCR0_Motorola + | SSCR0_DataSize(8), + drv_data->ioaddr); + if (!pxa25x_ssp_comp(drv_data)) + write_SSTO(0, drv_data->ioaddr); + write_SSPSP(0, drv_data->ioaddr); + + /* Initial and start queue */ + status = init_queue(drv_data); 
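
The probe path above places its dummy DMA buffer in the spare bytes allocated past driver_data, rounded up to an 8-byte boundary with ALIGN(). A tiny sketch of that rounding follows; the macro restates the usual power-of-two round-up for illustration rather than using the kernel header.

#include <stdio.h>

/* Round x up to the next multiple of a (a power of two) -- the same
 * rounding ALIGN() performs for null_dma_buf above. */
#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
        unsigned long addr = 0x1003;

        /* 0x1003 rounded up to an 8-byte boundary is 0x1008 */
        printf("0x%lx -> 0x%lx\n", addr, ALIGN_UP(addr, 8));
        return 0;
}
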
+ if (status != 0) { + dev_err(&pdev->dev, "problem initializing queue\n"); + goto out_error_clock_enabled; + } + status = start_queue(drv_data); + if (status != 0) { + dev_err(&pdev->dev, "problem starting queue\n"); + goto out_error_clock_enabled; + } + + /* Register with the SPI framework */ + platform_set_drvdata(pdev, drv_data); + status = spi_register_master(master); + if (status != 0) { + dev_err(&pdev->dev, "problem registering spi master\n"); + goto out_error_queue_alloc; + } + + return status; + +out_error_queue_alloc: + destroy_queue(drv_data); + +out_error_clock_enabled: + clk_disable(ssp->clk); + +out_error_dma_alloc: + if (drv_data->tx_channel != -1) + pxa_free_dma(drv_data->tx_channel); + if (drv_data->rx_channel != -1) + pxa_free_dma(drv_data->rx_channel); + +out_error_irq_alloc: + free_irq(ssp->irq, drv_data); + +out_error_master_alloc: + spi_master_put(master); + pxa_ssp_free(ssp); + return status; +} + +static int pxa2xx_spi_remove(struct platform_device *pdev) +{ + struct driver_data *drv_data = platform_get_drvdata(pdev); + struct ssp_device *ssp; + int status = 0; + + if (!drv_data) + return 0; + ssp = drv_data->ssp; + + /* Remove the queue */ + status = destroy_queue(drv_data); + if (status != 0) + /* the kernel does not check the return status of this + * this routine (mod->exit, within the kernel). Therefore + * nothing is gained by returning from here, the module is + * going away regardless, and we should not leave any more + * resources allocated than necessary. We cannot free the + * message memory in drv_data->queue, but we can release the + * resources below. I think the kernel should honor -EBUSY + * returns but... */ + dev_err(&pdev->dev, "pxa2xx_spi_remove: workqueue will not " + "complete, message memory not freed\n"); + + /* Disable the SSP at the peripheral and SOC level */ + write_SSCR0(0, drv_data->ioaddr); + clk_disable(ssp->clk); + + /* Release DMA */ + if (drv_data->master_info->enable_dma) { + DRCMR(ssp->drcmr_rx) = 0; + DRCMR(ssp->drcmr_tx) = 0; + pxa_free_dma(drv_data->tx_channel); + pxa_free_dma(drv_data->rx_channel); + } + + /* Release IRQ */ + free_irq(ssp->irq, drv_data); + + /* Release SSP */ + pxa_ssp_free(ssp); + + /* Disconnect from the SPI framework */ + spi_unregister_master(drv_data->master); + + /* Prevent double remove */ + platform_set_drvdata(pdev, NULL); + + return 0; +} + +static void pxa2xx_spi_shutdown(struct platform_device *pdev) +{ + int status = 0; + + if ((status = pxa2xx_spi_remove(pdev)) != 0) + dev_err(&pdev->dev, "shutdown failed with %d\n", status); +} + +#ifdef CONFIG_PM +static int pxa2xx_spi_suspend(struct device *dev) +{ + struct driver_data *drv_data = dev_get_drvdata(dev); + struct ssp_device *ssp = drv_data->ssp; + int status = 0; + + status = stop_queue(drv_data); + if (status != 0) + return status; + write_SSCR0(0, drv_data->ioaddr); + clk_disable(ssp->clk); + + return 0; +} + +static int pxa2xx_spi_resume(struct device *dev) +{ + struct driver_data *drv_data = dev_get_drvdata(dev); + struct ssp_device *ssp = drv_data->ssp; + int status = 0; + + if (drv_data->rx_channel != -1) + DRCMR(drv_data->ssp->drcmr_rx) = + DRCMR_MAPVLD | drv_data->rx_channel; + if (drv_data->tx_channel != -1) + DRCMR(drv_data->ssp->drcmr_tx) = + DRCMR_MAPVLD | drv_data->tx_channel; + + /* Enable the SSP clock */ + clk_enable(ssp->clk); + + /* Start the queue running */ + status = start_queue(drv_data); + if (status != 0) { + dev_err(dev, "problem starting queue (%d)\n", status); + return status; + } + + return 0; +} + +static const 
struct dev_pm_ops pxa2xx_spi_pm_ops = { + .suspend = pxa2xx_spi_suspend, + .resume = pxa2xx_spi_resume, +}; +#endif + +static struct platform_driver driver = { + .driver = { + .name = "pxa2xx-spi", + .owner = THIS_MODULE, +#ifdef CONFIG_PM + .pm = &pxa2xx_spi_pm_ops, +#endif + }, + .probe = pxa2xx_spi_probe, + .remove = pxa2xx_spi_remove, + .shutdown = pxa2xx_spi_shutdown, +}; + +static int __init pxa2xx_spi_init(void) +{ + return platform_driver_register(&driver); +} +subsys_initcall(pxa2xx_spi_init); + +static void __exit pxa2xx_spi_exit(void) +{ + platform_driver_unregister(&driver); +} +module_exit(pxa2xx_spi_exit); diff --git a/drivers/spi/spi-s3c24xx-fiq.S b/drivers/spi/spi-s3c24xx-fiq.S new file mode 100644 index 0000000..059f2dc --- /dev/null +++ b/drivers/spi/spi-s3c24xx-fiq.S @@ -0,0 +1,116 @@ +/* linux/drivers/spi/spi_s3c24xx_fiq.S + * + * Copyright 2009 Simtec Electronics + * Ben Dooks + * + * S3C24XX SPI - FIQ pseudo-DMA transfer code + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. +*/ + +#include +#include + +#include +#include +#include + +#include "spi-s3c24xx-fiq.h" + + .text + + @ entry to these routines is as follows, with the register names + @ defined in fiq.h so that they can be shared with the C files which + @ setup the calling registers. + @ + @ fiq_rirq The base of the IRQ registers to find S3C2410_SRCPND + @ fiq_rtmp Temporary register to hold tx/rx data + @ fiq_rspi The base of the SPI register block + @ fiq_rtx The tx buffer pointer + @ fiq_rrx The rx buffer pointer + @ fiq_rcount The number of bytes to move + + @ each entry starts with a word entry of how long it is + @ and an offset to the irq acknowledgment word + +ENTRY(s3c24xx_spi_fiq_rx) +s3c24xx_spi_fix_rx: + .word fiq_rx_end - fiq_rx_start + .word fiq_rx_irq_ack - fiq_rx_start +fiq_rx_start: + ldr fiq_rtmp, fiq_rx_irq_ack + str fiq_rtmp, [ fiq_rirq, # S3C2410_SRCPND - S3C24XX_VA_IRQ ] + + ldrb fiq_rtmp, [ fiq_rspi, # S3C2410_SPRDAT ] + strb fiq_rtmp, [ fiq_rrx ], #1 + + mov fiq_rtmp, #0xff + strb fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ] + + subs fiq_rcount, fiq_rcount, #1 + subnes pc, lr, #4 @@ return, still have work to do + + @@ set IRQ controller so that next op will trigger IRQ + mov fiq_rtmp, #0 + str fiq_rtmp, [ fiq_rirq, # S3C2410_INTMOD - S3C24XX_VA_IRQ ] + subs pc, lr, #4 + +fiq_rx_irq_ack: + .word 0 +fiq_rx_end: + +ENTRY(s3c24xx_spi_fiq_txrx) +s3c24xx_spi_fiq_txrx: + .word fiq_txrx_end - fiq_txrx_start + .word fiq_txrx_irq_ack - fiq_txrx_start +fiq_txrx_start: + + ldrb fiq_rtmp, [ fiq_rspi, # S3C2410_SPRDAT ] + strb fiq_rtmp, [ fiq_rrx ], #1 + + ldr fiq_rtmp, fiq_txrx_irq_ack + str fiq_rtmp, [ fiq_rirq, # S3C2410_SRCPND - S3C24XX_VA_IRQ ] + + ldrb fiq_rtmp, [ fiq_rtx ], #1 + strb fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ] + + subs fiq_rcount, fiq_rcount, #1 + subnes pc, lr, #4 @@ return, still have work to do + + mov fiq_rtmp, #0 + str fiq_rtmp, [ fiq_rirq, # S3C2410_INTMOD - S3C24XX_VA_IRQ ] + subs pc, lr, #4 + +fiq_txrx_irq_ack: + .word 0 + +fiq_txrx_end: + +ENTRY(s3c24xx_spi_fiq_tx) +s3c24xx_spi_fix_tx: + .word fiq_tx_end - fiq_tx_start + .word fiq_tx_irq_ack - fiq_tx_start +fiq_tx_start: + ldrb fiq_rtmp, [ fiq_rspi, # S3C2410_SPRDAT ] + + ldr fiq_rtmp, fiq_tx_irq_ack + str fiq_rtmp, [ fiq_rirq, # S3C2410_SRCPND - S3C24XX_VA_IRQ ] + + ldrb fiq_rtmp, [ fiq_rtx ], #1 + strb fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ] + + subs fiq_rcount, fiq_rcount, #1 + 
subnes pc, lr, #4 @@ return, still have work to do + + mov fiq_rtmp, #0 + str fiq_rtmp, [ fiq_rirq, # S3C2410_INTMOD - S3C24XX_VA_IRQ ] + subs pc, lr, #4 + +fiq_tx_irq_ack: + .word 0 + +fiq_tx_end: + + .end diff --git a/drivers/spi/spi-s3c24xx-fiq.h b/drivers/spi/spi-s3c24xx-fiq.h new file mode 100644 index 0000000..a5950bb --- /dev/null +++ b/drivers/spi/spi-s3c24xx-fiq.h @@ -0,0 +1,26 @@ +/* linux/drivers/spi/spi_s3c24xx_fiq.h + * + * Copyright 2009 Simtec Electronics + * Ben Dooks + * + * S3C24XX SPI - FIQ pseudo-DMA transfer support + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. +*/ + +/* We have R8 through R13 to play with */ + +#ifdef __ASSEMBLY__ +#define __REG_NR(x) r##x +#else +#define __REG_NR(x) (x) +#endif + +#define fiq_rspi __REG_NR(8) +#define fiq_rtmp __REG_NR(9) +#define fiq_rrx __REG_NR(10) +#define fiq_rtx __REG_NR(11) +#define fiq_rcount __REG_NR(12) +#define fiq_rirq __REG_NR(13) diff --git a/drivers/spi/spi-s3c24xx-gpio.c b/drivers/spi/spi-s3c24xx-gpio.c new file mode 100644 index 0000000..2d3c085 --- /dev/null +++ b/drivers/spi/spi-s3c24xx-gpio.c @@ -0,0 +1,200 @@ +/* + * Copyright (c) 2006 Ben Dooks + * Copyright (c) 2006 Simtec Electronics + * + * S3C24XX GPIO based SPI driver + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * +*/ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include + +struct s3c2410_spigpio { + struct spi_bitbang bitbang; + + struct s3c2410_spigpio_info *info; + struct platform_device *dev; +}; + +static inline struct s3c2410_spigpio *spidev_to_sg(struct spi_device *spi) +{ + return spi_master_get_devdata(spi->master); +} + +static inline void setsck(struct spi_device *dev, int on) +{ + struct s3c2410_spigpio *sg = spidev_to_sg(dev); + s3c2410_gpio_setpin(sg->info->pin_clk, on ? 1 : 0); +} + +static inline void setmosi(struct spi_device *dev, int on) +{ + struct s3c2410_spigpio *sg = spidev_to_sg(dev); + s3c2410_gpio_setpin(sg->info->pin_mosi, on ? 1 : 0); +} + +static inline u32 getmiso(struct spi_device *dev) +{ + struct s3c2410_spigpio *sg = spidev_to_sg(dev); + return s3c2410_gpio_getpin(sg->info->pin_miso) ? 
1 : 0; +} + +#define spidelay(x) ndelay(x) + +#include "spi-bitbang-txrx.h" + + +static u32 s3c2410_spigpio_txrx_mode0(struct spi_device *spi, + unsigned nsecs, u32 word, u8 bits) +{ + return bitbang_txrx_be_cpha0(spi, nsecs, 0, 0, word, bits); +} + +static u32 s3c2410_spigpio_txrx_mode1(struct spi_device *spi, + unsigned nsecs, u32 word, u8 bits) +{ + return bitbang_txrx_be_cpha1(spi, nsecs, 0, 0, word, bits); +} + +static u32 s3c2410_spigpio_txrx_mode2(struct spi_device *spi, + unsigned nsecs, u32 word, u8 bits) +{ + return bitbang_txrx_be_cpha0(spi, nsecs, 1, 0, word, bits); +} + +static u32 s3c2410_spigpio_txrx_mode3(struct spi_device *spi, + unsigned nsecs, u32 word, u8 bits) +{ + return bitbang_txrx_be_cpha1(spi, nsecs, 1, 0, word, bits); +} + + +static void s3c2410_spigpio_chipselect(struct spi_device *dev, int value) +{ + struct s3c2410_spigpio *sg = spidev_to_sg(dev); + + if (sg->info && sg->info->chip_select) + (sg->info->chip_select)(sg->info, value); +} + +static int s3c2410_spigpio_probe(struct platform_device *dev) +{ + struct s3c2410_spigpio_info *info; + struct spi_master *master; + struct s3c2410_spigpio *sp; + int ret; + + master = spi_alloc_master(&dev->dev, sizeof(struct s3c2410_spigpio)); + if (master == NULL) { + dev_err(&dev->dev, "failed to allocate spi master\n"); + ret = -ENOMEM; + goto err; + } + + sp = spi_master_get_devdata(master); + + platform_set_drvdata(dev, sp); + + /* copy in the plkatform data */ + info = sp->info = dev->dev.platform_data; + + /* setup spi bitbang adaptor */ + sp->bitbang.master = spi_master_get(master); + sp->bitbang.master->bus_num = info->bus_num; + sp->bitbang.master->num_chipselect = info->num_chipselect; + sp->bitbang.chipselect = s3c2410_spigpio_chipselect; + + sp->bitbang.txrx_word[SPI_MODE_0] = s3c2410_spigpio_txrx_mode0; + sp->bitbang.txrx_word[SPI_MODE_1] = s3c2410_spigpio_txrx_mode1; + sp->bitbang.txrx_word[SPI_MODE_2] = s3c2410_spigpio_txrx_mode2; + sp->bitbang.txrx_word[SPI_MODE_3] = s3c2410_spigpio_txrx_mode3; + + /* set state of spi pins, always assume that the clock is + * available, but do check the MOSI and MISO. 
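
The four txrx_word handlers above only bind the generic big-endian bit-bang helpers from spi-bitbang-txrx.h to the GPIO accessors, one handler per SPI mode. As a rough stand-alone illustration of what the mode-0 variant does (clock idle low, MOSI set up while the clock is low, MISO sampled on the rising edge, MSB first): the GPIO callbacks below are stubs, and the function is a simplified sketch rather than the kernel helper itself.

#include <stdint.h>
#include <stdio.h>

/* Stub GPIO accessors; a real driver would drive/sample pins here. */
static void set_sck(int level)  { (void)level; }
static void set_mosi(int level) { (void)level; }
static int  get_miso(void)      { return 1; }

/* Rough mode-0 (CPOL=0, CPHA=0) bit-bang of one word, MSB first. */
static uint32_t bitbang_word_mode0(uint32_t word, unsigned int bits)
{
        uint32_t in = 0;

        for (word <<= (32 - bits); bits; bits--) {
                set_mosi(!!(word & 0x80000000u));  /* data out while SCK low */
                set_sck(1);                        /* slave samples on this edge */
                in = (in << 1) | (get_miso() & 1); /* sample MISO while SCK high */
                set_sck(0);                        /* back to idle for next bit */
                word <<= 1;
        }
        return in;
}

int main(void)
{
        /* With get_miso() stubbed to 1, an 8-bit transfer reads back 0xff. */
        printf("rx = 0x%02x\n", (unsigned int)bitbang_word_mode0(0xa5, 8));
        return 0;
}
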
*/ + s3c2410_gpio_setpin(info->pin_clk, 0); + s3c2410_gpio_cfgpin(info->pin_clk, S3C2410_GPIO_OUTPUT); + + if (info->pin_mosi < S3C2410_GPH10) { + s3c2410_gpio_setpin(info->pin_mosi, 0); + s3c2410_gpio_cfgpin(info->pin_mosi, S3C2410_GPIO_OUTPUT); + } + + if (info->pin_miso != S3C2410_GPA0 && info->pin_miso < S3C2410_GPH10) + s3c2410_gpio_cfgpin(info->pin_miso, S3C2410_GPIO_INPUT); + + ret = spi_bitbang_start(&sp->bitbang); + if (ret) + goto err_no_bitbang; + + return 0; + + err_no_bitbang: + spi_master_put(sp->bitbang.master); + err: + return ret; + +} + +static int s3c2410_spigpio_remove(struct platform_device *dev) +{ + struct s3c2410_spigpio *sp = platform_get_drvdata(dev); + + spi_bitbang_stop(&sp->bitbang); + spi_master_put(sp->bitbang.master); + + return 0; +} + +/* all gpio should be held over suspend/resume, so we should + * not need to deal with this +*/ + +#define s3c2410_spigpio_suspend NULL +#define s3c2410_spigpio_resume NULL + +/* work with hotplug and coldplug */ +MODULE_ALIAS("platform:spi_s3c24xx_gpio"); + +static struct platform_driver s3c2410_spigpio_drv = { + .probe = s3c2410_spigpio_probe, + .remove = s3c2410_spigpio_remove, + .suspend = s3c2410_spigpio_suspend, + .resume = s3c2410_spigpio_resume, + .driver = { + .name = "spi_s3c24xx_gpio", + .owner = THIS_MODULE, + }, +}; + +static int __init s3c2410_spigpio_init(void) +{ + return platform_driver_register(&s3c2410_spigpio_drv); +} + +static void __exit s3c2410_spigpio_exit(void) +{ + platform_driver_unregister(&s3c2410_spigpio_drv); +} + +module_init(s3c2410_spigpio_init); +module_exit(s3c2410_spigpio_exit); + +MODULE_DESCRIPTION("S3C24XX SPI Driver"); +MODULE_AUTHOR("Ben Dooks, "); +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi-s3c24xx.c b/drivers/spi/spi-s3c24xx.c new file mode 100644 index 0000000..1996ac5 --- /dev/null +++ b/drivers/spi/spi-s3c24xx.c @@ -0,0 +1,745 @@ +/* + * Copyright (c) 2006 Ben Dooks + * Copyright 2006-2009 Simtec Electronics + * Ben Dooks + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * +*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include + +#include +#include + +#include "spi-s3c24xx-fiq.h" + +/** + * s3c24xx_spi_devstate - per device data + * @hz: Last frequency calculated for @sppre field. + * @mode: Last mode setting for the @spcon field. + * @spcon: Value to write to the SPCON register. + * @sppre: Value to write to the SPPRE register. 
+ */ +struct s3c24xx_spi_devstate { + unsigned int hz; + unsigned int mode; + u8 spcon; + u8 sppre; +}; + +enum spi_fiq_mode { + FIQ_MODE_NONE = 0, + FIQ_MODE_TX = 1, + FIQ_MODE_RX = 2, + FIQ_MODE_TXRX = 3, +}; + +struct s3c24xx_spi { + /* bitbang has to be first */ + struct spi_bitbang bitbang; + struct completion done; + + void __iomem *regs; + int irq; + int len; + int count; + + struct fiq_handler fiq_handler; + enum spi_fiq_mode fiq_mode; + unsigned char fiq_inuse; + unsigned char fiq_claimed; + + void (*set_cs)(struct s3c2410_spi_info *spi, + int cs, int pol); + + /* data buffers */ + const unsigned char *tx; + unsigned char *rx; + + struct clk *clk; + struct resource *ioarea; + struct spi_master *master; + struct spi_device *curdev; + struct device *dev; + struct s3c2410_spi_info *pdata; +}; + + +#define SPCON_DEFAULT (S3C2410_SPCON_MSTR | S3C2410_SPCON_SMOD_INT) +#define SPPIN_DEFAULT (S3C2410_SPPIN_KEEP) + +static inline struct s3c24xx_spi *to_hw(struct spi_device *sdev) +{ + return spi_master_get_devdata(sdev->master); +} + +static void s3c24xx_spi_gpiocs(struct s3c2410_spi_info *spi, int cs, int pol) +{ + gpio_set_value(spi->pin_cs, pol); +} + +static void s3c24xx_spi_chipsel(struct spi_device *spi, int value) +{ + struct s3c24xx_spi_devstate *cs = spi->controller_state; + struct s3c24xx_spi *hw = to_hw(spi); + unsigned int cspol = spi->mode & SPI_CS_HIGH ? 1 : 0; + + /* change the chipselect state and the state of the spi engine clock */ + + switch (value) { + case BITBANG_CS_INACTIVE: + hw->set_cs(hw->pdata, spi->chip_select, cspol^1); + writeb(cs->spcon, hw->regs + S3C2410_SPCON); + break; + + case BITBANG_CS_ACTIVE: + writeb(cs->spcon | S3C2410_SPCON_ENSCK, + hw->regs + S3C2410_SPCON); + hw->set_cs(hw->pdata, spi->chip_select, cspol); + break; + } +} + +static int s3c24xx_spi_update_state(struct spi_device *spi, + struct spi_transfer *t) +{ + struct s3c24xx_spi *hw = to_hw(spi); + struct s3c24xx_spi_devstate *cs = spi->controller_state; + unsigned int bpw; + unsigned int hz; + unsigned int div; + unsigned long clk; + + bpw = t ? t->bits_per_word : spi->bits_per_word; + hz = t ? 
t->speed_hz : spi->max_speed_hz; + + if (!bpw) + bpw = 8; + + if (!hz) + hz = spi->max_speed_hz; + + if (bpw != 8) { + dev_err(&spi->dev, "invalid bits-per-word (%d)\n", bpw); + return -EINVAL; + } + + if (spi->mode != cs->mode) { + u8 spcon = SPCON_DEFAULT | S3C2410_SPCON_ENSCK; + + if (spi->mode & SPI_CPHA) + spcon |= S3C2410_SPCON_CPHA_FMTB; + + if (spi->mode & SPI_CPOL) + spcon |= S3C2410_SPCON_CPOL_HIGH; + + cs->mode = spi->mode; + cs->spcon = spcon; + } + + if (cs->hz != hz) { + clk = clk_get_rate(hw->clk); + div = DIV_ROUND_UP(clk, hz * 2) - 1; + + if (div > 255) + div = 255; + + dev_dbg(&spi->dev, "pre-scaler=%d (wanted %d, got %ld)\n", + div, hz, clk / (2 * (div + 1))); + + cs->hz = hz; + cs->sppre = div; + } + + return 0; +} + +static int s3c24xx_spi_setupxfer(struct spi_device *spi, + struct spi_transfer *t) +{ + struct s3c24xx_spi_devstate *cs = spi->controller_state; + struct s3c24xx_spi *hw = to_hw(spi); + int ret; + + ret = s3c24xx_spi_update_state(spi, t); + if (!ret) + writeb(cs->sppre, hw->regs + S3C2410_SPPRE); + + return ret; +} + +static int s3c24xx_spi_setup(struct spi_device *spi) +{ + struct s3c24xx_spi_devstate *cs = spi->controller_state; + struct s3c24xx_spi *hw = to_hw(spi); + int ret; + + /* allocate settings on the first call */ + if (!cs) { + cs = kzalloc(sizeof(struct s3c24xx_spi_devstate), GFP_KERNEL); + if (!cs) { + dev_err(&spi->dev, "no memory for controller state\n"); + return -ENOMEM; + } + + cs->spcon = SPCON_DEFAULT; + cs->hz = -1; + spi->controller_state = cs; + } + + /* initialise the state from the device */ + ret = s3c24xx_spi_update_state(spi, NULL); + if (ret) + return ret; + + spin_lock(&hw->bitbang.lock); + if (!hw->bitbang.busy) { + hw->bitbang.chipselect(spi, BITBANG_CS_INACTIVE); + /* need to ndelay for 0.5 clocktick ? */ + } + spin_unlock(&hw->bitbang.lock); + + return 0; +} + +static void s3c24xx_spi_cleanup(struct spi_device *spi) +{ + kfree(spi->controller_state); +} + +static inline unsigned int hw_txbyte(struct s3c24xx_spi *hw, int count) +{ + return hw->tx ? hw->tx[count] : 0; +} + +#ifdef CONFIG_SPI_S3C24XX_FIQ +/* Support for FIQ based pseudo-DMA to improve the transfer speed. + * + * This code uses the assembly helper in spi_s3c24xx_spi.S which is + * used by the FIQ core to move data between main memory and the peripheral + * block. Since this is code running on the processor, there is no problem + * with cache coherency of the buffers, so we can use any buffer we like. + */ + +/** + * struct spi_fiq_code - FIQ code and header + * @length: The length of the code fragment, excluding this header. + * @ack_offset: The offset from @data to the word to place the IRQ ACK bit at. + * @data: The code itself to install as a FIQ handler. + */ +struct spi_fiq_code { + u32 length; + u32 ack_offset; + u8 data[0]; +}; + +extern struct spi_fiq_code s3c24xx_spi_fiq_txrx; +extern struct spi_fiq_code s3c24xx_spi_fiq_tx; +extern struct spi_fiq_code s3c24xx_spi_fiq_rx; + +/** + * ack_bit - turn IRQ into IRQ acknowledgement bit + * @irq: The interrupt number + * + * Returns the bit to write to the interrupt acknowledge register. + */ +static inline u32 ack_bit(unsigned int irq) +{ + return 1 << (irq - IRQ_EINT0); +} + +/** + * s3c24xx_spi_tryfiq - attempt to claim and setup FIQ for transfer + * @hw: The hardware state. + * + * Claim the FIQ handler (only one can be active at any one time) and + * then setup the correct transfer code for this transfer. 
+ * + * This call updates all the necessary state information if successful, + * so the caller does not need to do anything more than start the transfer + * as normal, since the IRQ will have been re-routed to the FIQ handler. +*/ +void s3c24xx_spi_tryfiq(struct s3c24xx_spi *hw) +{ + struct pt_regs regs; + enum spi_fiq_mode mode; + struct spi_fiq_code *code; + int ret; + + if (!hw->fiq_claimed) { + /* try and claim fiq if we haven't got it, and if not + * then return and simply use another transfer method */ + + ret = claim_fiq(&hw->fiq_handler); + if (ret) + return; + } + + if (hw->tx && !hw->rx) + mode = FIQ_MODE_TX; + else if (hw->rx && !hw->tx) + mode = FIQ_MODE_RX; + else + mode = FIQ_MODE_TXRX; + + regs.uregs[fiq_rspi] = (long)hw->regs; + regs.uregs[fiq_rrx] = (long)hw->rx; + regs.uregs[fiq_rtx] = (long)hw->tx + 1; + regs.uregs[fiq_rcount] = hw->len - 1; + regs.uregs[fiq_rirq] = (long)S3C24XX_VA_IRQ; + + set_fiq_regs(®s); + + if (hw->fiq_mode != mode) { + u32 *ack_ptr; + + hw->fiq_mode = mode; + + switch (mode) { + case FIQ_MODE_TX: + code = &s3c24xx_spi_fiq_tx; + break; + case FIQ_MODE_RX: + code = &s3c24xx_spi_fiq_rx; + break; + case FIQ_MODE_TXRX: + code = &s3c24xx_spi_fiq_txrx; + break; + default: + code = NULL; + } + + BUG_ON(!code); + + ack_ptr = (u32 *)&code->data[code->ack_offset]; + *ack_ptr = ack_bit(hw->irq); + + set_fiq_handler(&code->data, code->length); + } + + s3c24xx_set_fiq(hw->irq, true); + + hw->fiq_mode = mode; + hw->fiq_inuse = 1; +} + +/** + * s3c24xx_spi_fiqop - FIQ core code callback + * @pw: Data registered with the handler + * @release: Whether this is a release or a return. + * + * Called by the FIQ code when another module wants to use the FIQ, so + * return whether we are currently using this or not and then update our + * internal state. + */ +static int s3c24xx_spi_fiqop(void *pw, int release) +{ + struct s3c24xx_spi *hw = pw; + int ret = 0; + + if (release) { + if (hw->fiq_inuse) + ret = -EBUSY; + + /* note, we do not need to unroute the FIQ, as the FIQ + * vector code de-routes it to signal the end of transfer */ + + hw->fiq_mode = FIQ_MODE_NONE; + hw->fiq_claimed = 0; + } else { + hw->fiq_claimed = 1; + } + + return ret; +} + +/** + * s3c24xx_spi_initfiq - setup the information for the FIQ core + * @hw: The hardware state. + * + * Setup the fiq_handler block to pass to the FIQ core. + */ +static inline void s3c24xx_spi_initfiq(struct s3c24xx_spi *hw) +{ + hw->fiq_handler.dev_id = hw; + hw->fiq_handler.name = dev_name(hw->dev); + hw->fiq_handler.fiq_op = s3c24xx_spi_fiqop; +} + +/** + * s3c24xx_spi_usefiq - return if we should be using FIQ. + * @hw: The hardware state. + * + * Return true if the platform data specifies whether this channel is + * allowed to use the FIQ. + */ +static inline bool s3c24xx_spi_usefiq(struct s3c24xx_spi *hw) +{ + return hw->pdata->use_fiq; +} + +/** + * s3c24xx_spi_usingfiq - return if channel is using FIQ + * @spi: The hardware state. + * + * Return whether the channel is currently using the FIQ (separate from + * whether the FIQ is claimed). 
+ */ +static inline bool s3c24xx_spi_usingfiq(struct s3c24xx_spi *spi) +{ + return spi->fiq_inuse; +} +#else + +static inline void s3c24xx_spi_initfiq(struct s3c24xx_spi *s) { } +static inline void s3c24xx_spi_tryfiq(struct s3c24xx_spi *s) { } +static inline bool s3c24xx_spi_usefiq(struct s3c24xx_spi *s) { return false; } +static inline bool s3c24xx_spi_usingfiq(struct s3c24xx_spi *s) { return false; } + +#endif /* CONFIG_SPI_S3C24XX_FIQ */ + +static int s3c24xx_spi_txrx(struct spi_device *spi, struct spi_transfer *t) +{ + struct s3c24xx_spi *hw = to_hw(spi); + + hw->tx = t->tx_buf; + hw->rx = t->rx_buf; + hw->len = t->len; + hw->count = 0; + + init_completion(&hw->done); + + hw->fiq_inuse = 0; + if (s3c24xx_spi_usefiq(hw) && t->len >= 3) + s3c24xx_spi_tryfiq(hw); + + /* send the first byte */ + writeb(hw_txbyte(hw, 0), hw->regs + S3C2410_SPTDAT); + + wait_for_completion(&hw->done); + return hw->count; +} + +static irqreturn_t s3c24xx_spi_irq(int irq, void *dev) +{ + struct s3c24xx_spi *hw = dev; + unsigned int spsta = readb(hw->regs + S3C2410_SPSTA); + unsigned int count = hw->count; + + if (spsta & S3C2410_SPSTA_DCOL) { + dev_dbg(hw->dev, "data-collision\n"); + complete(&hw->done); + goto irq_done; + } + + if (!(spsta & S3C2410_SPSTA_READY)) { + dev_dbg(hw->dev, "spi not ready for tx?\n"); + complete(&hw->done); + goto irq_done; + } + + if (!s3c24xx_spi_usingfiq(hw)) { + hw->count++; + + if (hw->rx) + hw->rx[count] = readb(hw->regs + S3C2410_SPRDAT); + + count++; + + if (count < hw->len) + writeb(hw_txbyte(hw, count), hw->regs + S3C2410_SPTDAT); + else + complete(&hw->done); + } else { + hw->count = hw->len; + hw->fiq_inuse = 0; + + if (hw->rx) + hw->rx[hw->len-1] = readb(hw->regs + S3C2410_SPRDAT); + + complete(&hw->done); + } + + irq_done: + return IRQ_HANDLED; +} + +static void s3c24xx_spi_initialsetup(struct s3c24xx_spi *hw) +{ + /* for the moment, permanently enable the clock */ + + clk_enable(hw->clk); + + /* program defaults into the registers */ + + writeb(0xff, hw->regs + S3C2410_SPPRE); + writeb(SPPIN_DEFAULT, hw->regs + S3C2410_SPPIN); + writeb(SPCON_DEFAULT, hw->regs + S3C2410_SPCON); + + if (hw->pdata) { + if (hw->set_cs == s3c24xx_spi_gpiocs) + gpio_direction_output(hw->pdata->pin_cs, 1); + + if (hw->pdata->gpio_setup) + hw->pdata->gpio_setup(hw->pdata, 1); + } +} + +static int __init s3c24xx_spi_probe(struct platform_device *pdev) +{ + struct s3c2410_spi_info *pdata; + struct s3c24xx_spi *hw; + struct spi_master *master; + struct resource *res; + int err = 0; + + master = spi_alloc_master(&pdev->dev, sizeof(struct s3c24xx_spi)); + if (master == NULL) { + dev_err(&pdev->dev, "No memory for spi_master\n"); + err = -ENOMEM; + goto err_nomem; + } + + hw = spi_master_get_devdata(master); + memset(hw, 0, sizeof(struct s3c24xx_spi)); + + hw->master = spi_master_get(master); + hw->pdata = pdata = pdev->dev.platform_data; + hw->dev = &pdev->dev; + + if (pdata == NULL) { + dev_err(&pdev->dev, "No platform data supplied\n"); + err = -ENOENT; + goto err_no_pdata; + } + + platform_set_drvdata(pdev, hw); + init_completion(&hw->done); + + /* initialise fiq handler */ + + s3c24xx_spi_initfiq(hw); + + /* setup the master state. 
*/ + + /* the spi->mode bits understood by this driver: */ + master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; + + master->num_chipselect = hw->pdata->num_cs; + master->bus_num = pdata->bus_num; + + /* setup the state for the bitbang driver */ + + hw->bitbang.master = hw->master; + hw->bitbang.setup_transfer = s3c24xx_spi_setupxfer; + hw->bitbang.chipselect = s3c24xx_spi_chipsel; + hw->bitbang.txrx_bufs = s3c24xx_spi_txrx; + + hw->master->setup = s3c24xx_spi_setup; + hw->master->cleanup = s3c24xx_spi_cleanup; + + dev_dbg(hw->dev, "bitbang at %p\n", &hw->bitbang); + + /* find and map our resources */ + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (res == NULL) { + dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n"); + err = -ENOENT; + goto err_no_iores; + } + + hw->ioarea = request_mem_region(res->start, resource_size(res), + pdev->name); + + if (hw->ioarea == NULL) { + dev_err(&pdev->dev, "Cannot reserve region\n"); + err = -ENXIO; + goto err_no_iores; + } + + hw->regs = ioremap(res->start, resource_size(res)); + if (hw->regs == NULL) { + dev_err(&pdev->dev, "Cannot map IO\n"); + err = -ENXIO; + goto err_no_iomap; + } + + hw->irq = platform_get_irq(pdev, 0); + if (hw->irq < 0) { + dev_err(&pdev->dev, "No IRQ specified\n"); + err = -ENOENT; + goto err_no_irq; + } + + err = request_irq(hw->irq, s3c24xx_spi_irq, 0, pdev->name, hw); + if (err) { + dev_err(&pdev->dev, "Cannot claim IRQ\n"); + goto err_no_irq; + } + + hw->clk = clk_get(&pdev->dev, "spi"); + if (IS_ERR(hw->clk)) { + dev_err(&pdev->dev, "No clock for device\n"); + err = PTR_ERR(hw->clk); + goto err_no_clk; + } + + /* setup any gpio we can */ + + if (!pdata->set_cs) { + if (pdata->pin_cs < 0) { + dev_err(&pdev->dev, "No chipselect pin\n"); + goto err_register; + } + + err = gpio_request(pdata->pin_cs, dev_name(&pdev->dev)); + if (err) { + dev_err(&pdev->dev, "Failed to get gpio for cs\n"); + goto err_register; + } + + hw->set_cs = s3c24xx_spi_gpiocs; + gpio_direction_output(pdata->pin_cs, 1); + } else + hw->set_cs = pdata->set_cs; + + s3c24xx_spi_initialsetup(hw); + + /* register our spi controller */ + + err = spi_bitbang_start(&hw->bitbang); + if (err) { + dev_err(&pdev->dev, "Failed to register SPI master\n"); + goto err_register; + } + + return 0; + + err_register: + if (hw->set_cs == s3c24xx_spi_gpiocs) + gpio_free(pdata->pin_cs); + + clk_disable(hw->clk); + clk_put(hw->clk); + + err_no_clk: + free_irq(hw->irq, hw); + + err_no_irq: + iounmap(hw->regs); + + err_no_iomap: + release_resource(hw->ioarea); + kfree(hw->ioarea); + + err_no_iores: + err_no_pdata: + spi_master_put(hw->master); + + err_nomem: + return err; +} + +static int __exit s3c24xx_spi_remove(struct platform_device *dev) +{ + struct s3c24xx_spi *hw = platform_get_drvdata(dev); + + platform_set_drvdata(dev, NULL); + + spi_bitbang_stop(&hw->bitbang); + + clk_disable(hw->clk); + clk_put(hw->clk); + + free_irq(hw->irq, hw); + iounmap(hw->regs); + + if (hw->set_cs == s3c24xx_spi_gpiocs) + gpio_free(hw->pdata->pin_cs); + + release_resource(hw->ioarea); + kfree(hw->ioarea); + + spi_master_put(hw->master); + return 0; +} + + +#ifdef CONFIG_PM + +static int s3c24xx_spi_suspend(struct device *dev) +{ + struct s3c24xx_spi *hw = platform_get_drvdata(to_platform_device(dev)); + + if (hw->pdata && hw->pdata->gpio_setup) + hw->pdata->gpio_setup(hw->pdata, 0); + + clk_disable(hw->clk); + return 0; +} + +static int s3c24xx_spi_resume(struct device *dev) +{ + struct s3c24xx_spi *hw = platform_get_drvdata(to_platform_device(dev)); + + 
s3c24xx_spi_initialsetup(hw); + return 0; +} + +static const struct dev_pm_ops s3c24xx_spi_pmops = { + .suspend = s3c24xx_spi_suspend, + .resume = s3c24xx_spi_resume, +}; + +#define S3C24XX_SPI_PMOPS &s3c24xx_spi_pmops +#else +#define S3C24XX_SPI_PMOPS NULL +#endif /* CONFIG_PM */ + +MODULE_ALIAS("platform:s3c2410-spi"); +static struct platform_driver s3c24xx_spi_driver = { + .remove = __exit_p(s3c24xx_spi_remove), + .driver = { + .name = "s3c2410-spi", + .owner = THIS_MODULE, + .pm = S3C24XX_SPI_PMOPS, + }, +}; + +static int __init s3c24xx_spi_init(void) +{ + return platform_driver_probe(&s3c24xx_spi_driver, s3c24xx_spi_probe); +} + +static void __exit s3c24xx_spi_exit(void) +{ + platform_driver_unregister(&s3c24xx_spi_driver); +} + +module_init(s3c24xx_spi_init); +module_exit(s3c24xx_spi_exit); + +MODULE_DESCRIPTION("S3C24XX SPI Driver"); +MODULE_AUTHOR("Ben Dooks, "); +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c new file mode 100644 index 0000000..75e3a9b --- /dev/null +++ b/drivers/spi/spi-s3c64xx.c @@ -0,0 +1,1247 @@ +/* + * Copyright (C) 2009 Samsung Electronics Ltd. + * Jaswinder Singh + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +/* Registers and bit-fields */ + +#define S3C64XX_SPI_CH_CFG 0x00 +#define S3C64XX_SPI_CLK_CFG 0x04 +#define S3C64XX_SPI_MODE_CFG 0x08 +#define S3C64XX_SPI_SLAVE_SEL 0x0C +#define S3C64XX_SPI_INT_EN 0x10 +#define S3C64XX_SPI_STATUS 0x14 +#define S3C64XX_SPI_TX_DATA 0x18 +#define S3C64XX_SPI_RX_DATA 0x1C +#define S3C64XX_SPI_PACKET_CNT 0x20 +#define S3C64XX_SPI_PENDING_CLR 0x24 +#define S3C64XX_SPI_SWAP_CFG 0x28 +#define S3C64XX_SPI_FB_CLK 0x2C + +#define S3C64XX_SPI_CH_HS_EN (1<<6) /* High Speed Enable */ +#define S3C64XX_SPI_CH_SW_RST (1<<5) +#define S3C64XX_SPI_CH_SLAVE (1<<4) +#define S3C64XX_SPI_CPOL_L (1<<3) +#define S3C64XX_SPI_CPHA_B (1<<2) +#define S3C64XX_SPI_CH_RXCH_ON (1<<1) +#define S3C64XX_SPI_CH_TXCH_ON (1<<0) + +#define S3C64XX_SPI_CLKSEL_SRCMSK (3<<9) +#define S3C64XX_SPI_CLKSEL_SRCSHFT 9 +#define S3C64XX_SPI_ENCLK_ENABLE (1<<8) +#define S3C64XX_SPI_PSR_MASK 0xff + +#define S3C64XX_SPI_MODE_CH_TSZ_BYTE (0<<29) +#define S3C64XX_SPI_MODE_CH_TSZ_HALFWORD (1<<29) +#define S3C64XX_SPI_MODE_CH_TSZ_WORD (2<<29) +#define S3C64XX_SPI_MODE_CH_TSZ_MASK (3<<29) +#define S3C64XX_SPI_MODE_BUS_TSZ_BYTE (0<<17) +#define S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD (1<<17) +#define S3C64XX_SPI_MODE_BUS_TSZ_WORD (2<<17) +#define S3C64XX_SPI_MODE_BUS_TSZ_MASK (3<<17) +#define S3C64XX_SPI_MODE_RXDMA_ON (1<<2) +#define S3C64XX_SPI_MODE_TXDMA_ON (1<<1) +#define S3C64XX_SPI_MODE_4BURST (1<<0) + +#define S3C64XX_SPI_SLAVE_AUTO (1<<1) +#define S3C64XX_SPI_SLAVE_SIG_INACT (1<<0) + +#define S3C64XX_SPI_ACT(c) writel(0, (c)->regs + S3C64XX_SPI_SLAVE_SEL) + +#define S3C64XX_SPI_DEACT(c) writel(S3C64XX_SPI_SLAVE_SIG_INACT, \ + (c)->regs + S3C64XX_SPI_SLAVE_SEL) + +#define S3C64XX_SPI_INT_TRAILING_EN (1<<6) +#define S3C64XX_SPI_INT_RX_OVERRUN_EN (1<<5) +#define S3C64XX_SPI_INT_RX_UNDERRUN_EN (1<<4) +#define S3C64XX_SPI_INT_TX_OVERRUN_EN (1<<3) +#define S3C64XX_SPI_INT_TX_UNDERRUN_EN (1<<2) +#define S3C64XX_SPI_INT_RX_FIFORDY_EN (1<<1) +#define S3C64XX_SPI_INT_TX_FIFORDY_EN (1<<0) + +#define S3C64XX_SPI_ST_RX_OVERRUN_ERR (1<<5) +#define S3C64XX_SPI_ST_RX_UNDERRUN_ERR (1<<4) +#define S3C64XX_SPI_ST_TX_OVERRUN_ERR (1<<3) +#define S3C64XX_SPI_ST_TX_UNDERRUN_ERR (1<<2) +#define S3C64XX_SPI_ST_RX_FIFORDY (1<<1) +#define S3C64XX_SPI_ST_TX_FIFORDY (1<<0) + +#define S3C64XX_SPI_PACKET_CNT_EN (1<<16) + +#define S3C64XX_SPI_PND_TX_UNDERRUN_CLR (1<<4) +#define S3C64XX_SPI_PND_TX_OVERRUN_CLR (1<<3) +#define S3C64XX_SPI_PND_RX_UNDERRUN_CLR (1<<2) +#define S3C64XX_SPI_PND_RX_OVERRUN_CLR (1<<1) +#define S3C64XX_SPI_PND_TRAILING_CLR (1<<0) + +#define S3C64XX_SPI_SWAP_RX_HALF_WORD (1<<7) +#define S3C64XX_SPI_SWAP_RX_BYTE (1<<6) +#define S3C64XX_SPI_SWAP_RX_BIT (1<<5) +#define S3C64XX_SPI_SWAP_RX_EN (1<<4) +#define S3C64XX_SPI_SWAP_TX_HALF_WORD (1<<3) +#define S3C64XX_SPI_SWAP_TX_BYTE (1<<2) +#define S3C64XX_SPI_SWAP_TX_BIT (1<<1) +#define S3C64XX_SPI_SWAP_TX_EN (1<<0) + +#define S3C64XX_SPI_FBCLK_MSK (3<<0) + +#define S3C64XX_SPI_ST_TRLCNTZ(v, i) ((((v) >> (i)->rx_lvl_offset) & \ + (((i)->fifo_lvl_mask + 1))) \ + ? 1 : 0) + +#define S3C64XX_SPI_ST_TX_DONE(v, i) ((((v) >> (i)->rx_lvl_offset) & \ + (((i)->fifo_lvl_mask + 1) << 1)) \ + ? 
1 : 0) +#define TX_FIFO_LVL(v, i) (((v) >> 6) & (i)->fifo_lvl_mask) +#define RX_FIFO_LVL(v, i) (((v) >> (i)->rx_lvl_offset) & (i)->fifo_lvl_mask) + +#define S3C64XX_SPI_MAX_TRAILCNT 0x3ff +#define S3C64XX_SPI_TRAILCNT_OFF 19 + +#define S3C64XX_SPI_TRAILCNT S3C64XX_SPI_MAX_TRAILCNT + +#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t) + +#define SUSPND (1<<0) +#define SPIBUSY (1<<1) +#define RXBUSY (1<<2) +#define TXBUSY (1<<3) + +/** + * struct s3c64xx_spi_driver_data - Runtime info holder for SPI driver. + * @clk: Pointer to the spi clock. + * @src_clk: Pointer to the clock used to generate SPI signals. + * @master: Pointer to the SPI Protocol master. + * @workqueue: Work queue for the SPI xfer requests. + * @cntrlr_info: Platform specific data for the controller this driver manages. + * @tgl_spi: Pointer to the last CS left untoggled by the cs_change hint. + * @work: Work + * @queue: To log SPI xfer requests. + * @lock: Controller specific lock. + * @state: Set of FLAGS to indicate status. + * @rx_dmach: Controller's DMA channel for Rx. + * @tx_dmach: Controller's DMA channel for Tx. + * @sfr_start: BUS address of SPI controller regs. + * @regs: Pointer to ioremap'ed controller registers. + * @xfer_completion: To indicate completion of xfer task. + * @cur_mode: Stores the active configuration of the controller. + * @cur_bpw: Stores the active bits per word settings. + * @cur_speed: Stores the active xfer clock speed. + */ +struct s3c64xx_spi_driver_data { + void __iomem *regs; + struct clk *clk; + struct clk *src_clk; + struct platform_device *pdev; + struct spi_master *master; + struct workqueue_struct *workqueue; + struct s3c64xx_spi_info *cntrlr_info; + struct spi_device *tgl_spi; + struct work_struct work; + struct list_head queue; + spinlock_t lock; + enum dma_ch rx_dmach; + enum dma_ch tx_dmach; + unsigned long sfr_start; + struct completion xfer_completion; + unsigned state; + unsigned cur_mode, cur_bpw; + unsigned cur_speed; +}; + +static struct s3c2410_dma_client s3c64xx_spi_dma_client = { + .name = "samsung-spi-dma", +}; + +static void flush_fifo(struct s3c64xx_spi_driver_data *sdd) +{ + struct s3c64xx_spi_info *sci = sdd->cntrlr_info; + void __iomem *regs = sdd->regs; + unsigned long loops; + u32 val; + + writel(0, regs + S3C64XX_SPI_PACKET_CNT); + + val = readl(regs + S3C64XX_SPI_CH_CFG); + val |= S3C64XX_SPI_CH_SW_RST; + val &= ~S3C64XX_SPI_CH_HS_EN; + writel(val, regs + S3C64XX_SPI_CH_CFG); + + /* Flush TxFIFO*/ + loops = msecs_to_loops(1); + do { + val = readl(regs + S3C64XX_SPI_STATUS); + } while (TX_FIFO_LVL(val, sci) && loops--); + + if (loops == 0) + dev_warn(&sdd->pdev->dev, "Timed out flushing TX FIFO\n"); + + /* Flush RxFIFO*/ + loops = msecs_to_loops(1); + do { + val = readl(regs + S3C64XX_SPI_STATUS); + if (RX_FIFO_LVL(val, sci)) + readl(regs + S3C64XX_SPI_RX_DATA); + else + break; + } while (loops--); + + if (loops == 0) + dev_warn(&sdd->pdev->dev, "Timed out flushing RX FIFO\n"); + + val = readl(regs + S3C64XX_SPI_CH_CFG); + val &= ~S3C64XX_SPI_CH_SW_RST; + writel(val, regs + S3C64XX_SPI_CH_CFG); + + val = readl(regs + S3C64XX_SPI_MODE_CFG); + val &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON); + writel(val, regs + S3C64XX_SPI_MODE_CFG); + + val = readl(regs + S3C64XX_SPI_CH_CFG); + val &= ~(S3C64XX_SPI_CH_RXCH_ON | S3C64XX_SPI_CH_TXCH_ON); + writel(val, regs + S3C64XX_SPI_CH_CFG); +} + +static void enable_datapath(struct s3c64xx_spi_driver_data *sdd, + struct spi_device *spi, + struct spi_transfer *xfer, int dma_mode) +{ + struct 
s3c64xx_spi_info *sci = sdd->cntrlr_info; + void __iomem *regs = sdd->regs; + u32 modecfg, chcfg; + + modecfg = readl(regs + S3C64XX_SPI_MODE_CFG); + modecfg &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON); + + chcfg = readl(regs + S3C64XX_SPI_CH_CFG); + chcfg &= ~S3C64XX_SPI_CH_TXCH_ON; + + if (dma_mode) { + chcfg &= ~S3C64XX_SPI_CH_RXCH_ON; + } else { + /* Always shift in data in FIFO, even if xfer is Tx only, + * this helps setting PCKT_CNT value for generating clocks + * as exactly needed. + */ + chcfg |= S3C64XX_SPI_CH_RXCH_ON; + writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff) + | S3C64XX_SPI_PACKET_CNT_EN, + regs + S3C64XX_SPI_PACKET_CNT); + } + + if (xfer->tx_buf != NULL) { + sdd->state |= TXBUSY; + chcfg |= S3C64XX_SPI_CH_TXCH_ON; + if (dma_mode) { + modecfg |= S3C64XX_SPI_MODE_TXDMA_ON; + s3c2410_dma_config(sdd->tx_dmach, sdd->cur_bpw / 8); + s3c2410_dma_enqueue(sdd->tx_dmach, (void *)sdd, + xfer->tx_dma, xfer->len); + s3c2410_dma_ctrl(sdd->tx_dmach, S3C2410_DMAOP_START); + } else { + switch (sdd->cur_bpw) { + case 32: + iowrite32_rep(regs + S3C64XX_SPI_TX_DATA, + xfer->tx_buf, xfer->len / 4); + break; + case 16: + iowrite16_rep(regs + S3C64XX_SPI_TX_DATA, + xfer->tx_buf, xfer->len / 2); + break; + default: + iowrite8_rep(regs + S3C64XX_SPI_TX_DATA, + xfer->tx_buf, xfer->len); + break; + } + } + } + + if (xfer->rx_buf != NULL) { + sdd->state |= RXBUSY; + + if (sci->high_speed && sdd->cur_speed >= 30000000UL + && !(sdd->cur_mode & SPI_CPHA)) + chcfg |= S3C64XX_SPI_CH_HS_EN; + + if (dma_mode) { + modecfg |= S3C64XX_SPI_MODE_RXDMA_ON; + chcfg |= S3C64XX_SPI_CH_RXCH_ON; + writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff) + | S3C64XX_SPI_PACKET_CNT_EN, + regs + S3C64XX_SPI_PACKET_CNT); + s3c2410_dma_config(sdd->rx_dmach, sdd->cur_bpw / 8); + s3c2410_dma_enqueue(sdd->rx_dmach, (void *)sdd, + xfer->rx_dma, xfer->len); + s3c2410_dma_ctrl(sdd->rx_dmach, S3C2410_DMAOP_START); + } + } + + writel(modecfg, regs + S3C64XX_SPI_MODE_CFG); + writel(chcfg, regs + S3C64XX_SPI_CH_CFG); +} + +static inline void enable_cs(struct s3c64xx_spi_driver_data *sdd, + struct spi_device *spi) +{ + struct s3c64xx_spi_csinfo *cs; + + if (sdd->tgl_spi != NULL) { /* If last device toggled after mssg */ + if (sdd->tgl_spi != spi) { /* if last mssg on diff device */ + /* Deselect the last toggled device */ + cs = sdd->tgl_spi->controller_data; + cs->set_level(cs->line, + spi->mode & SPI_CS_HIGH ? 0 : 1); + } + sdd->tgl_spi = NULL; + } + + cs = spi->controller_data; + cs->set_level(cs->line, spi->mode & SPI_CS_HIGH ? 1 : 0); +} + +static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd, + struct spi_transfer *xfer, int dma_mode) +{ + struct s3c64xx_spi_info *sci = sdd->cntrlr_info; + void __iomem *regs = sdd->regs; + unsigned long val; + int ms; + + /* millisecs to xfer 'len' bytes @ 'cur_speed' */ + ms = xfer->len * 8 * 1000 / sdd->cur_speed; + ms += 10; /* some tolerance */ + + if (dma_mode) { + val = msecs_to_jiffies(ms) + 10; + val = wait_for_completion_timeout(&sdd->xfer_completion, val); + } else { + u32 status; + val = msecs_to_loops(ms); + do { + status = readl(regs + S3C64XX_SPI_STATUS); + } while (RX_FIFO_LVL(status, sci) < xfer->len && --val); + } + + if (!val) + return -EIO; + + if (dma_mode) { + u32 status; + + /* + * DmaTx returns after simply writing data in the FIFO, + * w/o waiting for real transmission on the bus to finish. + * DmaRx returns only after Dma read data from FIFO which + * needs bus transmission to finish, so we don't worry if + * Xfer involved Rx(with or without Tx). 
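
wait_for_xfer() above bounds both the DMA completion wait and the PIO polling loop by an estimate of how long the transfer can take on the wire: the length in bits divided by the clock rate, plus some slack. A quick sketch of that estimate; the values in main() are only an example.

#include <stdio.h>

/* Estimate the worst-case transfer time the way wait_for_xfer() above
 * does: 8 bits per byte at speed_hz, plus 10 ms of tolerance. */
static unsigned int xfer_timeout_ms(unsigned int len, unsigned int speed_hz)
{
        return len * 8U * 1000U / speed_hz + 10;
}

int main(void)
{
        /* 4 KiB at 1 MHz works out to a 42 ms bound. */
        printf("%u ms\n", xfer_timeout_ms(4096, 1000000));
        return 0;
}
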
+ */ + if (xfer->rx_buf == NULL) { + val = msecs_to_loops(10); + status = readl(regs + S3C64XX_SPI_STATUS); + while ((TX_FIFO_LVL(status, sci) + || !S3C64XX_SPI_ST_TX_DONE(status, sci)) + && --val) { + cpu_relax(); + status = readl(regs + S3C64XX_SPI_STATUS); + } + + if (!val) + return -EIO; + } + } else { + /* If it was only Tx */ + if (xfer->rx_buf == NULL) { + sdd->state &= ~TXBUSY; + return 0; + } + + switch (sdd->cur_bpw) { + case 32: + ioread32_rep(regs + S3C64XX_SPI_RX_DATA, + xfer->rx_buf, xfer->len / 4); + break; + case 16: + ioread16_rep(regs + S3C64XX_SPI_RX_DATA, + xfer->rx_buf, xfer->len / 2); + break; + default: + ioread8_rep(regs + S3C64XX_SPI_RX_DATA, + xfer->rx_buf, xfer->len); + break; + } + sdd->state &= ~RXBUSY; + } + + return 0; +} + +static inline void disable_cs(struct s3c64xx_spi_driver_data *sdd, + struct spi_device *spi) +{ + struct s3c64xx_spi_csinfo *cs = spi->controller_data; + + if (sdd->tgl_spi == spi) + sdd->tgl_spi = NULL; + + cs->set_level(cs->line, spi->mode & SPI_CS_HIGH ? 0 : 1); +} + +static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd) +{ + struct s3c64xx_spi_info *sci = sdd->cntrlr_info; + void __iomem *regs = sdd->regs; + u32 val; + + /* Disable Clock */ + if (sci->clk_from_cmu) { + clk_disable(sdd->src_clk); + } else { + val = readl(regs + S3C64XX_SPI_CLK_CFG); + val &= ~S3C64XX_SPI_ENCLK_ENABLE; + writel(val, regs + S3C64XX_SPI_CLK_CFG); + } + + /* Set Polarity and Phase */ + val = readl(regs + S3C64XX_SPI_CH_CFG); + val &= ~(S3C64XX_SPI_CH_SLAVE | + S3C64XX_SPI_CPOL_L | + S3C64XX_SPI_CPHA_B); + + if (sdd->cur_mode & SPI_CPOL) + val |= S3C64XX_SPI_CPOL_L; + + if (sdd->cur_mode & SPI_CPHA) + val |= S3C64XX_SPI_CPHA_B; + + writel(val, regs + S3C64XX_SPI_CH_CFG); + + /* Set Channel & DMA Mode */ + val = readl(regs + S3C64XX_SPI_MODE_CFG); + val &= ~(S3C64XX_SPI_MODE_BUS_TSZ_MASK + | S3C64XX_SPI_MODE_CH_TSZ_MASK); + + switch (sdd->cur_bpw) { + case 32: + val |= S3C64XX_SPI_MODE_BUS_TSZ_WORD; + val |= S3C64XX_SPI_MODE_CH_TSZ_WORD; + break; + case 16: + val |= S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD; + val |= S3C64XX_SPI_MODE_CH_TSZ_HALFWORD; + break; + default: + val |= S3C64XX_SPI_MODE_BUS_TSZ_BYTE; + val |= S3C64XX_SPI_MODE_CH_TSZ_BYTE; + break; + } + + writel(val, regs + S3C64XX_SPI_MODE_CFG); + + if (sci->clk_from_cmu) { + /* Configure Clock */ + /* There is half-multiplier before the SPI */ + clk_set_rate(sdd->src_clk, sdd->cur_speed * 2); + /* Enable Clock */ + clk_enable(sdd->src_clk); + } else { + /* Configure Clock */ + val = readl(regs + S3C64XX_SPI_CLK_CFG); + val &= ~S3C64XX_SPI_PSR_MASK; + val |= ((clk_get_rate(sdd->src_clk) / sdd->cur_speed / 2 - 1) + & S3C64XX_SPI_PSR_MASK); + writel(val, regs + S3C64XX_SPI_CLK_CFG); + + /* Enable Clock */ + val = readl(regs + S3C64XX_SPI_CLK_CFG); + val |= S3C64XX_SPI_ENCLK_ENABLE; + writel(val, regs + S3C64XX_SPI_CLK_CFG); + } +} + +static void s3c64xx_spi_dma_rxcb(struct s3c2410_dma_chan *chan, void *buf_id, + int size, enum s3c2410_dma_buffresult res) +{ + struct s3c64xx_spi_driver_data *sdd = buf_id; + unsigned long flags; + + spin_lock_irqsave(&sdd->lock, flags); + + if (res == S3C2410_RES_OK) + sdd->state &= ~RXBUSY; + else + dev_err(&sdd->pdev->dev, "DmaAbrtRx-%d\n", size); + + /* If the other done */ + if (!(sdd->state & TXBUSY)) + complete(&sdd->xfer_completion); + + spin_unlock_irqrestore(&sdd->lock, flags); +} + +static void s3c64xx_spi_dma_txcb(struct s3c2410_dma_chan *chan, void *buf_id, + int size, enum s3c2410_dma_buffresult res) +{ + struct s3c64xx_spi_driver_data *sdd = buf_id; 
+ unsigned long flags; + + spin_lock_irqsave(&sdd->lock, flags); + + if (res == S3C2410_RES_OK) + sdd->state &= ~TXBUSY; + else + dev_err(&sdd->pdev->dev, "DmaAbrtTx-%d \n", size); + + /* If the other done */ + if (!(sdd->state & RXBUSY)) + complete(&sdd->xfer_completion); + + spin_unlock_irqrestore(&sdd->lock, flags); +} + +#define XFER_DMAADDR_INVALID DMA_BIT_MASK(32) + +static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd, + struct spi_message *msg) +{ + struct s3c64xx_spi_info *sci = sdd->cntrlr_info; + struct device *dev = &sdd->pdev->dev; + struct spi_transfer *xfer; + + if (msg->is_dma_mapped) + return 0; + + /* First mark all xfer unmapped */ + list_for_each_entry(xfer, &msg->transfers, transfer_list) { + xfer->rx_dma = XFER_DMAADDR_INVALID; + xfer->tx_dma = XFER_DMAADDR_INVALID; + } + + /* Map until end or first fail */ + list_for_each_entry(xfer, &msg->transfers, transfer_list) { + + if (xfer->len <= ((sci->fifo_lvl_mask >> 1) + 1)) + continue; + + if (xfer->tx_buf != NULL) { + xfer->tx_dma = dma_map_single(dev, + (void *)xfer->tx_buf, xfer->len, + DMA_TO_DEVICE); + if (dma_mapping_error(dev, xfer->tx_dma)) { + dev_err(dev, "dma_map_single Tx failed\n"); + xfer->tx_dma = XFER_DMAADDR_INVALID; + return -ENOMEM; + } + } + + if (xfer->rx_buf != NULL) { + xfer->rx_dma = dma_map_single(dev, xfer->rx_buf, + xfer->len, DMA_FROM_DEVICE); + if (dma_mapping_error(dev, xfer->rx_dma)) { + dev_err(dev, "dma_map_single Rx failed\n"); + dma_unmap_single(dev, xfer->tx_dma, + xfer->len, DMA_TO_DEVICE); + xfer->tx_dma = XFER_DMAADDR_INVALID; + xfer->rx_dma = XFER_DMAADDR_INVALID; + return -ENOMEM; + } + } + } + + return 0; +} + +static void s3c64xx_spi_unmap_mssg(struct s3c64xx_spi_driver_data *sdd, + struct spi_message *msg) +{ + struct s3c64xx_spi_info *sci = sdd->cntrlr_info; + struct device *dev = &sdd->pdev->dev; + struct spi_transfer *xfer; + + if (msg->is_dma_mapped) + return; + + list_for_each_entry(xfer, &msg->transfers, transfer_list) { + + if (xfer->len <= ((sci->fifo_lvl_mask >> 1) + 1)) + continue; + + if (xfer->rx_buf != NULL + && xfer->rx_dma != XFER_DMAADDR_INVALID) + dma_unmap_single(dev, xfer->rx_dma, + xfer->len, DMA_FROM_DEVICE); + + if (xfer->tx_buf != NULL + && xfer->tx_dma != XFER_DMAADDR_INVALID) + dma_unmap_single(dev, xfer->tx_dma, + xfer->len, DMA_TO_DEVICE); + } +} + +static void handle_msg(struct s3c64xx_spi_driver_data *sdd, + struct spi_message *msg) +{ + struct s3c64xx_spi_info *sci = sdd->cntrlr_info; + struct spi_device *spi = msg->spi; + struct s3c64xx_spi_csinfo *cs = spi->controller_data; + struct spi_transfer *xfer; + int status = 0, cs_toggle = 0; + u32 speed; + u8 bpw; + + /* If Master's(controller) state differs from that needed by Slave */ + if (sdd->cur_speed != spi->max_speed_hz + || sdd->cur_mode != spi->mode + || sdd->cur_bpw != spi->bits_per_word) { + sdd->cur_bpw = spi->bits_per_word; + sdd->cur_speed = spi->max_speed_hz; + sdd->cur_mode = spi->mode; + s3c64xx_spi_config(sdd); + } + + /* Map all the transfers if needed */ + if (s3c64xx_spi_map_mssg(sdd, msg)) { + dev_err(&spi->dev, + "Xfer: Unable to map message buffers!\n"); + status = -ENOMEM; + goto out; + } + + /* Configure feedback delay */ + writel(cs->fb_delay & 0x3, sdd->regs + S3C64XX_SPI_FB_CLK); + + list_for_each_entry(xfer, &msg->transfers, transfer_list) { + + unsigned long flags; + int use_dma; + + INIT_COMPLETION(sdd->xfer_completion); + + /* Only BPW and Speed may change across transfers */ + bpw = xfer->bits_per_word ? : spi->bits_per_word; + speed = xfer->speed_hz ? 
: spi->max_speed_hz; + + if (xfer->len % (bpw / 8)) { + dev_err(&spi->dev, + "Xfer length(%u) not a multiple of word size(%u)\n", + xfer->len, bpw / 8); + status = -EIO; + goto out; + } + + if (bpw != sdd->cur_bpw || speed != sdd->cur_speed) { + sdd->cur_bpw = bpw; + sdd->cur_speed = speed; + s3c64xx_spi_config(sdd); + } + + /* Polling method for xfers not bigger than FIFO capacity */ + if (xfer->len <= ((sci->fifo_lvl_mask >> 1) + 1)) + use_dma = 0; + else + use_dma = 1; + + spin_lock_irqsave(&sdd->lock, flags); + + /* Pending only which is to be done */ + sdd->state &= ~RXBUSY; + sdd->state &= ~TXBUSY; + + enable_datapath(sdd, spi, xfer, use_dma); + + /* Slave Select */ + enable_cs(sdd, spi); + + /* Start the signals */ + S3C64XX_SPI_ACT(sdd); + + spin_unlock_irqrestore(&sdd->lock, flags); + + status = wait_for_xfer(sdd, xfer, use_dma); + + /* Quiese the signals */ + S3C64XX_SPI_DEACT(sdd); + + if (status) { + dev_err(&spi->dev, "I/O Error: " + "rx-%d tx-%d res:rx-%c tx-%c len-%d\n", + xfer->rx_buf ? 1 : 0, xfer->tx_buf ? 1 : 0, + (sdd->state & RXBUSY) ? 'f' : 'p', + (sdd->state & TXBUSY) ? 'f' : 'p', + xfer->len); + + if (use_dma) { + if (xfer->tx_buf != NULL + && (sdd->state & TXBUSY)) + s3c2410_dma_ctrl(sdd->tx_dmach, + S3C2410_DMAOP_FLUSH); + if (xfer->rx_buf != NULL + && (sdd->state & RXBUSY)) + s3c2410_dma_ctrl(sdd->rx_dmach, + S3C2410_DMAOP_FLUSH); + } + + goto out; + } + + if (xfer->delay_usecs) + udelay(xfer->delay_usecs); + + if (xfer->cs_change) { + /* Hint that the next mssg is gonna be + for the same device */ + if (list_is_last(&xfer->transfer_list, + &msg->transfers)) + cs_toggle = 1; + else + disable_cs(sdd, spi); + } + + msg->actual_length += xfer->len; + + flush_fifo(sdd); + } + +out: + if (!cs_toggle || status) + disable_cs(sdd, spi); + else + sdd->tgl_spi = spi; + + s3c64xx_spi_unmap_mssg(sdd, msg); + + msg->status = status; + + if (msg->complete) + msg->complete(msg->context); +} + +static int acquire_dma(struct s3c64xx_spi_driver_data *sdd) +{ + if (s3c2410_dma_request(sdd->rx_dmach, + &s3c64xx_spi_dma_client, NULL) < 0) { + dev_err(&sdd->pdev->dev, "cannot get RxDMA\n"); + return 0; + } + s3c2410_dma_set_buffdone_fn(sdd->rx_dmach, s3c64xx_spi_dma_rxcb); + s3c2410_dma_devconfig(sdd->rx_dmach, S3C2410_DMASRC_HW, + sdd->sfr_start + S3C64XX_SPI_RX_DATA); + + if (s3c2410_dma_request(sdd->tx_dmach, + &s3c64xx_spi_dma_client, NULL) < 0) { + dev_err(&sdd->pdev->dev, "cannot get TxDMA\n"); + s3c2410_dma_free(sdd->rx_dmach, &s3c64xx_spi_dma_client); + return 0; + } + s3c2410_dma_set_buffdone_fn(sdd->tx_dmach, s3c64xx_spi_dma_txcb); + s3c2410_dma_devconfig(sdd->tx_dmach, S3C2410_DMASRC_MEM, + sdd->sfr_start + S3C64XX_SPI_TX_DATA); + + return 1; +} + +static void s3c64xx_spi_work(struct work_struct *work) +{ + struct s3c64xx_spi_driver_data *sdd = container_of(work, + struct s3c64xx_spi_driver_data, work); + unsigned long flags; + + /* Acquire DMA channels */ + while (!acquire_dma(sdd)) + msleep(10); + + spin_lock_irqsave(&sdd->lock, flags); + + while (!list_empty(&sdd->queue) + && !(sdd->state & SUSPND)) { + + struct spi_message *msg; + + msg = container_of(sdd->queue.next, struct spi_message, queue); + + list_del_init(&msg->queue); + + /* Set Xfer busy flag */ + sdd->state |= SPIBUSY; + + spin_unlock_irqrestore(&sdd->lock, flags); + + handle_msg(sdd, msg); + + spin_lock_irqsave(&sdd->lock, flags); + + sdd->state &= ~SPIBUSY; + } + + spin_unlock_irqrestore(&sdd->lock, flags); + + /* Free DMA channels */ + s3c2410_dma_free(sdd->tx_dmach, &s3c64xx_spi_dma_client); + 
s3c2410_dma_free(sdd->rx_dmach, &s3c64xx_spi_dma_client); +} + +static int s3c64xx_spi_transfer(struct spi_device *spi, + struct spi_message *msg) +{ + struct s3c64xx_spi_driver_data *sdd; + unsigned long flags; + + sdd = spi_master_get_devdata(spi->master); + + spin_lock_irqsave(&sdd->lock, flags); + + if (sdd->state & SUSPND) { + spin_unlock_irqrestore(&sdd->lock, flags); + return -ESHUTDOWN; + } + + msg->status = -EINPROGRESS; + msg->actual_length = 0; + + list_add_tail(&msg->queue, &sdd->queue); + + queue_work(sdd->workqueue, &sdd->work); + + spin_unlock_irqrestore(&sdd->lock, flags); + + return 0; +} + +/* + * Here we only check the validity of requested configuration + * and save the configuration in a local data-structure. + * The controller is actually configured only just before we + * get a message to transfer. + */ +static int s3c64xx_spi_setup(struct spi_device *spi) +{ + struct s3c64xx_spi_csinfo *cs = spi->controller_data; + struct s3c64xx_spi_driver_data *sdd; + struct s3c64xx_spi_info *sci; + struct spi_message *msg; + unsigned long flags; + int err = 0; + + if (cs == NULL || cs->set_level == NULL) { + dev_err(&spi->dev, "No CS for SPI(%d)\n", spi->chip_select); + return -ENODEV; + } + + sdd = spi_master_get_devdata(spi->master); + sci = sdd->cntrlr_info; + + spin_lock_irqsave(&sdd->lock, flags); + + list_for_each_entry(msg, &sdd->queue, queue) { + /* Is some mssg is already queued for this device */ + if (msg->spi == spi) { + dev_err(&spi->dev, + "setup: attempt while mssg in queue!\n"); + spin_unlock_irqrestore(&sdd->lock, flags); + return -EBUSY; + } + } + + if (sdd->state & SUSPND) { + spin_unlock_irqrestore(&sdd->lock, flags); + dev_err(&spi->dev, + "setup: SPI-%d not active!\n", spi->master->bus_num); + return -ESHUTDOWN; + } + + spin_unlock_irqrestore(&sdd->lock, flags); + + if (spi->bits_per_word != 8 + && spi->bits_per_word != 16 + && spi->bits_per_word != 32) { + dev_err(&spi->dev, "setup: %dbits/wrd not supported!\n", + spi->bits_per_word); + err = -EINVAL; + goto setup_exit; + } + + /* Check if we can provide the requested rate */ + if (!sci->clk_from_cmu) { + u32 psr, speed; + + /* Max possible */ + speed = clk_get_rate(sdd->src_clk) / 2 / (0 + 1); + + if (spi->max_speed_hz > speed) + spi->max_speed_hz = speed; + + psr = clk_get_rate(sdd->src_clk) / 2 / spi->max_speed_hz - 1; + psr &= S3C64XX_SPI_PSR_MASK; + if (psr == S3C64XX_SPI_PSR_MASK) + psr--; + + speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1); + if (spi->max_speed_hz < speed) { + if (psr+1 < S3C64XX_SPI_PSR_MASK) { + psr++; + } else { + err = -EINVAL; + goto setup_exit; + } + } + + speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1); + if (spi->max_speed_hz >= speed) + spi->max_speed_hz = speed; + else + err = -EINVAL; + } + +setup_exit: + + /* setup() returns with device de-selected */ + disable_cs(sdd, spi); + + return err; +} + +static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel) +{ + struct s3c64xx_spi_info *sci = sdd->cntrlr_info; + void __iomem *regs = sdd->regs; + unsigned int val; + + sdd->cur_speed = 0; + + S3C64XX_SPI_DEACT(sdd); + + /* Disable Interrupts - we use Polling if not DMA mode */ + writel(0, regs + S3C64XX_SPI_INT_EN); + + if (!sci->clk_from_cmu) + writel(sci->src_clk_nr << S3C64XX_SPI_CLKSEL_SRCSHFT, + regs + S3C64XX_SPI_CLK_CFG); + writel(0, regs + S3C64XX_SPI_MODE_CFG); + writel(0, regs + S3C64XX_SPI_PACKET_CNT); + + /* Clear any irq pending bits */ + writel(readl(regs + S3C64XX_SPI_PENDING_CLR), + regs + S3C64XX_SPI_PENDING_CLR); + + writel(0, regs 
+ S3C64XX_SPI_SWAP_CFG); + + val = readl(regs + S3C64XX_SPI_MODE_CFG); + val &= ~S3C64XX_SPI_MODE_4BURST; + val &= ~(S3C64XX_SPI_MAX_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF); + val |= (S3C64XX_SPI_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF); + writel(val, regs + S3C64XX_SPI_MODE_CFG); + + flush_fifo(sdd); +} + +static int __init s3c64xx_spi_probe(struct platform_device *pdev) +{ + struct resource *mem_res, *dmatx_res, *dmarx_res; + struct s3c64xx_spi_driver_data *sdd; + struct s3c64xx_spi_info *sci; + struct spi_master *master; + int ret; + + if (pdev->id < 0) { + dev_err(&pdev->dev, + "Invalid platform device id-%d\n", pdev->id); + return -ENODEV; + } + + if (pdev->dev.platform_data == NULL) { + dev_err(&pdev->dev, "platform_data missing!\n"); + return -ENODEV; + } + + sci = pdev->dev.platform_data; + if (!sci->src_clk_name) { + dev_err(&pdev->dev, + "Board init must call s3c64xx_spi_set_info()\n"); + return -EINVAL; + } + + /* Check for availability of necessary resource */ + + dmatx_res = platform_get_resource(pdev, IORESOURCE_DMA, 0); + if (dmatx_res == NULL) { + dev_err(&pdev->dev, "Unable to get SPI-Tx dma resource\n"); + return -ENXIO; + } + + dmarx_res = platform_get_resource(pdev, IORESOURCE_DMA, 1); + if (dmarx_res == NULL) { + dev_err(&pdev->dev, "Unable to get SPI-Rx dma resource\n"); + return -ENXIO; + } + + mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (mem_res == NULL) { + dev_err(&pdev->dev, "Unable to get SPI MEM resource\n"); + return -ENXIO; + } + + master = spi_alloc_master(&pdev->dev, + sizeof(struct s3c64xx_spi_driver_data)); + if (master == NULL) { + dev_err(&pdev->dev, "Unable to allocate SPI Master\n"); + return -ENOMEM; + } + + platform_set_drvdata(pdev, master); + + sdd = spi_master_get_devdata(master); + sdd->master = master; + sdd->cntrlr_info = sci; + sdd->pdev = pdev; + sdd->sfr_start = mem_res->start; + sdd->tx_dmach = dmatx_res->start; + sdd->rx_dmach = dmarx_res->start; + + sdd->cur_bpw = 8; + + master->bus_num = pdev->id; + master->setup = s3c64xx_spi_setup; + master->transfer = s3c64xx_spi_transfer; + master->num_chipselect = sci->num_cs; + master->dma_alignment = 8; + /* the spi->mode bits understood by this driver: */ + master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; + + if (request_mem_region(mem_res->start, + resource_size(mem_res), pdev->name) == NULL) { + dev_err(&pdev->dev, "Req mem region failed\n"); + ret = -ENXIO; + goto err0; + } + + sdd->regs = ioremap(mem_res->start, resource_size(mem_res)); + if (sdd->regs == NULL) { + dev_err(&pdev->dev, "Unable to remap IO\n"); + ret = -ENXIO; + goto err1; + } + + if (sci->cfg_gpio == NULL || sci->cfg_gpio(pdev)) { + dev_err(&pdev->dev, "Unable to config gpio\n"); + ret = -EBUSY; + goto err2; + } + + /* Setup clocks */ + sdd->clk = clk_get(&pdev->dev, "spi"); + if (IS_ERR(sdd->clk)) { + dev_err(&pdev->dev, "Unable to acquire clock 'spi'\n"); + ret = PTR_ERR(sdd->clk); + goto err3; + } + + if (clk_enable(sdd->clk)) { + dev_err(&pdev->dev, "Couldn't enable clock 'spi'\n"); + ret = -EBUSY; + goto err4; + } + + sdd->src_clk = clk_get(&pdev->dev, sci->src_clk_name); + if (IS_ERR(sdd->src_clk)) { + dev_err(&pdev->dev, + "Unable to acquire clock '%s'\n", sci->src_clk_name); + ret = PTR_ERR(sdd->src_clk); + goto err5; + } + + if (clk_enable(sdd->src_clk)) { + dev_err(&pdev->dev, "Couldn't enable clock '%s'\n", + sci->src_clk_name); + ret = -EBUSY; + goto err6; + } + + sdd->workqueue = create_singlethread_workqueue( + dev_name(master->dev.parent)); + if (sdd->workqueue == NULL) { + 
dev_err(&pdev->dev, "Unable to create workqueue\n"); + ret = -ENOMEM; + goto err7; + } + + /* Setup Deufult Mode */ + s3c64xx_spi_hwinit(sdd, pdev->id); + + spin_lock_init(&sdd->lock); + init_completion(&sdd->xfer_completion); + INIT_WORK(&sdd->work, s3c64xx_spi_work); + INIT_LIST_HEAD(&sdd->queue); + + if (spi_register_master(master)) { + dev_err(&pdev->dev, "cannot register SPI master\n"); + ret = -EBUSY; + goto err8; + } + + dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d " + "with %d Slaves attached\n", + pdev->id, master->num_chipselect); + dev_dbg(&pdev->dev, "\tIOmem=[0x%x-0x%x]\tDMA=[Rx-%d, Tx-%d]\n", + mem_res->end, mem_res->start, + sdd->rx_dmach, sdd->tx_dmach); + + return 0; + +err8: + destroy_workqueue(sdd->workqueue); +err7: + clk_disable(sdd->src_clk); +err6: + clk_put(sdd->src_clk); +err5: + clk_disable(sdd->clk); +err4: + clk_put(sdd->clk); +err3: +err2: + iounmap((void *) sdd->regs); +err1: + release_mem_region(mem_res->start, resource_size(mem_res)); +err0: + platform_set_drvdata(pdev, NULL); + spi_master_put(master); + + return ret; +} + +static int s3c64xx_spi_remove(struct platform_device *pdev) +{ + struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); + struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master); + struct resource *mem_res; + unsigned long flags; + + spin_lock_irqsave(&sdd->lock, flags); + sdd->state |= SUSPND; + spin_unlock_irqrestore(&sdd->lock, flags); + + while (sdd->state & SPIBUSY) + msleep(10); + + spi_unregister_master(master); + + destroy_workqueue(sdd->workqueue); + + clk_disable(sdd->src_clk); + clk_put(sdd->src_clk); + + clk_disable(sdd->clk); + clk_put(sdd->clk); + + iounmap((void *) sdd->regs); + + mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (mem_res != NULL) + release_mem_region(mem_res->start, resource_size(mem_res)); + + platform_set_drvdata(pdev, NULL); + spi_master_put(master); + + return 0; +} + +#ifdef CONFIG_PM +static int s3c64xx_spi_suspend(struct platform_device *pdev, pm_message_t state) +{ + struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); + struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master); + unsigned long flags; + + spin_lock_irqsave(&sdd->lock, flags); + sdd->state |= SUSPND; + spin_unlock_irqrestore(&sdd->lock, flags); + + while (sdd->state & SPIBUSY) + msleep(10); + + /* Disable the clock */ + clk_disable(sdd->src_clk); + clk_disable(sdd->clk); + + sdd->cur_speed = 0; /* Output Clock is stopped */ + + return 0; +} + +static int s3c64xx_spi_resume(struct platform_device *pdev) +{ + struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); + struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master); + struct s3c64xx_spi_info *sci = sdd->cntrlr_info; + unsigned long flags; + + sci->cfg_gpio(pdev); + + /* Enable the clock */ + clk_enable(sdd->src_clk); + clk_enable(sdd->clk); + + s3c64xx_spi_hwinit(sdd, pdev->id); + + spin_lock_irqsave(&sdd->lock, flags); + sdd->state &= ~SUSPND; + spin_unlock_irqrestore(&sdd->lock, flags); + + return 0; +} +#else +#define s3c64xx_spi_suspend NULL +#define s3c64xx_spi_resume NULL +#endif /* CONFIG_PM */ + +static struct platform_driver s3c64xx_spi_driver = { + .driver = { + .name = "s3c64xx-spi", + .owner = THIS_MODULE, + }, + .remove = s3c64xx_spi_remove, + .suspend = s3c64xx_spi_suspend, + .resume = s3c64xx_spi_resume, +}; +MODULE_ALIAS("platform:s3c64xx-spi"); + +static int __init s3c64xx_spi_init(void) +{ + return platform_driver_probe(&s3c64xx_spi_driver, 
s3c64xx_spi_probe); +} +subsys_initcall(s3c64xx_spi_init); + +static void __exit s3c64xx_spi_exit(void) +{ + platform_driver_unregister(&s3c64xx_spi_driver); +} +module_exit(s3c64xx_spi_exit); + +MODULE_AUTHOR("Jaswinder Singh "); +MODULE_DESCRIPTION("S3C64XX SPI Controller Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c new file mode 100644 index 0000000..e00d94b --- /dev/null +++ b/drivers/spi/spi-sh-msiof.c @@ -0,0 +1,749 @@ +/* + * SuperH MSIOF SPI Master Interface + * + * Copyright (c) 2009 Magnus Damm + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include + +struct sh_msiof_spi_priv { + struct spi_bitbang bitbang; /* must be first for spi_bitbang.c */ + void __iomem *mapbase; + struct clk *clk; + struct platform_device *pdev; + struct sh_msiof_spi_info *info; + struct completion done; + unsigned long flags; + int tx_fifo_size; + int rx_fifo_size; +}; + +#define TMDR1 0x00 +#define TMDR2 0x04 +#define TMDR3 0x08 +#define RMDR1 0x10 +#define RMDR2 0x14 +#define RMDR3 0x18 +#define TSCR 0x20 +#define RSCR 0x22 +#define CTR 0x28 +#define FCTR 0x30 +#define STR 0x40 +#define IER 0x44 +#define TDR1 0x48 +#define TDR2 0x4c +#define TFDR 0x50 +#define RDR1 0x58 +#define RDR2 0x5c +#define RFDR 0x60 + +#define CTR_TSCKE (1 << 15) +#define CTR_TFSE (1 << 14) +#define CTR_TXE (1 << 9) +#define CTR_RXE (1 << 8) + +#define STR_TEOF (1 << 23) +#define STR_REOF (1 << 7) + +static u32 sh_msiof_read(struct sh_msiof_spi_priv *p, int reg_offs) +{ + switch (reg_offs) { + case TSCR: + case RSCR: + return ioread16(p->mapbase + reg_offs); + default: + return ioread32(p->mapbase + reg_offs); + } +} + +static void sh_msiof_write(struct sh_msiof_spi_priv *p, int reg_offs, + u32 value) +{ + switch (reg_offs) { + case TSCR: + case RSCR: + iowrite16(value, p->mapbase + reg_offs); + break; + default: + iowrite32(value, p->mapbase + reg_offs); + break; + } +} + +static int sh_msiof_modify_ctr_wait(struct sh_msiof_spi_priv *p, + u32 clr, u32 set) +{ + u32 mask = clr | set; + u32 data; + int k; + + data = sh_msiof_read(p, CTR); + data &= ~clr; + data |= set; + sh_msiof_write(p, CTR, data); + + for (k = 100; k > 0; k--) { + if ((sh_msiof_read(p, CTR) & mask) == set) + break; + + udelay(10); + } + + return k > 0 ? 
0 : -ETIMEDOUT; +} + +static irqreturn_t sh_msiof_spi_irq(int irq, void *data) +{ + struct sh_msiof_spi_priv *p = data; + + /* just disable the interrupt and wake up */ + sh_msiof_write(p, IER, 0); + complete(&p->done); + + return IRQ_HANDLED; +} + +static struct { + unsigned short div; + unsigned short scr; +} const sh_msiof_spi_clk_table[] = { + { 1, 0x0007 }, + { 2, 0x0000 }, + { 4, 0x0001 }, + { 8, 0x0002 }, + { 16, 0x0003 }, + { 32, 0x0004 }, + { 64, 0x1f00 }, + { 128, 0x1f01 }, + { 256, 0x1f02 }, + { 512, 0x1f03 }, + { 1024, 0x1f04 }, +}; + +static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p, + unsigned long parent_rate, + unsigned long spi_hz) +{ + unsigned long div = 1024; + size_t k; + + if (!WARN_ON(!spi_hz || !parent_rate)) + div = parent_rate / spi_hz; + + /* TODO: make more fine grained */ + + for (k = 0; k < ARRAY_SIZE(sh_msiof_spi_clk_table); k++) { + if (sh_msiof_spi_clk_table[k].div >= div) + break; + } + + k = min_t(int, k, ARRAY_SIZE(sh_msiof_spi_clk_table) - 1); + + sh_msiof_write(p, TSCR, sh_msiof_spi_clk_table[k].scr); + sh_msiof_write(p, RSCR, sh_msiof_spi_clk_table[k].scr); +} + +static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p, + u32 cpol, u32 cpha, + u32 tx_hi_z, u32 lsb_first) +{ + u32 tmp; + int edge; + + /* + * CPOL CPHA TSCKIZ RSCKIZ TEDG REDG + * 0 0 10 10 1 1 + * 0 1 10 10 0 0 + * 1 0 11 11 0 0 + * 1 1 11 11 1 1 + */ + sh_msiof_write(p, FCTR, 0); + sh_msiof_write(p, TMDR1, 0xe2000005 | (lsb_first << 24)); + sh_msiof_write(p, RMDR1, 0x22000005 | (lsb_first << 24)); + + tmp = 0xa0000000; + tmp |= cpol << 30; /* TSCKIZ */ + tmp |= cpol << 28; /* RSCKIZ */ + + edge = cpol ^ !cpha; + + tmp |= edge << 27; /* TEDG */ + tmp |= edge << 26; /* REDG */ + tmp |= (tx_hi_z ? 2 : 0) << 22; /* TXDIZ */ + sh_msiof_write(p, CTR, tmp); +} + +static void sh_msiof_spi_set_mode_regs(struct sh_msiof_spi_priv *p, + const void *tx_buf, void *rx_buf, + u32 bits, u32 words) +{ + u32 dr2 = ((bits - 1) << 24) | ((words - 1) << 16); + + if (tx_buf) + sh_msiof_write(p, TMDR2, dr2); + else + sh_msiof_write(p, TMDR2, dr2 | 1); + + if (rx_buf) + sh_msiof_write(p, RMDR2, dr2); + + sh_msiof_write(p, IER, STR_TEOF | STR_REOF); +} + +static void sh_msiof_reset_str(struct sh_msiof_spi_priv *p) +{ + sh_msiof_write(p, STR, sh_msiof_read(p, STR)); +} + +static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p, + const void *tx_buf, int words, int fs) +{ + const u8 *buf_8 = tx_buf; + int k; + + for (k = 0; k < words; k++) + sh_msiof_write(p, TFDR, buf_8[k] << fs); +} + +static void sh_msiof_spi_write_fifo_16(struct sh_msiof_spi_priv *p, + const void *tx_buf, int words, int fs) +{ + const u16 *buf_16 = tx_buf; + int k; + + for (k = 0; k < words; k++) + sh_msiof_write(p, TFDR, buf_16[k] << fs); +} + +static void sh_msiof_spi_write_fifo_16u(struct sh_msiof_spi_priv *p, + const void *tx_buf, int words, int fs) +{ + const u16 *buf_16 = tx_buf; + int k; + + for (k = 0; k < words; k++) + sh_msiof_write(p, TFDR, get_unaligned(&buf_16[k]) << fs); +} + +static void sh_msiof_spi_write_fifo_32(struct sh_msiof_spi_priv *p, + const void *tx_buf, int words, int fs) +{ + const u32 *buf_32 = tx_buf; + int k; + + for (k = 0; k < words; k++) + sh_msiof_write(p, TFDR, buf_32[k] << fs); +} + +static void sh_msiof_spi_write_fifo_32u(struct sh_msiof_spi_priv *p, + const void *tx_buf, int words, int fs) +{ + const u32 *buf_32 = tx_buf; + int k; + + for (k = 0; k < words; k++) + sh_msiof_write(p, TFDR, get_unaligned(&buf_32[k]) << fs); +} + +static void 
sh_msiof_spi_write_fifo_s32(struct sh_msiof_spi_priv *p, + const void *tx_buf, int words, int fs) +{ + const u32 *buf_32 = tx_buf; + int k; + + for (k = 0; k < words; k++) + sh_msiof_write(p, TFDR, swab32(buf_32[k] << fs)); +} + +static void sh_msiof_spi_write_fifo_s32u(struct sh_msiof_spi_priv *p, + const void *tx_buf, int words, int fs) +{ + const u32 *buf_32 = tx_buf; + int k; + + for (k = 0; k < words; k++) + sh_msiof_write(p, TFDR, swab32(get_unaligned(&buf_32[k]) << fs)); +} + +static void sh_msiof_spi_read_fifo_8(struct sh_msiof_spi_priv *p, + void *rx_buf, int words, int fs) +{ + u8 *buf_8 = rx_buf; + int k; + + for (k = 0; k < words; k++) + buf_8[k] = sh_msiof_read(p, RFDR) >> fs; +} + +static void sh_msiof_spi_read_fifo_16(struct sh_msiof_spi_priv *p, + void *rx_buf, int words, int fs) +{ + u16 *buf_16 = rx_buf; + int k; + + for (k = 0; k < words; k++) + buf_16[k] = sh_msiof_read(p, RFDR) >> fs; +} + +static void sh_msiof_spi_read_fifo_16u(struct sh_msiof_spi_priv *p, + void *rx_buf, int words, int fs) +{ + u16 *buf_16 = rx_buf; + int k; + + for (k = 0; k < words; k++) + put_unaligned(sh_msiof_read(p, RFDR) >> fs, &buf_16[k]); +} + +static void sh_msiof_spi_read_fifo_32(struct sh_msiof_spi_priv *p, + void *rx_buf, int words, int fs) +{ + u32 *buf_32 = rx_buf; + int k; + + for (k = 0; k < words; k++) + buf_32[k] = sh_msiof_read(p, RFDR) >> fs; +} + +static void sh_msiof_spi_read_fifo_32u(struct sh_msiof_spi_priv *p, + void *rx_buf, int words, int fs) +{ + u32 *buf_32 = rx_buf; + int k; + + for (k = 0; k < words; k++) + put_unaligned(sh_msiof_read(p, RFDR) >> fs, &buf_32[k]); +} + +static void sh_msiof_spi_read_fifo_s32(struct sh_msiof_spi_priv *p, + void *rx_buf, int words, int fs) +{ + u32 *buf_32 = rx_buf; + int k; + + for (k = 0; k < words; k++) + buf_32[k] = swab32(sh_msiof_read(p, RFDR) >> fs); +} + +static void sh_msiof_spi_read_fifo_s32u(struct sh_msiof_spi_priv *p, + void *rx_buf, int words, int fs) +{ + u32 *buf_32 = rx_buf; + int k; + + for (k = 0; k < words; k++) + put_unaligned(swab32(sh_msiof_read(p, RFDR) >> fs), &buf_32[k]); +} + +static int sh_msiof_spi_bits(struct spi_device *spi, struct spi_transfer *t) +{ + int bits; + + bits = t ? t->bits_per_word : 0; + if (!bits) + bits = spi->bits_per_word; + return bits; +} + +static unsigned long sh_msiof_spi_hz(struct spi_device *spi, + struct spi_transfer *t) +{ + unsigned long hz; + + hz = t ? t->speed_hz : 0; + if (!hz) + hz = spi->max_speed_hz; + return hz; +} + +static int sh_msiof_spi_setup_transfer(struct spi_device *spi, + struct spi_transfer *t) +{ + int bits; + + /* noting to check hz values against since parent clock is disabled */ + + bits = sh_msiof_spi_bits(spi, t); + if (bits < 8) + return -EINVAL; + if (bits > 32) + return -EINVAL; + + return spi_bitbang_setup_transfer(spi, t); +} + +static void sh_msiof_spi_chipselect(struct spi_device *spi, int is_on) +{ + struct sh_msiof_spi_priv *p = spi_master_get_devdata(spi->master); + int value; + + /* chip select is active low unless SPI_CS_HIGH is set */ + if (spi->mode & SPI_CS_HIGH) + value = (is_on == BITBANG_CS_ACTIVE) ? 1 : 0; + else + value = (is_on == BITBANG_CS_ACTIVE) ? 
0 : 1; + + if (is_on == BITBANG_CS_ACTIVE) { + if (!test_and_set_bit(0, &p->flags)) { + pm_runtime_get_sync(&p->pdev->dev); + clk_enable(p->clk); + } + + /* Configure pins before asserting CS */ + sh_msiof_spi_set_pin_regs(p, !!(spi->mode & SPI_CPOL), + !!(spi->mode & SPI_CPHA), + !!(spi->mode & SPI_3WIRE), + !!(spi->mode & SPI_LSB_FIRST)); + } + + /* use spi->controller data for CS (same strategy as spi_gpio) */ + gpio_set_value((unsigned)spi->controller_data, value); + + if (is_on == BITBANG_CS_INACTIVE) { + if (test_and_clear_bit(0, &p->flags)) { + clk_disable(p->clk); + pm_runtime_put(&p->pdev->dev); + } + } +} + +static int sh_msiof_spi_txrx_once(struct sh_msiof_spi_priv *p, + void (*tx_fifo)(struct sh_msiof_spi_priv *, + const void *, int, int), + void (*rx_fifo)(struct sh_msiof_spi_priv *, + void *, int, int), + const void *tx_buf, void *rx_buf, + int words, int bits) +{ + int fifo_shift; + int ret; + + /* limit maximum word transfer to rx/tx fifo size */ + if (tx_buf) + words = min_t(int, words, p->tx_fifo_size); + if (rx_buf) + words = min_t(int, words, p->rx_fifo_size); + + /* the fifo contents need shifting */ + fifo_shift = 32 - bits; + + /* setup msiof transfer mode registers */ + sh_msiof_spi_set_mode_regs(p, tx_buf, rx_buf, bits, words); + + /* write tx fifo */ + if (tx_buf) + tx_fifo(p, tx_buf, words, fifo_shift); + + /* setup clock and rx/tx signals */ + ret = sh_msiof_modify_ctr_wait(p, 0, CTR_TSCKE); + if (rx_buf) + ret = ret ? ret : sh_msiof_modify_ctr_wait(p, 0, CTR_RXE); + ret = ret ? ret : sh_msiof_modify_ctr_wait(p, 0, CTR_TXE); + + /* start by setting frame bit */ + INIT_COMPLETION(p->done); + ret = ret ? ret : sh_msiof_modify_ctr_wait(p, 0, CTR_TFSE); + if (ret) { + dev_err(&p->pdev->dev, "failed to start hardware\n"); + goto err; + } + + /* wait for tx fifo to be emptied / rx fifo to be filled */ + wait_for_completion(&p->done); + + /* read rx fifo */ + if (rx_buf) + rx_fifo(p, rx_buf, words, fifo_shift); + + /* clear status bits */ + sh_msiof_reset_str(p); + + /* shut down frame, tx/tx and clock signals */ + ret = sh_msiof_modify_ctr_wait(p, CTR_TFSE, 0); + ret = ret ? ret : sh_msiof_modify_ctr_wait(p, CTR_TXE, 0); + if (rx_buf) + ret = ret ? ret : sh_msiof_modify_ctr_wait(p, CTR_RXE, 0); + ret = ret ? 
ret : sh_msiof_modify_ctr_wait(p, CTR_TSCKE, 0); + if (ret) { + dev_err(&p->pdev->dev, "failed to shut down hardware\n"); + goto err; + } + + return words; + + err: + sh_msiof_write(p, IER, 0); + return ret; +} + +static int sh_msiof_spi_txrx(struct spi_device *spi, struct spi_transfer *t) +{ + struct sh_msiof_spi_priv *p = spi_master_get_devdata(spi->master); + void (*tx_fifo)(struct sh_msiof_spi_priv *, const void *, int, int); + void (*rx_fifo)(struct sh_msiof_spi_priv *, void *, int, int); + int bits; + int bytes_per_word; + int bytes_done; + int words; + int n; + bool swab; + + bits = sh_msiof_spi_bits(spi, t); + + if (bits <= 8 && t->len > 15 && !(t->len & 3)) { + bits = 32; + swab = true; + } else { + swab = false; + } + + /* setup bytes per word and fifo read/write functions */ + if (bits <= 8) { + bytes_per_word = 1; + tx_fifo = sh_msiof_spi_write_fifo_8; + rx_fifo = sh_msiof_spi_read_fifo_8; + } else if (bits <= 16) { + bytes_per_word = 2; + if ((unsigned long)t->tx_buf & 0x01) + tx_fifo = sh_msiof_spi_write_fifo_16u; + else + tx_fifo = sh_msiof_spi_write_fifo_16; + + if ((unsigned long)t->rx_buf & 0x01) + rx_fifo = sh_msiof_spi_read_fifo_16u; + else + rx_fifo = sh_msiof_spi_read_fifo_16; + } else if (swab) { + bytes_per_word = 4; + if ((unsigned long)t->tx_buf & 0x03) + tx_fifo = sh_msiof_spi_write_fifo_s32u; + else + tx_fifo = sh_msiof_spi_write_fifo_s32; + + if ((unsigned long)t->rx_buf & 0x03) + rx_fifo = sh_msiof_spi_read_fifo_s32u; + else + rx_fifo = sh_msiof_spi_read_fifo_s32; + } else { + bytes_per_word = 4; + if ((unsigned long)t->tx_buf & 0x03) + tx_fifo = sh_msiof_spi_write_fifo_32u; + else + tx_fifo = sh_msiof_spi_write_fifo_32; + + if ((unsigned long)t->rx_buf & 0x03) + rx_fifo = sh_msiof_spi_read_fifo_32u; + else + rx_fifo = sh_msiof_spi_read_fifo_32; + } + + /* setup clocks (clock already enabled in chipselect()) */ + sh_msiof_spi_set_clk_regs(p, clk_get_rate(p->clk), + sh_msiof_spi_hz(spi, t)); + + /* transfer in fifo sized chunks */ + words = t->len / bytes_per_word; + bytes_done = 0; + + while (bytes_done < t->len) { + void *rx_buf = t->rx_buf ? t->rx_buf + bytes_done : NULL; + const void *tx_buf = t->tx_buf ? 
t->tx_buf + bytes_done : NULL; + n = sh_msiof_spi_txrx_once(p, tx_fifo, rx_fifo, + tx_buf, + rx_buf, + words, bits); + if (n < 0) + break; + + bytes_done += n * bytes_per_word; + words -= n; + } + + return bytes_done; +} + +static u32 sh_msiof_spi_txrx_word(struct spi_device *spi, unsigned nsecs, + u32 word, u8 bits) +{ + BUG(); /* unused but needed by bitbang code */ + return 0; +} + +static int sh_msiof_spi_probe(struct platform_device *pdev) +{ + struct resource *r; + struct spi_master *master; + struct sh_msiof_spi_priv *p; + char clk_name[16]; + int i; + int ret; + + master = spi_alloc_master(&pdev->dev, sizeof(struct sh_msiof_spi_priv)); + if (master == NULL) { + dev_err(&pdev->dev, "failed to allocate spi master\n"); + ret = -ENOMEM; + goto err0; + } + + p = spi_master_get_devdata(master); + + platform_set_drvdata(pdev, p); + p->info = pdev->dev.platform_data; + init_completion(&p->done); + + snprintf(clk_name, sizeof(clk_name), "msiof%d", pdev->id); + p->clk = clk_get(&pdev->dev, clk_name); + if (IS_ERR(p->clk)) { + dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name); + ret = PTR_ERR(p->clk); + goto err1; + } + + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + i = platform_get_irq(pdev, 0); + if (!r || i < 0) { + dev_err(&pdev->dev, "cannot get platform resources\n"); + ret = -ENOENT; + goto err2; + } + p->mapbase = ioremap_nocache(r->start, resource_size(r)); + if (!p->mapbase) { + dev_err(&pdev->dev, "unable to ioremap\n"); + ret = -ENXIO; + goto err2; + } + + ret = request_irq(i, sh_msiof_spi_irq, IRQF_DISABLED, + dev_name(&pdev->dev), p); + if (ret) { + dev_err(&pdev->dev, "unable to request irq\n"); + goto err3; + } + + p->pdev = pdev; + pm_runtime_enable(&pdev->dev); + + /* The standard version of MSIOF use 64 word FIFOs */ + p->tx_fifo_size = 64; + p->rx_fifo_size = 64; + + /* Platform data may override FIFO sizes */ + if (p->info->tx_fifo_override) + p->tx_fifo_size = p->info->tx_fifo_override; + if (p->info->rx_fifo_override) + p->rx_fifo_size = p->info->rx_fifo_override; + + /* init master and bitbang code */ + master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; + master->mode_bits |= SPI_LSB_FIRST | SPI_3WIRE; + master->flags = 0; + master->bus_num = pdev->id; + master->num_chipselect = p->info->num_chipselect; + master->setup = spi_bitbang_setup; + master->cleanup = spi_bitbang_cleanup; + + p->bitbang.master = master; + p->bitbang.chipselect = sh_msiof_spi_chipselect; + p->bitbang.setup_transfer = sh_msiof_spi_setup_transfer; + p->bitbang.txrx_bufs = sh_msiof_spi_txrx; + p->bitbang.txrx_word[SPI_MODE_0] = sh_msiof_spi_txrx_word; + p->bitbang.txrx_word[SPI_MODE_1] = sh_msiof_spi_txrx_word; + p->bitbang.txrx_word[SPI_MODE_2] = sh_msiof_spi_txrx_word; + p->bitbang.txrx_word[SPI_MODE_3] = sh_msiof_spi_txrx_word; + + ret = spi_bitbang_start(&p->bitbang); + if (ret == 0) + return 0; + + pm_runtime_disable(&pdev->dev); + err3: + iounmap(p->mapbase); + err2: + clk_put(p->clk); + err1: + spi_master_put(master); + err0: + return ret; +} + +static int sh_msiof_spi_remove(struct platform_device *pdev) +{ + struct sh_msiof_spi_priv *p = platform_get_drvdata(pdev); + int ret; + + ret = spi_bitbang_stop(&p->bitbang); + if (!ret) { + pm_runtime_disable(&pdev->dev); + free_irq(platform_get_irq(pdev, 0), p); + iounmap(p->mapbase); + clk_put(p->clk); + spi_master_put(p->bitbang.master); + } + return ret; +} + +static int sh_msiof_spi_runtime_nop(struct device *dev) +{ + /* Runtime PM callback shared between ->runtime_suspend() + * and ->runtime_resume(). 
Simply returns success. + * + * This driver re-initializes all registers after + * pm_runtime_get_sync() anyway so there is no need + * to save and restore registers here. + */ + return 0; +} + +static struct dev_pm_ops sh_msiof_spi_dev_pm_ops = { + .runtime_suspend = sh_msiof_spi_runtime_nop, + .runtime_resume = sh_msiof_spi_runtime_nop, +}; + +static struct platform_driver sh_msiof_spi_drv = { + .probe = sh_msiof_spi_probe, + .remove = sh_msiof_spi_remove, + .driver = { + .name = "spi_sh_msiof", + .owner = THIS_MODULE, + .pm = &sh_msiof_spi_dev_pm_ops, + }, +}; + +static int __init sh_msiof_spi_init(void) +{ + return platform_driver_register(&sh_msiof_spi_drv); +} +module_init(sh_msiof_spi_init); + +static void __exit sh_msiof_spi_exit(void) +{ + platform_driver_unregister(&sh_msiof_spi_drv); +} +module_exit(sh_msiof_spi_exit); + +MODULE_DESCRIPTION("SuperH MSIOF SPI Master Interface Driver"); +MODULE_AUTHOR("Magnus Damm"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:spi_sh_msiof"); diff --git a/drivers/spi/spi-sh-sci.c b/drivers/spi/spi-sh-sci.c new file mode 100644 index 0000000..e7779c0 --- /dev/null +++ b/drivers/spi/spi-sh-sci.c @@ -0,0 +1,205 @@ +/* + * SH SCI SPI interface + * + * Copyright (c) 2008 Magnus Damm + * + * Based on S3C24XX GPIO based SPI driver, which is: + * Copyright (c) 2006 Ben Dooks + * Copyright (c) 2006 Simtec Electronics + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include + +struct sh_sci_spi { + struct spi_bitbang bitbang; + + void __iomem *membase; + unsigned char val; + struct sh_spi_info *info; + struct platform_device *dev; +}; + +#define SCSPTR(sp) (sp->membase + 0x1c) +#define PIN_SCK (1 << 2) +#define PIN_TXD (1 << 0) +#define PIN_RXD PIN_TXD +#define PIN_INIT ((1 << 1) | (1 << 3) | PIN_SCK | PIN_TXD) + +static inline void setbits(struct sh_sci_spi *sp, int bits, int on) +{ + /* + * We are the only user of SCSPTR so no locking is required. + * Reading bit 2 and 0 in SCSPTR gives pin state as input. + * Writing the same bits sets the output value. + * This makes regular read-modify-write difficult so we + * use sp->val to keep track of the latest register value. + */ + + if (on) + sp->val |= bits; + else + sp->val &= ~bits; + + iowrite8(sp->val, SCSPTR(sp)); +} + +static inline void setsck(struct spi_device *dev, int on) +{ + setbits(spi_master_get_devdata(dev->master), PIN_SCK, on); +} + +static inline void setmosi(struct spi_device *dev, int on) +{ + setbits(spi_master_get_devdata(dev->master), PIN_TXD, on); +} + +static inline u32 getmiso(struct spi_device *dev) +{ + struct sh_sci_spi *sp = spi_master_get_devdata(dev->master); + + return (ioread8(SCSPTR(sp)) & PIN_RXD) ? 
1 : 0; +} + +#define spidelay(x) ndelay(x) + +#include "spi-bitbang-txrx.h" + +static u32 sh_sci_spi_txrx_mode0(struct spi_device *spi, + unsigned nsecs, u32 word, u8 bits) +{ + return bitbang_txrx_be_cpha0(spi, nsecs, 0, 0, word, bits); +} + +static u32 sh_sci_spi_txrx_mode1(struct spi_device *spi, + unsigned nsecs, u32 word, u8 bits) +{ + return bitbang_txrx_be_cpha1(spi, nsecs, 0, 0, word, bits); +} + +static u32 sh_sci_spi_txrx_mode2(struct spi_device *spi, + unsigned nsecs, u32 word, u8 bits) +{ + return bitbang_txrx_be_cpha0(spi, nsecs, 1, 0, word, bits); +} + +static u32 sh_sci_spi_txrx_mode3(struct spi_device *spi, + unsigned nsecs, u32 word, u8 bits) +{ + return bitbang_txrx_be_cpha1(spi, nsecs, 1, 0, word, bits); +} + +static void sh_sci_spi_chipselect(struct spi_device *dev, int value) +{ + struct sh_sci_spi *sp = spi_master_get_devdata(dev->master); + + if (sp->info && sp->info->chip_select) + (sp->info->chip_select)(sp->info, dev->chip_select, value); +} + +static int sh_sci_spi_probe(struct platform_device *dev) +{ + struct resource *r; + struct spi_master *master; + struct sh_sci_spi *sp; + int ret; + + master = spi_alloc_master(&dev->dev, sizeof(struct sh_sci_spi)); + if (master == NULL) { + dev_err(&dev->dev, "failed to allocate spi master\n"); + ret = -ENOMEM; + goto err0; + } + + sp = spi_master_get_devdata(master); + + platform_set_drvdata(dev, sp); + sp->info = dev->dev.platform_data; + + /* setup spi bitbang adaptor */ + sp->bitbang.master = spi_master_get(master); + sp->bitbang.master->bus_num = sp->info->bus_num; + sp->bitbang.master->num_chipselect = sp->info->num_chipselect; + sp->bitbang.chipselect = sh_sci_spi_chipselect; + + sp->bitbang.txrx_word[SPI_MODE_0] = sh_sci_spi_txrx_mode0; + sp->bitbang.txrx_word[SPI_MODE_1] = sh_sci_spi_txrx_mode1; + sp->bitbang.txrx_word[SPI_MODE_2] = sh_sci_spi_txrx_mode2; + sp->bitbang.txrx_word[SPI_MODE_3] = sh_sci_spi_txrx_mode3; + + r = platform_get_resource(dev, IORESOURCE_MEM, 0); + if (r == NULL) { + ret = -ENOENT; + goto err1; + } + sp->membase = ioremap(r->start, resource_size(r)); + if (!sp->membase) { + ret = -ENXIO; + goto err1; + } + sp->val = ioread8(SCSPTR(sp)); + setbits(sp, PIN_INIT, 1); + + ret = spi_bitbang_start(&sp->bitbang); + if (!ret) + return 0; + + setbits(sp, PIN_INIT, 0); + iounmap(sp->membase); + err1: + spi_master_put(sp->bitbang.master); + err0: + return ret; +} + +static int sh_sci_spi_remove(struct platform_device *dev) +{ + struct sh_sci_spi *sp = platform_get_drvdata(dev); + + iounmap(sp->membase); + setbits(sp, PIN_INIT, 0); + spi_bitbang_stop(&sp->bitbang); + spi_master_put(sp->bitbang.master); + return 0; +} + +static struct platform_driver sh_sci_spi_drv = { + .probe = sh_sci_spi_probe, + .remove = sh_sci_spi_remove, + .driver = { + .name = "spi_sh_sci", + .owner = THIS_MODULE, + }, +}; + +static int __init sh_sci_spi_init(void) +{ + return platform_driver_register(&sh_sci_spi_drv); +} +module_init(sh_sci_spi_init); + +static void __exit sh_sci_spi_exit(void) +{ + platform_driver_unregister(&sh_sci_spi_drv); +} +module_exit(sh_sci_spi_exit); + +MODULE_DESCRIPTION("SH SCI SPI Driver"); +MODULE_AUTHOR("Magnus Damm "); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:spi_sh_sci"); diff --git a/drivers/spi/spi-sh.c b/drivers/spi/spi-sh.c new file mode 100644 index 0000000..9eedd71 --- /dev/null +++ b/drivers/spi/spi-sh.c @@ -0,0 +1,543 @@ +/* + * SH SPI bus driver + * + * Copyright (C) 2011 Renesas Solutions Corp. 
+ * + * Based on pxa2xx_spi.c: + * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define SPI_SH_TBR 0x00 +#define SPI_SH_RBR 0x00 +#define SPI_SH_CR1 0x08 +#define SPI_SH_CR2 0x10 +#define SPI_SH_CR3 0x18 +#define SPI_SH_CR4 0x20 +#define SPI_SH_CR5 0x28 + +/* CR1 */ +#define SPI_SH_TBE 0x80 +#define SPI_SH_TBF 0x40 +#define SPI_SH_RBE 0x20 +#define SPI_SH_RBF 0x10 +#define SPI_SH_PFONRD 0x08 +#define SPI_SH_SSDB 0x04 +#define SPI_SH_SSD 0x02 +#define SPI_SH_SSA 0x01 + +/* CR2 */ +#define SPI_SH_RSTF 0x80 +#define SPI_SH_LOOPBK 0x40 +#define SPI_SH_CPOL 0x20 +#define SPI_SH_CPHA 0x10 +#define SPI_SH_L1M0 0x08 + +/* CR3 */ +#define SPI_SH_MAX_BYTE 0xFF + +/* CR4 */ +#define SPI_SH_TBEI 0x80 +#define SPI_SH_TBFI 0x40 +#define SPI_SH_RBEI 0x20 +#define SPI_SH_RBFI 0x10 +#define SPI_SH_WPABRT 0x04 +#define SPI_SH_SSS 0x01 + +/* CR8 */ +#define SPI_SH_P1L0 0x80 +#define SPI_SH_PP1L0 0x40 +#define SPI_SH_MUXI 0x20 +#define SPI_SH_MUXIRQ 0x10 + +#define SPI_SH_FIFO_SIZE 32 +#define SPI_SH_SEND_TIMEOUT (3 * HZ) +#define SPI_SH_RECEIVE_TIMEOUT (HZ >> 3) + +#undef DEBUG + +struct spi_sh_data { + void __iomem *addr; + int irq; + struct spi_master *master; + struct list_head queue; + struct workqueue_struct *workqueue; + struct work_struct ws; + unsigned long cr1; + wait_queue_head_t wait; + spinlock_t lock; +}; + +static void spi_sh_write(struct spi_sh_data *ss, unsigned long data, + unsigned long offset) +{ + writel(data, ss->addr + offset); +} + +static unsigned long spi_sh_read(struct spi_sh_data *ss, unsigned long offset) +{ + return readl(ss->addr + offset); +} + +static void spi_sh_set_bit(struct spi_sh_data *ss, unsigned long val, + unsigned long offset) +{ + unsigned long tmp; + + tmp = spi_sh_read(ss, offset); + tmp |= val; + spi_sh_write(ss, tmp, offset); +} + +static void spi_sh_clear_bit(struct spi_sh_data *ss, unsigned long val, + unsigned long offset) +{ + unsigned long tmp; + + tmp = spi_sh_read(ss, offset); + tmp &= ~val; + spi_sh_write(ss, tmp, offset); +} + +static void clear_fifo(struct spi_sh_data *ss) +{ + spi_sh_set_bit(ss, SPI_SH_RSTF, SPI_SH_CR2); + spi_sh_clear_bit(ss, SPI_SH_RSTF, SPI_SH_CR2); +} + +static int spi_sh_wait_receive_buffer(struct spi_sh_data *ss) +{ + int timeout = 100000; + + while (spi_sh_read(ss, SPI_SH_CR1) & SPI_SH_RBE) { + udelay(10); + if (timeout-- < 0) + return -ETIMEDOUT; + } + return 0; +} + +static int spi_sh_wait_write_buffer_empty(struct spi_sh_data *ss) +{ + int timeout = 100000; + + while (!(spi_sh_read(ss, SPI_SH_CR1) & SPI_SH_TBE)) { + udelay(10); + if (timeout-- < 0) + return -ETIMEDOUT; + } + return 0; +} + +static int spi_sh_send(struct spi_sh_data *ss, struct spi_message *mesg, + struct spi_transfer *t) +{ + int i, retval = 0; + 
int remain = t->len; + int cur_len; + unsigned char *data; + unsigned long tmp; + long ret; + + if (t->len) + spi_sh_set_bit(ss, SPI_SH_SSA, SPI_SH_CR1); + + data = (unsigned char *)t->tx_buf; + while (remain > 0) { + cur_len = min(SPI_SH_FIFO_SIZE, remain); + for (i = 0; i < cur_len && + !(spi_sh_read(ss, SPI_SH_CR4) & + SPI_SH_WPABRT) && + !(spi_sh_read(ss, SPI_SH_CR1) & SPI_SH_TBF); + i++) + spi_sh_write(ss, (unsigned long)data[i], SPI_SH_TBR); + + if (spi_sh_read(ss, SPI_SH_CR4) & SPI_SH_WPABRT) { + /* Abort SPI operation */ + spi_sh_set_bit(ss, SPI_SH_WPABRT, SPI_SH_CR4); + retval = -EIO; + break; + } + + cur_len = i; + + remain -= cur_len; + data += cur_len; + + if (remain > 0) { + ss->cr1 &= ~SPI_SH_TBE; + spi_sh_set_bit(ss, SPI_SH_TBE, SPI_SH_CR4); + ret = wait_event_interruptible_timeout(ss->wait, + ss->cr1 & SPI_SH_TBE, + SPI_SH_SEND_TIMEOUT); + if (ret == 0 && !(ss->cr1 & SPI_SH_TBE)) { + printk(KERN_ERR "%s: timeout\n", __func__); + return -ETIMEDOUT; + } + } + } + + if (list_is_last(&t->transfer_list, &mesg->transfers)) { + tmp = spi_sh_read(ss, SPI_SH_CR1); + tmp = tmp & ~(SPI_SH_SSD | SPI_SH_SSDB); + spi_sh_write(ss, tmp, SPI_SH_CR1); + spi_sh_set_bit(ss, SPI_SH_SSA, SPI_SH_CR1); + + ss->cr1 &= ~SPI_SH_TBE; + spi_sh_set_bit(ss, SPI_SH_TBE, SPI_SH_CR4); + ret = wait_event_interruptible_timeout(ss->wait, + ss->cr1 & SPI_SH_TBE, + SPI_SH_SEND_TIMEOUT); + if (ret == 0 && (ss->cr1 & SPI_SH_TBE)) { + printk(KERN_ERR "%s: timeout\n", __func__); + return -ETIMEDOUT; + } + } + + return retval; +} + +static int spi_sh_receive(struct spi_sh_data *ss, struct spi_message *mesg, + struct spi_transfer *t) +{ + int i; + int remain = t->len; + int cur_len; + unsigned char *data; + unsigned long tmp; + long ret; + + if (t->len > SPI_SH_MAX_BYTE) + spi_sh_write(ss, SPI_SH_MAX_BYTE, SPI_SH_CR3); + else + spi_sh_write(ss, t->len, SPI_SH_CR3); + + tmp = spi_sh_read(ss, SPI_SH_CR1); + tmp = tmp & ~(SPI_SH_SSD | SPI_SH_SSDB); + spi_sh_write(ss, tmp, SPI_SH_CR1); + spi_sh_set_bit(ss, SPI_SH_SSA, SPI_SH_CR1); + + spi_sh_wait_write_buffer_empty(ss); + + data = (unsigned char *)t->rx_buf; + while (remain > 0) { + if (remain >= SPI_SH_FIFO_SIZE) { + ss->cr1 &= ~SPI_SH_RBF; + spi_sh_set_bit(ss, SPI_SH_RBF, SPI_SH_CR4); + ret = wait_event_interruptible_timeout(ss->wait, + ss->cr1 & SPI_SH_RBF, + SPI_SH_RECEIVE_TIMEOUT); + if (ret == 0 && + spi_sh_read(ss, SPI_SH_CR1) & SPI_SH_RBE) { + printk(KERN_ERR "%s: timeout\n", __func__); + return -ETIMEDOUT; + } + } + + cur_len = min(SPI_SH_FIFO_SIZE, remain); + for (i = 0; i < cur_len; i++) { + if (spi_sh_wait_receive_buffer(ss)) + break; + data[i] = (unsigned char)spi_sh_read(ss, SPI_SH_RBR); + } + + remain -= cur_len; + data += cur_len; + } + + /* deassert CS when SPI is receiving. 
*/ + if (t->len > SPI_SH_MAX_BYTE) { + clear_fifo(ss); + spi_sh_write(ss, 1, SPI_SH_CR3); + } else { + spi_sh_write(ss, 0, SPI_SH_CR3); + } + + return 0; +} + +static void spi_sh_work(struct work_struct *work) +{ + struct spi_sh_data *ss = container_of(work, struct spi_sh_data, ws); + struct spi_message *mesg; + struct spi_transfer *t; + unsigned long flags; + int ret; + + pr_debug("%s: enter\n", __func__); + + spin_lock_irqsave(&ss->lock, flags); + while (!list_empty(&ss->queue)) { + mesg = list_entry(ss->queue.next, struct spi_message, queue); + list_del_init(&mesg->queue); + + spin_unlock_irqrestore(&ss->lock, flags); + list_for_each_entry(t, &mesg->transfers, transfer_list) { + pr_debug("tx_buf = %p, rx_buf = %p\n", + t->tx_buf, t->rx_buf); + pr_debug("len = %d, delay_usecs = %d\n", + t->len, t->delay_usecs); + + if (t->tx_buf) { + ret = spi_sh_send(ss, mesg, t); + if (ret < 0) + goto error; + } + if (t->rx_buf) { + ret = spi_sh_receive(ss, mesg, t); + if (ret < 0) + goto error; + } + mesg->actual_length += t->len; + } + spin_lock_irqsave(&ss->lock, flags); + + mesg->status = 0; + mesg->complete(mesg->context); + } + + clear_fifo(ss); + spi_sh_set_bit(ss, SPI_SH_SSD, SPI_SH_CR1); + udelay(100); + + spi_sh_clear_bit(ss, SPI_SH_SSA | SPI_SH_SSDB | SPI_SH_SSD, + SPI_SH_CR1); + + clear_fifo(ss); + + spin_unlock_irqrestore(&ss->lock, flags); + + return; + + error: + mesg->status = ret; + mesg->complete(mesg->context); + + spi_sh_clear_bit(ss, SPI_SH_SSA | SPI_SH_SSDB | SPI_SH_SSD, + SPI_SH_CR1); + clear_fifo(ss); + +} + +static int spi_sh_setup(struct spi_device *spi) +{ + struct spi_sh_data *ss = spi_master_get_devdata(spi->master); + + if (!spi->bits_per_word) + spi->bits_per_word = 8; + + pr_debug("%s: enter\n", __func__); + + spi_sh_write(ss, 0xfe, SPI_SH_CR1); /* SPI sycle stop */ + spi_sh_write(ss, 0x00, SPI_SH_CR1); /* CR1 init */ + spi_sh_write(ss, 0x00, SPI_SH_CR3); /* CR3 init */ + + clear_fifo(ss); + + /* 1/8 clock */ + spi_sh_write(ss, spi_sh_read(ss, SPI_SH_CR2) | 0x07, SPI_SH_CR2); + udelay(10); + + return 0; +} + +static int spi_sh_transfer(struct spi_device *spi, struct spi_message *mesg) +{ + struct spi_sh_data *ss = spi_master_get_devdata(spi->master); + unsigned long flags; + + pr_debug("%s: enter\n", __func__); + pr_debug("\tmode = %02x\n", spi->mode); + + spin_lock_irqsave(&ss->lock, flags); + + mesg->actual_length = 0; + mesg->status = -EINPROGRESS; + + spi_sh_clear_bit(ss, SPI_SH_SSA, SPI_SH_CR1); + + list_add_tail(&mesg->queue, &ss->queue); + queue_work(ss->workqueue, &ss->ws); + + spin_unlock_irqrestore(&ss->lock, flags); + + return 0; +} + +static void spi_sh_cleanup(struct spi_device *spi) +{ + struct spi_sh_data *ss = spi_master_get_devdata(spi->master); + + pr_debug("%s: enter\n", __func__); + + spi_sh_clear_bit(ss, SPI_SH_SSA | SPI_SH_SSDB | SPI_SH_SSD, + SPI_SH_CR1); +} + +static irqreturn_t spi_sh_irq(int irq, void *_ss) +{ + struct spi_sh_data *ss = (struct spi_sh_data *)_ss; + unsigned long cr1; + + cr1 = spi_sh_read(ss, SPI_SH_CR1); + if (cr1 & SPI_SH_TBE) + ss->cr1 |= SPI_SH_TBE; + if (cr1 & SPI_SH_TBF) + ss->cr1 |= SPI_SH_TBF; + if (cr1 & SPI_SH_RBE) + ss->cr1 |= SPI_SH_RBE; + if (cr1 & SPI_SH_RBF) + ss->cr1 |= SPI_SH_RBF; + + if (ss->cr1) { + spi_sh_clear_bit(ss, ss->cr1, SPI_SH_CR4); + wake_up(&ss->wait); + } + + return IRQ_HANDLED; +} + +static int __devexit spi_sh_remove(struct platform_device *pdev) +{ + struct spi_sh_data *ss = dev_get_drvdata(&pdev->dev); + + spi_unregister_master(ss->master); + destroy_workqueue(ss->workqueue); + 
free_irq(ss->irq, ss); + iounmap(ss->addr); + + return 0; +} + +static int __devinit spi_sh_probe(struct platform_device *pdev) +{ + struct resource *res; + struct spi_master *master; + struct spi_sh_data *ss; + int ret, irq; + + /* get base addr */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (unlikely(res == NULL)) { + dev_err(&pdev->dev, "invalid resource\n"); + return -EINVAL; + } + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(&pdev->dev, "platform_get_irq error\n"); + return -ENODEV; + } + + master = spi_alloc_master(&pdev->dev, sizeof(struct spi_sh_data)); + if (master == NULL) { + dev_err(&pdev->dev, "spi_alloc_master error.\n"); + return -ENOMEM; + } + + ss = spi_master_get_devdata(master); + dev_set_drvdata(&pdev->dev, ss); + + ss->irq = irq; + ss->master = master; + ss->addr = ioremap(res->start, resource_size(res)); + if (ss->addr == NULL) { + dev_err(&pdev->dev, "ioremap error.\n"); + ret = -ENOMEM; + goto error1; + } + INIT_LIST_HEAD(&ss->queue); + spin_lock_init(&ss->lock); + INIT_WORK(&ss->ws, spi_sh_work); + init_waitqueue_head(&ss->wait); + ss->workqueue = create_singlethread_workqueue( + dev_name(master->dev.parent)); + if (ss->workqueue == NULL) { + dev_err(&pdev->dev, "create workqueue error\n"); + ret = -EBUSY; + goto error2; + } + + ret = request_irq(irq, spi_sh_irq, IRQF_DISABLED, "spi_sh", ss); + if (ret < 0) { + dev_err(&pdev->dev, "request_irq error\n"); + goto error3; + } + + master->num_chipselect = 2; + master->bus_num = pdev->id; + master->setup = spi_sh_setup; + master->transfer = spi_sh_transfer; + master->cleanup = spi_sh_cleanup; + + ret = spi_register_master(master); + if (ret < 0) { + printk(KERN_ERR "spi_register_master error.\n"); + goto error4; + } + + return 0; + + error4: + free_irq(irq, ss); + error3: + destroy_workqueue(ss->workqueue); + error2: + iounmap(ss->addr); + error1: + spi_master_put(master); + + return ret; +} + +static struct platform_driver spi_sh_driver = { + .probe = spi_sh_probe, + .remove = __devexit_p(spi_sh_remove), + .driver = { + .name = "sh_spi", + .owner = THIS_MODULE, + }, +}; + +static int __init spi_sh_init(void) +{ + return platform_driver_register(&spi_sh_driver); +} +module_init(spi_sh_init); + +static void __exit spi_sh_exit(void) +{ + platform_driver_unregister(&spi_sh_driver); +} +module_exit(spi_sh_exit); + +MODULE_DESCRIPTION("SH SPI bus driver"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Yoshihiro Shimoda"); +MODULE_ALIAS("platform:sh_spi"); diff --git a/drivers/spi/spi-stmp.c b/drivers/spi/spi-stmp.c new file mode 100644 index 0000000..fadff76 --- /dev/null +++ b/drivers/spi/spi-stmp.c @@ -0,0 +1,679 @@ +/* + * Freescale STMP378X SPI master driver + * + * Author: dmitry pervushin + * + * Copyright 2008 Freescale Semiconductor, Inc. All Rights Reserved. + * Copyright 2008 Embedded Alley Solutions, Inc All Rights Reserved. + */ + +/* + * The code contained herein is licensed under the GNU General Public + * License. 
You may obtain a copy of the GNU General Public License + * Version 2 or later at the following locations: + * + * http://www.opensource.org/licenses/gpl-license.html + * http://www.gnu.org/copyleft/gpl.html + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + + +/* 0 means DMA mode(recommended, default), !0 - PIO mode */ +static int pio; +static int clock; + +/* default timeout for busy waits is 2 seconds */ +#define STMP_SPI_TIMEOUT (2 * HZ) + +struct stmp_spi { + int id; + + void * __iomem regs; /* vaddr of the control registers */ + + int irq, err_irq; + u32 dma; + struct stmp3xxx_dma_descriptor d; + + u32 speed_khz; + u32 saved_timings; + u32 divider; + + struct clk *clk; + struct device *master_dev; + + struct work_struct work; + struct workqueue_struct *workqueue; + + /* lock protects queue access */ + spinlock_t lock; + struct list_head queue; + + struct completion done; +}; + +#define busy_wait(cond) \ + ({ \ + unsigned long end_jiffies = jiffies + STMP_SPI_TIMEOUT; \ + bool succeeded = false; \ + do { \ + if (cond) { \ + succeeded = true; \ + break; \ + } \ + cpu_relax(); \ + } while (time_before(jiffies, end_jiffies)); \ + succeeded; \ + }) + +/** + * stmp_spi_init_hw + * Initialize the SSP port + */ +static int stmp_spi_init_hw(struct stmp_spi *ss) +{ + int err = 0; + void *pins = ss->master_dev->platform_data; + + err = stmp3xxx_request_pin_group(pins, dev_name(ss->master_dev)); + if (err) + goto out; + + ss->clk = clk_get(NULL, "ssp"); + if (IS_ERR(ss->clk)) { + err = PTR_ERR(ss->clk); + goto out_free_pins; + } + clk_enable(ss->clk); + + stmp3xxx_reset_block(ss->regs, false); + stmp3xxx_dma_reset_channel(ss->dma); + + return 0; + +out_free_pins: + stmp3xxx_release_pin_group(pins, dev_name(ss->master_dev)); +out: + return err; +} + +static void stmp_spi_release_hw(struct stmp_spi *ss) +{ + void *pins = ss->master_dev->platform_data; + + if (ss->clk && !IS_ERR(ss->clk)) { + clk_disable(ss->clk); + clk_put(ss->clk); + } + stmp3xxx_release_pin_group(pins, dev_name(ss->master_dev)); +} + +static int stmp_spi_setup_transfer(struct spi_device *spi, + struct spi_transfer *t) +{ + u8 bits_per_word; + u32 hz; + struct stmp_spi *ss = spi_master_get_devdata(spi->master); + u16 rate; + + bits_per_word = spi->bits_per_word; + if (t && t->bits_per_word) + bits_per_word = t->bits_per_word; + + /* + * Calculate speed: + * - by default, use maximum speed from ssp clk + * - if device overrides it, use it + * - if transfer specifies other speed, use transfer's one + */ + hz = 1000 * ss->speed_khz / ss->divider; + if (spi->max_speed_hz) + hz = min(hz, spi->max_speed_hz); + if (t && t->speed_hz) + hz = min(hz, t->speed_hz); + + if (hz == 0) { + dev_err(&spi->dev, "Cannot continue with zero clock\n"); + return -EINVAL; + } + + if (bits_per_word != 8) { + dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n", + __func__, bits_per_word); + return -EINVAL; + } + + dev_dbg(&spi->dev, "Requested clk rate = %uHz, max = %uHz/%d = %uHz\n", + hz, ss->speed_khz, ss->divider, + ss->speed_khz * 1000 / ss->divider); + + if (ss->speed_khz * 1000 / ss->divider < hz) { + dev_err(&spi->dev, "%s, unsupported clock rate %uHz\n", + __func__, hz); + return -EINVAL; + } + + rate = 1000 * ss->speed_khz/ss->divider/hz; + + writel(BF(ss->divider, SSP_TIMING_CLOCK_DIVIDE) | + BF(rate - 1, SSP_TIMING_CLOCK_RATE), + HW_SSP_TIMING + ss->regs); + + writel(BF(1 /* mode SPI */, SSP_CTRL1_SSP_MODE) | + BF(4 /* 8 bits */, 
SSP_CTRL1_WORD_LENGTH) | + ((spi->mode & SPI_CPOL) ? BM_SSP_CTRL1_POLARITY : 0) | + ((spi->mode & SPI_CPHA) ? BM_SSP_CTRL1_PHASE : 0) | + (pio ? 0 : BM_SSP_CTRL1_DMA_ENABLE), + ss->regs + HW_SSP_CTRL1); + + return 0; +} + +static int stmp_spi_setup(struct spi_device *spi) +{ + /* spi_setup() does basic checks, + * stmp_spi_setup_transfer() does more later + */ + if (spi->bits_per_word != 8) { + dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n", + __func__, spi->bits_per_word); + return -EINVAL; + } + return 0; +} + +static inline u32 stmp_spi_cs(unsigned cs) +{ + return ((cs & 1) ? BM_SSP_CTRL0_WAIT_FOR_CMD : 0) | + ((cs & 2) ? BM_SSP_CTRL0_WAIT_FOR_IRQ : 0); +} + +static int stmp_spi_txrx_dma(struct stmp_spi *ss, int cs, + unsigned char *buf, dma_addr_t dma_buf, int len, + int first, int last, bool write) +{ + u32 c0 = 0; + dma_addr_t spi_buf_dma = dma_buf; + int status = 0; + enum dma_data_direction dir = write ? DMA_TO_DEVICE : DMA_FROM_DEVICE; + + c0 |= (first ? BM_SSP_CTRL0_LOCK_CS : 0); + c0 |= (last ? BM_SSP_CTRL0_IGNORE_CRC : 0); + c0 |= (write ? 0 : BM_SSP_CTRL0_READ); + c0 |= BM_SSP_CTRL0_DATA_XFER; + + c0 |= stmp_spi_cs(cs); + + c0 |= BF(len, SSP_CTRL0_XFER_COUNT); + + if (!dma_buf) + spi_buf_dma = dma_map_single(ss->master_dev, buf, len, dir); + + ss->d.command->cmd = + BF(len, APBH_CHn_CMD_XFER_COUNT) | + BF(1, APBH_CHn_CMD_CMDWORDS) | + BM_APBH_CHn_CMD_WAIT4ENDCMD | + BM_APBH_CHn_CMD_IRQONCMPLT | + BF(write ? BV_APBH_CHn_CMD_COMMAND__DMA_READ : + BV_APBH_CHn_CMD_COMMAND__DMA_WRITE, + APBH_CHn_CMD_COMMAND); + ss->d.command->pio_words[0] = c0; + ss->d.command->buf_ptr = spi_buf_dma; + + stmp3xxx_dma_reset_channel(ss->dma); + stmp3xxx_dma_clear_interrupt(ss->dma); + stmp3xxx_dma_enable_interrupt(ss->dma); + init_completion(&ss->done); + stmp3xxx_dma_go(ss->dma, &ss->d, 1); + wait_for_completion(&ss->done); + + if (!busy_wait(readl(ss->regs + HW_SSP_CTRL0) & BM_SSP_CTRL0_RUN)) + status = -ETIMEDOUT; + + if (!dma_buf) + dma_unmap_single(ss->master_dev, spi_buf_dma, len, dir); + + return status; +} + +static inline void stmp_spi_enable(struct stmp_spi *ss) +{ + stmp3xxx_setl(BM_SSP_CTRL0_LOCK_CS, ss->regs + HW_SSP_CTRL0); + stmp3xxx_clearl(BM_SSP_CTRL0_IGNORE_CRC, ss->regs + HW_SSP_CTRL0); +} + +static inline void stmp_spi_disable(struct stmp_spi *ss) +{ + stmp3xxx_clearl(BM_SSP_CTRL0_LOCK_CS, ss->regs + HW_SSP_CTRL0); + stmp3xxx_setl(BM_SSP_CTRL0_IGNORE_CRC, ss->regs + HW_SSP_CTRL0); +} + +static int stmp_spi_txrx_pio(struct stmp_spi *ss, int cs, + unsigned char *buf, int len, + bool first, bool last, bool write) +{ + if (first) + stmp_spi_enable(ss); + + stmp3xxx_setl(stmp_spi_cs(cs), ss->regs + HW_SSP_CTRL0); + + while (len--) { + if (last && len <= 0) + stmp_spi_disable(ss); + + stmp3xxx_clearl(BM_SSP_CTRL0_XFER_COUNT, + ss->regs + HW_SSP_CTRL0); + stmp3xxx_setl(1, ss->regs + HW_SSP_CTRL0); + + if (write) + stmp3xxx_clearl(BM_SSP_CTRL0_READ, + ss->regs + HW_SSP_CTRL0); + else + stmp3xxx_setl(BM_SSP_CTRL0_READ, + ss->regs + HW_SSP_CTRL0); + + /* Run! 
*/ + stmp3xxx_setl(BM_SSP_CTRL0_RUN, ss->regs + HW_SSP_CTRL0); + + if (!busy_wait(readl(ss->regs + HW_SSP_CTRL0) & + BM_SSP_CTRL0_RUN)) + break; + + if (write) + writel(*buf, ss->regs + HW_SSP_DATA); + + /* Set TRANSFER */ + stmp3xxx_setl(BM_SSP_CTRL0_DATA_XFER, ss->regs + HW_SSP_CTRL0); + + if (!write) { + if (busy_wait((readl(ss->regs + HW_SSP_STATUS) & + BM_SSP_STATUS_FIFO_EMPTY))) + break; + *buf = readl(ss->regs + HW_SSP_DATA) & 0xFF; + } + + if (!busy_wait(readl(ss->regs + HW_SSP_CTRL0) & + BM_SSP_CTRL0_RUN)) + break; + + /* advance to the next byte */ + buf++; + } + + return len < 0 ? 0 : -ETIMEDOUT; +} + +static int stmp_spi_handle_message(struct stmp_spi *ss, struct spi_message *m) +{ + bool first, last; + struct spi_transfer *t, *tmp_t; + int status = 0; + int cs; + + cs = m->spi->chip_select; + + list_for_each_entry_safe(t, tmp_t, &m->transfers, transfer_list) { + + first = (&t->transfer_list == m->transfers.next); + last = (&t->transfer_list == m->transfers.prev); + + if (first || t->speed_hz || t->bits_per_word) + stmp_spi_setup_transfer(m->spi, t); + + /* reject "not last" transfers which request to change cs */ + if (t->cs_change && !last) { + dev_err(&m->spi->dev, + "Message with t->cs_change has been skipped\n"); + continue; + } + + if (t->tx_buf) { + status = pio ? + stmp_spi_txrx_pio(ss, cs, (void *)t->tx_buf, + t->len, first, last, true) : + stmp_spi_txrx_dma(ss, cs, (void *)t->tx_buf, + t->tx_dma, t->len, first, last, true); +#ifdef DEBUG + if (t->len < 0x10) + print_hex_dump_bytes("Tx ", + DUMP_PREFIX_OFFSET, + t->tx_buf, t->len); + else + pr_debug("Tx: %d bytes\n", t->len); +#endif + } + if (t->rx_buf) { + status = pio ? + stmp_spi_txrx_pio(ss, cs, t->rx_buf, + t->len, first, last, false) : + stmp_spi_txrx_dma(ss, cs, t->rx_buf, + t->rx_dma, t->len, first, last, false); +#ifdef DEBUG + if (t->len < 0x10) + print_hex_dump_bytes("Rx ", + DUMP_PREFIX_OFFSET, + t->rx_buf, t->len); + else + pr_debug("Rx: %d bytes\n", t->len); +#endif + } + + if (t->delay_usecs) + udelay(t->delay_usecs); + + if (status) + break; + + } + return status; +} + +/** + * stmp_spi_handle - handle messages from the queue + */ +static void stmp_spi_handle(struct work_struct *w) +{ + struct stmp_spi *ss = container_of(w, struct stmp_spi, work); + unsigned long flags; + struct spi_message *m; + + spin_lock_irqsave(&ss->lock, flags); + while (!list_empty(&ss->queue)) { + m = list_entry(ss->queue.next, struct spi_message, queue); + list_del_init(&m->queue); + spin_unlock_irqrestore(&ss->lock, flags); + + m->status = stmp_spi_handle_message(ss, m); + m->complete(m->context); + + spin_lock_irqsave(&ss->lock, flags); + } + spin_unlock_irqrestore(&ss->lock, flags); + + return; +} + +/** + * stmp_spi_transfer - perform message transfer. + * Called indirectly from spi_async, queues all the messages to + * spi_handle_message. 
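+ * The message is only queued here; the actual transfer is carried out
+ * later by stmp_spi_handle() running on the driver's workqueue.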
+ * @spi: spi device + * @m: message to be queued + */ +static int stmp_spi_transfer(struct spi_device *spi, struct spi_message *m) +{ + struct stmp_spi *ss = spi_master_get_devdata(spi->master); + unsigned long flags; + + m->status = -EINPROGRESS; + spin_lock_irqsave(&ss->lock, flags); + list_add_tail(&m->queue, &ss->queue); + queue_work(ss->workqueue, &ss->work); + spin_unlock_irqrestore(&ss->lock, flags); + return 0; +} + +static irqreturn_t stmp_spi_irq(int irq, void *dev_id) +{ + struct stmp_spi *ss = dev_id; + + stmp3xxx_dma_clear_interrupt(ss->dma); + complete(&ss->done); + return IRQ_HANDLED; +} + +static irqreturn_t stmp_spi_irq_err(int irq, void *dev_id) +{ + struct stmp_spi *ss = dev_id; + u32 c1, st; + + c1 = readl(ss->regs + HW_SSP_CTRL1); + st = readl(ss->regs + HW_SSP_STATUS); + dev_err(ss->master_dev, "%s: status = 0x%08X, c1 = 0x%08X\n", + __func__, st, c1); + stmp3xxx_clearl(c1 & 0xCCCC0000, ss->regs + HW_SSP_CTRL1); + + return IRQ_HANDLED; +} + +static int __devinit stmp_spi_probe(struct platform_device *dev) +{ + int err = 0; + struct spi_master *master; + struct stmp_spi *ss; + struct resource *r; + + master = spi_alloc_master(&dev->dev, sizeof(struct stmp_spi)); + if (master == NULL) { + err = -ENOMEM; + goto out0; + } + master->flags = SPI_MASTER_HALF_DUPLEX; + + ss = spi_master_get_devdata(master); + platform_set_drvdata(dev, master); + + /* Get resources(memory, IRQ) associated with the device */ + r = platform_get_resource(dev, IORESOURCE_MEM, 0); + if (r == NULL) { + err = -ENODEV; + goto out_put_master; + } + ss->regs = ioremap(r->start, resource_size(r)); + if (!ss->regs) { + err = -EINVAL; + goto out_put_master; + } + + ss->master_dev = &dev->dev; + ss->id = dev->id; + + INIT_WORK(&ss->work, stmp_spi_handle); + INIT_LIST_HEAD(&ss->queue); + spin_lock_init(&ss->lock); + + ss->workqueue = create_singlethread_workqueue(dev_name(&dev->dev)); + if (!ss->workqueue) { + err = -ENXIO; + goto out_put_master; + } + master->transfer = stmp_spi_transfer; + master->setup = stmp_spi_setup; + + /* the spi->mode bits understood by this driver: */ + master->mode_bits = SPI_CPOL | SPI_CPHA; + + ss->irq = platform_get_irq(dev, 0); + if (ss->irq < 0) { + err = ss->irq; + goto out_put_master; + } + ss->err_irq = platform_get_irq(dev, 1); + if (ss->err_irq < 0) { + err = ss->err_irq; + goto out_put_master; + } + + r = platform_get_resource(dev, IORESOURCE_DMA, 0); + if (r == NULL) { + err = -ENODEV; + goto out_put_master; + } + + ss->dma = r->start; + err = stmp3xxx_dma_request(ss->dma, &dev->dev, dev_name(&dev->dev)); + if (err) + goto out_put_master; + + err = stmp3xxx_dma_allocate_command(ss->dma, &ss->d); + if (err) + goto out_free_dma; + + master->bus_num = dev->id; + master->num_chipselect = 1; + + /* SPI controller initializations */ + err = stmp_spi_init_hw(ss); + if (err) { + dev_dbg(&dev->dev, "cannot initialize hardware\n"); + goto out_free_dma_desc; + } + + if (clock) { + dev_info(&dev->dev, "clock rate forced to %d\n", clock); + clk_set_rate(ss->clk, clock); + } + ss->speed_khz = clk_get_rate(ss->clk); + ss->divider = 2; + dev_info(&dev->dev, "max possible speed %d = %ld/%d kHz\n", + ss->speed_khz, clk_get_rate(ss->clk), ss->divider); + + /* Register for SPI interrupt */ + err = request_irq(ss->irq, stmp_spi_irq, 0, + dev_name(&dev->dev), ss); + if (err) { + dev_dbg(&dev->dev, "request_irq failed, %d\n", err); + goto out_release_hw; + } + + /* ..and shared interrupt for all SSP controllers */ + err = request_irq(ss->err_irq, stmp_spi_irq_err, IRQF_SHARED, + 
dev_name(&dev->dev), ss); + if (err) { + dev_dbg(&dev->dev, "request_irq(error) failed, %d\n", err); + goto out_free_irq; + } + + err = spi_register_master(master); + if (err) { + dev_dbg(&dev->dev, "cannot register spi master, %d\n", err); + goto out_free_irq_2; + } + dev_info(&dev->dev, "at (mapped) 0x%08X, irq=%d, bus %d, %s mode\n", + (u32)ss->regs, ss->irq, master->bus_num, + pio ? "PIO" : "DMA"); + return 0; + +out_free_irq_2: + free_irq(ss->err_irq, ss); +out_free_irq: + free_irq(ss->irq, ss); +out_free_dma_desc: + stmp3xxx_dma_free_command(ss->dma, &ss->d); +out_free_dma: + stmp3xxx_dma_release(ss->dma); +out_release_hw: + stmp_spi_release_hw(ss); +out_put_master: + if (ss->workqueue) + destroy_workqueue(ss->workqueue); + if (ss->regs) + iounmap(ss->regs); + platform_set_drvdata(dev, NULL); + spi_master_put(master); +out0: + return err; +} + +static int __devexit stmp_spi_remove(struct platform_device *dev) +{ + struct stmp_spi *ss; + struct spi_master *master; + + master = platform_get_drvdata(dev); + if (master == NULL) + goto out0; + ss = spi_master_get_devdata(master); + + spi_unregister_master(master); + + free_irq(ss->err_irq, ss); + free_irq(ss->irq, ss); + stmp3xxx_dma_free_command(ss->dma, &ss->d); + stmp3xxx_dma_release(ss->dma); + stmp_spi_release_hw(ss); + destroy_workqueue(ss->workqueue); + iounmap(ss->regs); + spi_master_put(master); + platform_set_drvdata(dev, NULL); +out0: + return 0; +} + +#ifdef CONFIG_PM +static int stmp_spi_suspend(struct platform_device *pdev, pm_message_t pmsg) +{ + struct stmp_spi *ss; + struct spi_master *master; + + master = platform_get_drvdata(pdev); + ss = spi_master_get_devdata(master); + + ss->saved_timings = readl(HW_SSP_TIMING + ss->regs); + clk_disable(ss->clk); + + return 0; +} + +static int stmp_spi_resume(struct platform_device *pdev) +{ + struct stmp_spi *ss; + struct spi_master *master; + + master = platform_get_drvdata(pdev); + ss = spi_master_get_devdata(master); + + clk_enable(ss->clk); + stmp3xxx_reset_block(ss->regs, false); + writel(ss->saved_timings, ss->regs + HW_SSP_TIMING); + + return 0; +} + +#else +#define stmp_spi_suspend NULL +#define stmp_spi_resume NULL +#endif + +static struct platform_driver stmp_spi_driver = { + .probe = stmp_spi_probe, + .remove = __devexit_p(stmp_spi_remove), + .driver = { + .name = "stmp3xxx_ssp", + .owner = THIS_MODULE, + }, + .suspend = stmp_spi_suspend, + .resume = stmp_spi_resume, +}; + +static int __init stmp_spi_init(void) +{ + return platform_driver_register(&stmp_spi_driver); +} + +static void __exit stmp_spi_exit(void) +{ + platform_driver_unregister(&stmp_spi_driver); +} + +module_init(stmp_spi_init); +module_exit(stmp_spi_exit); +module_param(pio, int, S_IRUGO); +module_param(clock, int, S_IRUGO); +MODULE_AUTHOR("dmitry pervushin "); +MODULE_DESCRIPTION("STMP3xxx SPI/SSP driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi-tegra.c b/drivers/spi/spi-tegra.c new file mode 100644 index 0000000..6c3aa6e --- /dev/null +++ b/drivers/spi/spi-tegra.c @@ -0,0 +1,618 @@ +/* + * Driver for Nvidia TEGRA spi controller. + * + * Copyright (C) 2010 Google, Inc. + * + * Author: + * Erik Gilling + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +#define SLINK_COMMAND 0x000 +#define SLINK_BIT_LENGTH(x) (((x) & 0x1f) << 0) +#define SLINK_WORD_SIZE(x) (((x) & 0x1f) << 5) +#define SLINK_BOTH_EN (1 << 10) +#define SLINK_CS_SW (1 << 11) +#define SLINK_CS_VALUE (1 << 12) +#define SLINK_CS_POLARITY (1 << 13) +#define SLINK_IDLE_SDA_DRIVE_LOW (0 << 16) +#define SLINK_IDLE_SDA_DRIVE_HIGH (1 << 16) +#define SLINK_IDLE_SDA_PULL_LOW (2 << 16) +#define SLINK_IDLE_SDA_PULL_HIGH (3 << 16) +#define SLINK_IDLE_SDA_MASK (3 << 16) +#define SLINK_CS_POLARITY1 (1 << 20) +#define SLINK_CK_SDA (1 << 21) +#define SLINK_CS_POLARITY2 (1 << 22) +#define SLINK_CS_POLARITY3 (1 << 23) +#define SLINK_IDLE_SCLK_DRIVE_LOW (0 << 24) +#define SLINK_IDLE_SCLK_DRIVE_HIGH (1 << 24) +#define SLINK_IDLE_SCLK_PULL_LOW (2 << 24) +#define SLINK_IDLE_SCLK_PULL_HIGH (3 << 24) +#define SLINK_IDLE_SCLK_MASK (3 << 24) +#define SLINK_M_S (1 << 28) +#define SLINK_WAIT (1 << 29) +#define SLINK_GO (1 << 30) +#define SLINK_ENB (1 << 31) + +#define SLINK_COMMAND2 0x004 +#define SLINK_LSBFE (1 << 0) +#define SLINK_SSOE (1 << 1) +#define SLINK_SPIE (1 << 4) +#define SLINK_BIDIROE (1 << 6) +#define SLINK_MODFEN (1 << 7) +#define SLINK_INT_SIZE(x) (((x) & 0x1f) << 8) +#define SLINK_CS_ACTIVE_BETWEEN (1 << 17) +#define SLINK_SS_EN_CS(x) (((x) & 0x3) << 18) +#define SLINK_SS_SETUP(x) (((x) & 0x3) << 20) +#define SLINK_FIFO_REFILLS_0 (0 << 22) +#define SLINK_FIFO_REFILLS_1 (1 << 22) +#define SLINK_FIFO_REFILLS_2 (2 << 22) +#define SLINK_FIFO_REFILLS_3 (3 << 22) +#define SLINK_FIFO_REFILLS_MASK (3 << 22) +#define SLINK_WAIT_PACK_INT(x) (((x) & 0x7) << 26) +#define SLINK_SPC0 (1 << 29) +#define SLINK_TXEN (1 << 30) +#define SLINK_RXEN (1 << 31) + +#define SLINK_STATUS 0x008 +#define SLINK_COUNT(val) (((val) >> 0) & 0x1f) +#define SLINK_WORD(val) (((val) >> 5) & 0x1f) +#define SLINK_BLK_CNT(val) (((val) >> 0) & 0xffff) +#define SLINK_MODF (1 << 16) +#define SLINK_RX_UNF (1 << 18) +#define SLINK_TX_OVF (1 << 19) +#define SLINK_TX_FULL (1 << 20) +#define SLINK_TX_EMPTY (1 << 21) +#define SLINK_RX_FULL (1 << 22) +#define SLINK_RX_EMPTY (1 << 23) +#define SLINK_TX_UNF (1 << 24) +#define SLINK_RX_OVF (1 << 25) +#define SLINK_TX_FLUSH (1 << 26) +#define SLINK_RX_FLUSH (1 << 27) +#define SLINK_SCLK (1 << 28) +#define SLINK_ERR (1 << 29) +#define SLINK_RDY (1 << 30) +#define SLINK_BSY (1 << 31) + +#define SLINK_MAS_DATA 0x010 +#define SLINK_SLAVE_DATA 0x014 + +#define SLINK_DMA_CTL 0x018 +#define SLINK_DMA_BLOCK_SIZE(x) (((x) & 0xffff) << 0) +#define SLINK_TX_TRIG_1 (0 << 16) +#define SLINK_TX_TRIG_4 (1 << 16) +#define SLINK_TX_TRIG_8 (2 << 16) +#define SLINK_TX_TRIG_16 (3 << 16) +#define SLINK_TX_TRIG_MASK (3 << 16) +#define SLINK_RX_TRIG_1 (0 << 18) +#define SLINK_RX_TRIG_4 (1 << 18) +#define SLINK_RX_TRIG_8 (2 << 18) +#define SLINK_RX_TRIG_16 (3 << 18) +#define SLINK_RX_TRIG_MASK (3 << 18) +#define SLINK_PACKED (1 << 20) +#define SLINK_PACK_SIZE_4 (0 << 21) +#define SLINK_PACK_SIZE_8 (1 << 21) +#define SLINK_PACK_SIZE_16 (2 << 21) +#define SLINK_PACK_SIZE_32 (3 << 21) +#define SLINK_PACK_SIZE_MASK (3 << 21) +#define SLINK_IE_TXC (1 << 26) +#define SLINK_IE_RXC (1 << 27) +#define SLINK_DMA_EN (1 << 31) + +#define 
SLINK_STATUS2 0x01c +#define SLINK_TX_FIFO_EMPTY_COUNT(val) (((val) & 0x3f) >> 0) +#define SLINK_RX_FIFO_FULL_COUNT(val) (((val) & 0x3f) >> 16) + +#define SLINK_TX_FIFO 0x100 +#define SLINK_RX_FIFO 0x180 + +static const unsigned long spi_tegra_req_sels[] = { + TEGRA_DMA_REQ_SEL_SL2B1, + TEGRA_DMA_REQ_SEL_SL2B2, + TEGRA_DMA_REQ_SEL_SL2B3, + TEGRA_DMA_REQ_SEL_SL2B4, +}; + +#define BB_LEN 32 + +struct spi_tegra_data { + struct spi_master *master; + struct platform_device *pdev; + spinlock_t lock; + + struct clk *clk; + void __iomem *base; + unsigned long phys; + + u32 cur_speed; + + struct list_head queue; + struct spi_transfer *cur; + unsigned cur_pos; + unsigned cur_len; + unsigned cur_bytes_per_word; + + /* The tegra spi controller has a bug which causes the first word + * in PIO transactions to be garbage. Since packed DMA transactions + * require transfers to be 4 byte aligned we need a bounce buffer + * for the generic case. + */ + struct tegra_dma_req rx_dma_req; + struct tegra_dma_channel *rx_dma; + u32 *rx_bb; + dma_addr_t rx_bb_phys; +}; + + +static inline unsigned long spi_tegra_readl(struct spi_tegra_data *tspi, + unsigned long reg) +{ + return readl(tspi->base + reg); +} + +static inline void spi_tegra_writel(struct spi_tegra_data *tspi, + unsigned long val, + unsigned long reg) +{ + writel(val, tspi->base + reg); +} + +static void spi_tegra_go(struct spi_tegra_data *tspi) +{ + unsigned long val; + + wmb(); + + val = spi_tegra_readl(tspi, SLINK_DMA_CTL); + val &= ~SLINK_DMA_BLOCK_SIZE(~0) & ~SLINK_DMA_EN; + val |= SLINK_DMA_BLOCK_SIZE(tspi->rx_dma_req.size / 4 - 1); + spi_tegra_writel(tspi, val, SLINK_DMA_CTL); + + tegra_dma_enqueue_req(tspi->rx_dma, &tspi->rx_dma_req); + + val |= SLINK_DMA_EN; + spi_tegra_writel(tspi, val, SLINK_DMA_CTL); +} + +static unsigned spi_tegra_fill_tx_fifo(struct spi_tegra_data *tspi, + struct spi_transfer *t) +{ + unsigned len = min(t->len - tspi->cur_pos, BB_LEN * + tspi->cur_bytes_per_word); + u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_pos; + int i, j; + unsigned long val; + + val = spi_tegra_readl(tspi, SLINK_COMMAND); + val &= ~SLINK_WORD_SIZE(~0); + val |= SLINK_WORD_SIZE(len / tspi->cur_bytes_per_word - 1); + spi_tegra_writel(tspi, val, SLINK_COMMAND); + + for (i = 0; i < len; i += tspi->cur_bytes_per_word) { + val = 0; + for (j = 0; j < tspi->cur_bytes_per_word; j++) + val |= tx_buf[i + j] << j * 8; + + spi_tegra_writel(tspi, val, SLINK_TX_FIFO); + } + + tspi->rx_dma_req.size = len / tspi->cur_bytes_per_word * 4; + + return len; +} + +static unsigned spi_tegra_drain_rx_fifo(struct spi_tegra_data *tspi, + struct spi_transfer *t) +{ + unsigned len = tspi->cur_len; + u8 *rx_buf = (u8 *)t->rx_buf + tspi->cur_pos; + int i, j; + unsigned long val; + + for (i = 0; i < len; i += tspi->cur_bytes_per_word) { + val = tspi->rx_bb[i / tspi->cur_bytes_per_word]; + for (j = 0; j < tspi->cur_bytes_per_word; j++) + rx_buf[i + j] = (val >> (j * 8)) & 0xff; + } + + return len; +} + +static void spi_tegra_start_transfer(struct spi_device *spi, + struct spi_transfer *t) +{ + struct spi_tegra_data *tspi = spi_master_get_devdata(spi->master); + u32 speed; + u8 bits_per_word; + unsigned long val; + + speed = t->speed_hz ? t->speed_hz : spi->max_speed_hz; + bits_per_word = t->bits_per_word ? 
t->bits_per_word : + spi->bits_per_word; + + tspi->cur_bytes_per_word = (bits_per_word - 1) / 8 + 1; + + if (speed != tspi->cur_speed) + clk_set_rate(tspi->clk, speed); + + if (tspi->cur_speed == 0) + clk_enable(tspi->clk); + + tspi->cur_speed = speed; + + val = spi_tegra_readl(tspi, SLINK_COMMAND2); + val &= ~SLINK_SS_EN_CS(~0) | SLINK_RXEN | SLINK_TXEN; + if (t->rx_buf) + val |= SLINK_RXEN; + if (t->tx_buf) + val |= SLINK_TXEN; + val |= SLINK_SS_EN_CS(spi->chip_select); + val |= SLINK_SPIE; + spi_tegra_writel(tspi, val, SLINK_COMMAND2); + + val = spi_tegra_readl(tspi, SLINK_COMMAND); + val &= ~SLINK_BIT_LENGTH(~0); + val |= SLINK_BIT_LENGTH(bits_per_word - 1); + + /* FIXME: should probably control CS manually so that we can be sure + * it does not go low between transfer and to support delay_usecs + * correctly. + */ + val &= ~SLINK_IDLE_SCLK_MASK & ~SLINK_CK_SDA & ~SLINK_CS_SW; + + if (spi->mode & SPI_CPHA) + val |= SLINK_CK_SDA; + + if (spi->mode & SPI_CPOL) + val |= SLINK_IDLE_SCLK_DRIVE_HIGH; + else + val |= SLINK_IDLE_SCLK_DRIVE_LOW; + + val |= SLINK_M_S; + + spi_tegra_writel(tspi, val, SLINK_COMMAND); + + spi_tegra_writel(tspi, SLINK_RX_FLUSH | SLINK_TX_FLUSH, SLINK_STATUS); + + tspi->cur = t; + tspi->cur_pos = 0; + tspi->cur_len = spi_tegra_fill_tx_fifo(tspi, t); + + spi_tegra_go(tspi); +} + +static void spi_tegra_start_message(struct spi_device *spi, + struct spi_message *m) +{ + struct spi_transfer *t; + + m->actual_length = 0; + m->status = 0; + + t = list_first_entry(&m->transfers, struct spi_transfer, transfer_list); + spi_tegra_start_transfer(spi, t); +} + +static void tegra_spi_rx_dma_complete(struct tegra_dma_req *req) +{ + struct spi_tegra_data *tspi = req->dev; + unsigned long flags; + struct spi_message *m; + struct spi_device *spi; + int timeout = 0; + unsigned long val; + + /* the SPI controller may come back with both the BSY and RDY bits + * set. In this case we need to wait for the BSY bit to clear so + * that we are sure the DMA is finished. 1000 reads was empirically + * determined to be long enough. + */ + while (timeout++ < 1000) { + if (!(spi_tegra_readl(tspi, SLINK_STATUS) & SLINK_BSY)) + break; + } + + spin_lock_irqsave(&tspi->lock, flags); + + val = spi_tegra_readl(tspi, SLINK_STATUS); + val |= SLINK_RDY; + spi_tegra_writel(tspi, val, SLINK_STATUS); + + m = list_first_entry(&tspi->queue, struct spi_message, queue); + + if (timeout >= 1000) + m->status = -EIO; + + spi = m->state; + + tspi->cur_pos += spi_tegra_drain_rx_fifo(tspi, tspi->cur); + m->actual_length += tspi->cur_pos; + + if (tspi->cur_pos < tspi->cur->len) { + tspi->cur_len = spi_tegra_fill_tx_fifo(tspi, tspi->cur); + spi_tegra_go(tspi); + } else if (!list_is_last(&tspi->cur->transfer_list, + &m->transfers)) { + tspi->cur = list_first_entry(&tspi->cur->transfer_list, + struct spi_transfer, + transfer_list); + spi_tegra_start_transfer(spi, tspi->cur); + } else { + list_del(&m->queue); + + m->complete(m->context); + + if (!list_empty(&tspi->queue)) { + m = list_first_entry(&tspi->queue, struct spi_message, + queue); + spi = m->state; + spi_tegra_start_message(spi, m); + } else { + clk_disable(tspi->clk); + tspi->cur_speed = 0; + } + } + + spin_unlock_irqrestore(&tspi->lock, flags); +} + +static int spi_tegra_setup(struct spi_device *spi) +{ + struct spi_tegra_data *tspi = spi_master_get_devdata(spi->master); + unsigned long cs_bit; + unsigned long val; + unsigned long flags; + + dev_dbg(&spi->dev, "setup %d bpw, %scpol, %scpha, %dHz\n", + spi->bits_per_word, + spi->mode & SPI_CPOL ? 
"" : "~", + spi->mode & SPI_CPHA ? "" : "~", + spi->max_speed_hz); + + + switch (spi->chip_select) { + case 0: + cs_bit = SLINK_CS_POLARITY; + break; + + case 1: + cs_bit = SLINK_CS_POLARITY1; + break; + + case 2: + cs_bit = SLINK_CS_POLARITY2; + break; + + case 4: + cs_bit = SLINK_CS_POLARITY3; + break; + + default: + return -EINVAL; + } + + spin_lock_irqsave(&tspi->lock, flags); + + val = spi_tegra_readl(tspi, SLINK_COMMAND); + if (spi->mode & SPI_CS_HIGH) + val |= cs_bit; + else + val &= ~cs_bit; + spi_tegra_writel(tspi, val, SLINK_COMMAND); + + spin_unlock_irqrestore(&tspi->lock, flags); + + return 0; +} + +static int spi_tegra_transfer(struct spi_device *spi, struct spi_message *m) +{ + struct spi_tegra_data *tspi = spi_master_get_devdata(spi->master); + struct spi_transfer *t; + unsigned long flags; + int was_empty; + + if (list_empty(&m->transfers) || !m->complete) + return -EINVAL; + + list_for_each_entry(t, &m->transfers, transfer_list) { + if (t->bits_per_word < 0 || t->bits_per_word > 32) + return -EINVAL; + + if (t->len == 0) + return -EINVAL; + + if (!t->rx_buf && !t->tx_buf) + return -EINVAL; + } + + m->state = spi; + + spin_lock_irqsave(&tspi->lock, flags); + was_empty = list_empty(&tspi->queue); + list_add_tail(&m->queue, &tspi->queue); + + if (was_empty) + spi_tegra_start_message(spi, m); + + spin_unlock_irqrestore(&tspi->lock, flags); + + return 0; +} + +static int __init spi_tegra_probe(struct platform_device *pdev) +{ + struct spi_master *master; + struct spi_tegra_data *tspi; + struct resource *r; + int ret; + + master = spi_alloc_master(&pdev->dev, sizeof *tspi); + if (master == NULL) { + dev_err(&pdev->dev, "master allocation failed\n"); + return -ENOMEM; + } + + /* the spi->mode bits understood by this driver: */ + master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; + + master->bus_num = pdev->id; + + master->setup = spi_tegra_setup; + master->transfer = spi_tegra_transfer; + master->num_chipselect = 4; + + dev_set_drvdata(&pdev->dev, master); + tspi = spi_master_get_devdata(master); + tspi->master = master; + tspi->pdev = pdev; + spin_lock_init(&tspi->lock); + + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (r == NULL) { + ret = -ENODEV; + goto err0; + } + + if (!request_mem_region(r->start, (r->end - r->start) + 1, + dev_name(&pdev->dev))) { + ret = -EBUSY; + goto err0; + } + + tspi->phys = r->start; + tspi->base = ioremap(r->start, r->end - r->start + 1); + if (!tspi->base) { + dev_err(&pdev->dev, "can't ioremap iomem\n"); + ret = -ENOMEM; + goto err1; + } + + tspi->clk = clk_get(&pdev->dev, NULL); + if (IS_ERR(tspi->clk)) { + dev_err(&pdev->dev, "can not get clock\n"); + ret = PTR_ERR(tspi->clk); + goto err2; + } + + INIT_LIST_HEAD(&tspi->queue); + + tspi->rx_dma = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT); + if (!tspi->rx_dma) { + dev_err(&pdev->dev, "can not allocate rx dma channel\n"); + ret = -ENODEV; + goto err3; + } + + tspi->rx_bb = dma_alloc_coherent(&pdev->dev, sizeof(u32) * BB_LEN, + &tspi->rx_bb_phys, GFP_KERNEL); + if (!tspi->rx_bb) { + dev_err(&pdev->dev, "can not allocate rx bounce buffer\n"); + ret = -ENOMEM; + goto err4; + } + + tspi->rx_dma_req.complete = tegra_spi_rx_dma_complete; + tspi->rx_dma_req.to_memory = 1; + tspi->rx_dma_req.dest_addr = tspi->rx_bb_phys; + tspi->rx_dma_req.dest_bus_width = 32; + tspi->rx_dma_req.source_addr = tspi->phys + SLINK_RX_FIFO; + tspi->rx_dma_req.source_bus_width = 32; + tspi->rx_dma_req.source_wrap = 4; + tspi->rx_dma_req.req_sel = spi_tegra_req_sels[pdev->id]; + tspi->rx_dma_req.dev = 
tspi; + + ret = spi_register_master(master); + + if (ret < 0) + goto err5; + + return ret; + +err5: + dma_free_coherent(&pdev->dev, sizeof(u32) * BB_LEN, + tspi->rx_bb, tspi->rx_bb_phys); +err4: + tegra_dma_free_channel(tspi->rx_dma); +err3: + clk_put(tspi->clk); +err2: + iounmap(tspi->base); +err1: + release_mem_region(r->start, (r->end - r->start) + 1); +err0: + spi_master_put(master); + return ret; +} + +static int __devexit spi_tegra_remove(struct platform_device *pdev) +{ + struct spi_master *master; + struct spi_tegra_data *tspi; + struct resource *r; + + master = dev_get_drvdata(&pdev->dev); + tspi = spi_master_get_devdata(master); + + spi_unregister_master(master); + tegra_dma_free_channel(tspi->rx_dma); + + dma_free_coherent(&pdev->dev, sizeof(u32) * BB_LEN, + tspi->rx_bb, tspi->rx_bb_phys); + + clk_put(tspi->clk); + iounmap(tspi->base); + + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + release_mem_region(r->start, (r->end - r->start) + 1); + + return 0; +} + +MODULE_ALIAS("platform:spi_tegra"); + +static struct platform_driver spi_tegra_driver = { + .driver = { + .name = "spi_tegra", + .owner = THIS_MODULE, + }, + .remove = __devexit_p(spi_tegra_remove), +}; + +static int __init spi_tegra_init(void) +{ + return platform_driver_probe(&spi_tegra_driver, spi_tegra_probe); +} +module_init(spi_tegra_init); + +static void __exit spi_tegra_exit(void) +{ + platform_driver_unregister(&spi_tegra_driver); +} +module_exit(spi_tegra_exit); + +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi-ti-ssp.c b/drivers/spi/spi-ti-ssp.c new file mode 100644 index 0000000..ee22795 --- /dev/null +++ b/drivers/spi/spi-ti-ssp.c @@ -0,0 +1,402 @@ +/* + * Sequencer Serial Port (SSP) based SPI master driver + * + * Copyright (C) 2010 Texas Instruments Inc + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include +#include +#include +#include +#include +#include +#include + +#define MODE_BITS (SPI_CPHA | SPI_CPOL | SPI_CS_HIGH) + +struct ti_ssp_spi { + struct spi_master *master; + struct device *dev; + spinlock_t lock; + struct list_head msg_queue; + struct completion complete; + bool shutdown; + struct workqueue_struct *workqueue; + struct work_struct work; + u8 mode, bpw; + int cs_active; + u32 pc_en, pc_dis, pc_wr, pc_rd; + void (*select)(int cs); +}; + +static u32 ti_ssp_spi_rx(struct ti_ssp_spi *hw) +{ + u32 ret; + + ti_ssp_run(hw->dev, hw->pc_rd, 0, &ret); + return ret; +} + +static void ti_ssp_spi_tx(struct ti_ssp_spi *hw, u32 data) +{ + ti_ssp_run(hw->dev, hw->pc_wr, data << (32 - hw->bpw), NULL); +} + +static int ti_ssp_spi_txrx(struct ti_ssp_spi *hw, struct spi_message *msg, + struct spi_transfer *t) +{ + int count; + + if (hw->bpw <= 8) { + u8 *rx = t->rx_buf; + const u8 *tx = t->tx_buf; + + for (count = 0; count < t->len; count += 1) { + if (t->tx_buf) + ti_ssp_spi_tx(hw, *tx++); + if (t->rx_buf) + *rx++ = ti_ssp_spi_rx(hw); + } + } else if (hw->bpw <= 16) { + u16 *rx = t->rx_buf; + const u16 *tx = t->tx_buf; + + for (count = 0; count < t->len; count += 2) { + if (t->tx_buf) + ti_ssp_spi_tx(hw, *tx++); + if (t->rx_buf) + *rx++ = ti_ssp_spi_rx(hw); + } + } else { + u32 *rx = t->rx_buf; + const u32 *tx = t->tx_buf; + + for (count = 0; count < t->len; count += 4) { + if (t->tx_buf) + ti_ssp_spi_tx(hw, *tx++); + if (t->rx_buf) + *rx++ = ti_ssp_spi_rx(hw); + } + } + + msg->actual_length += count; /* bytes transferred */ + + dev_dbg(&msg->spi->dev, "xfer %s%s, %d bytes, %d bpw, count %d%s\n", + t->tx_buf ? "tx" : "", t->rx_buf ? "rx" : "", t->len, + hw->bpw, count, (count < t->len) ? " (under)" : ""); + + return (count < t->len) ? -EIO : 0; /* left over data */ +} + +static void ti_ssp_spi_chip_select(struct ti_ssp_spi *hw, int cs_active) +{ + cs_active = !!cs_active; + if (cs_active == hw->cs_active) + return; + ti_ssp_run(hw->dev, cs_active ? hw->pc_en : hw->pc_dis, 0, NULL); + hw->cs_active = cs_active; +} + +#define __SHIFT_OUT(bits) (SSP_OPCODE_SHIFT | SSP_OUT_MODE | \ + cs_en | clk | SSP_COUNT((bits) * 2 - 1)) +#define __SHIFT_IN(bits) (SSP_OPCODE_SHIFT | SSP_IN_MODE | \ + cs_en | clk | SSP_COUNT((bits) * 2 - 1)) + +static int ti_ssp_spi_setup_transfer(struct ti_ssp_spi *hw, u8 bpw, u8 mode) +{ + int error, idx = 0; + u32 seqram[16]; + u32 cs_en, cs_dis, clk; + u32 topbits, botbits; + + mode &= MODE_BITS; + if (mode == hw->mode && bpw == hw->bpw) + return 0; + + cs_en = (mode & SPI_CS_HIGH) ? SSP_CS_HIGH : SSP_CS_LOW; + cs_dis = (mode & SPI_CS_HIGH) ? SSP_CS_LOW : SSP_CS_HIGH; + clk = (mode & SPI_CPOL) ? SSP_CLK_HIGH : SSP_CLK_LOW; + + /* Construct instructions */ + + /* Disable Chip Select */ + hw->pc_dis = idx; + seqram[idx++] = SSP_OPCODE_DIRECT | SSP_OUT_MODE | cs_dis | clk; + seqram[idx++] = SSP_OPCODE_STOP | SSP_OUT_MODE | cs_dis | clk; + + /* Enable Chip Select */ + hw->pc_en = idx; + seqram[idx++] = SSP_OPCODE_DIRECT | SSP_OUT_MODE | cs_en | clk; + seqram[idx++] = SSP_OPCODE_STOP | SSP_OUT_MODE | cs_en | clk; + + /* Reads and writes need to be split for bpw > 16 */ + topbits = (bpw > 16) ? 
16 : bpw; + botbits = bpw - topbits; + + /* Write */ + hw->pc_wr = idx; + seqram[idx++] = __SHIFT_OUT(topbits) | SSP_ADDR_REG; + if (botbits) + seqram[idx++] = __SHIFT_OUT(botbits) | SSP_DATA_REG; + seqram[idx++] = SSP_OPCODE_STOP | SSP_OUT_MODE | cs_en | clk; + + /* Read */ + hw->pc_rd = idx; + if (botbits) + seqram[idx++] = __SHIFT_IN(botbits) | SSP_ADDR_REG; + seqram[idx++] = __SHIFT_IN(topbits) | SSP_DATA_REG; + seqram[idx++] = SSP_OPCODE_STOP | SSP_OUT_MODE | cs_en | clk; + + error = ti_ssp_load(hw->dev, 0, seqram, idx); + if (error < 0) + return error; + + error = ti_ssp_set_mode(hw->dev, ((mode & SPI_CPHA) ? + 0 : SSP_EARLY_DIN)); + if (error < 0) + return error; + + hw->bpw = bpw; + hw->mode = mode; + + return error; +} + +static void ti_ssp_spi_work(struct work_struct *work) +{ + struct ti_ssp_spi *hw = container_of(work, struct ti_ssp_spi, work); + + spin_lock(&hw->lock); + + while (!list_empty(&hw->msg_queue)) { + struct spi_message *m; + struct spi_device *spi; + struct spi_transfer *t = NULL; + int status = 0; + + m = container_of(hw->msg_queue.next, struct spi_message, + queue); + + list_del_init(&m->queue); + + spin_unlock(&hw->lock); + + spi = m->spi; + + if (hw->select) + hw->select(spi->chip_select); + + list_for_each_entry(t, &m->transfers, transfer_list) { + int bpw = spi->bits_per_word; + int xfer_status; + + if (t->bits_per_word) + bpw = t->bits_per_word; + + if (ti_ssp_spi_setup_transfer(hw, bpw, spi->mode) < 0) + break; + + ti_ssp_spi_chip_select(hw, 1); + + xfer_status = ti_ssp_spi_txrx(hw, m, t); + if (xfer_status < 0) + status = xfer_status; + + if (t->delay_usecs) + udelay(t->delay_usecs); + + if (t->cs_change) + ti_ssp_spi_chip_select(hw, 0); + } + + ti_ssp_spi_chip_select(hw, 0); + m->status = status; + m->complete(m->context); + + spin_lock(&hw->lock); + } + + if (hw->shutdown) + complete(&hw->complete); + + spin_unlock(&hw->lock); +} + +static int ti_ssp_spi_setup(struct spi_device *spi) +{ + if (spi->bits_per_word > 32) + return -EINVAL; + + return 0; +} + +static int ti_ssp_spi_transfer(struct spi_device *spi, struct spi_message *m) +{ + struct ti_ssp_spi *hw; + struct spi_transfer *t; + int error = 0; + + m->actual_length = 0; + m->status = -EINPROGRESS; + + hw = spi_master_get_devdata(spi->master); + + if (list_empty(&m->transfers) || !m->complete) + return -EINVAL; + + list_for_each_entry(t, &m->transfers, transfer_list) { + if (t->len && !(t->rx_buf || t->tx_buf)) { + dev_err(&spi->dev, "invalid xfer, no buffer\n"); + return -EINVAL; + } + + if (t->len && t->rx_buf && t->tx_buf) { + dev_err(&spi->dev, "invalid xfer, full duplex\n"); + return -EINVAL; + } + + if (t->bits_per_word > 32) { + dev_err(&spi->dev, "invalid xfer width %d\n", + t->bits_per_word); + return -EINVAL; + } + } + + spin_lock(&hw->lock); + if (hw->shutdown) { + error = -ESHUTDOWN; + goto error_unlock; + } + list_add_tail(&m->queue, &hw->msg_queue); + queue_work(hw->workqueue, &hw->work); +error_unlock: + spin_unlock(&hw->lock); + return error; +} + +static int __devinit ti_ssp_spi_probe(struct platform_device *pdev) +{ + const struct ti_ssp_spi_data *pdata; + struct ti_ssp_spi *hw; + struct spi_master *master; + struct device *dev = &pdev->dev; + int error = 0; + + pdata = dev->platform_data; + if (!pdata) { + dev_err(dev, "platform data not found\n"); + return -EINVAL; + } + + master = spi_alloc_master(dev, sizeof(struct ti_ssp_spi)); + if (!master) { + dev_err(dev, "cannot allocate SPI master\n"); + return -ENOMEM; + } + + hw = spi_master_get_devdata(master); + 
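+	/* driver state is carved out of the spi_master allocation above */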
platform_set_drvdata(pdev, hw); + + hw->master = master; + hw->dev = dev; + hw->select = pdata->select; + + spin_lock_init(&hw->lock); + init_completion(&hw->complete); + INIT_LIST_HEAD(&hw->msg_queue); + INIT_WORK(&hw->work, ti_ssp_spi_work); + + hw->workqueue = create_singlethread_workqueue(dev_name(dev)); + if (!hw->workqueue) { + error = -ENOMEM; + dev_err(dev, "work queue creation failed\n"); + goto error_wq; + } + + error = ti_ssp_set_iosel(hw->dev, pdata->iosel); + if (error < 0) { + dev_err(dev, "io setup failed\n"); + goto error_iosel; + } + + master->bus_num = pdev->id; + master->num_chipselect = pdata->num_cs; + master->mode_bits = MODE_BITS; + master->flags = SPI_MASTER_HALF_DUPLEX; + master->setup = ti_ssp_spi_setup; + master->transfer = ti_ssp_spi_transfer; + + error = spi_register_master(master); + if (error) { + dev_err(dev, "master registration failed\n"); + goto error_reg; + } + + return 0; + +error_reg: +error_iosel: + destroy_workqueue(hw->workqueue); +error_wq: + spi_master_put(master); + return error; +} + +static int __devexit ti_ssp_spi_remove(struct platform_device *pdev) +{ + struct ti_ssp_spi *hw = platform_get_drvdata(pdev); + int error; + + hw->shutdown = 1; + while (!list_empty(&hw->msg_queue)) { + error = wait_for_completion_interruptible(&hw->complete); + if (error < 0) { + hw->shutdown = 0; + return error; + } + } + destroy_workqueue(hw->workqueue); + spi_unregister_master(hw->master); + + return 0; +} + +static struct platform_driver ti_ssp_spi_driver = { + .probe = ti_ssp_spi_probe, + .remove = __devexit_p(ti_ssp_spi_remove), + .driver = { + .name = "ti-ssp-spi", + .owner = THIS_MODULE, + }, +}; + +static int __init ti_ssp_spi_init(void) +{ + return platform_driver_register(&ti_ssp_spi_driver); +} +module_init(ti_ssp_spi_init); + +static void __exit ti_ssp_spi_exit(void) +{ + platform_driver_unregister(&ti_ssp_spi_driver); +} +module_exit(ti_ssp_spi_exit); + +MODULE_DESCRIPTION("SSP SPI Master"); +MODULE_AUTHOR("Cyril Chemparathy"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:ti-ssp-spi"); diff --git a/drivers/spi/spi-tle62x0.c b/drivers/spi/spi-tle62x0.c new file mode 100644 index 0000000..940e73d --- /dev/null +++ b/drivers/spi/spi-tle62x0.c @@ -0,0 +1,334 @@ +/* + * Support Infineon TLE62x0 driver chips + * + * Copyright (c) 2007 Simtec Electronics + * Ben Dooks, + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include + +#include +#include + + +#define CMD_READ 0x00 +#define CMD_SET 0xff + +#define DIAG_NORMAL 0x03 +#define DIAG_OVERLOAD 0x02 +#define DIAG_OPEN 0x01 +#define DIAG_SHORTGND 0x00 + +struct tle62x0_state { + struct spi_device *us; + struct mutex lock; + unsigned int nr_gpio; + unsigned int gpio_state; + + unsigned char tx_buff[4]; + unsigned char rx_buff[4]; +}; + +static int to_gpio_num(struct device_attribute *attr); + +static inline int tle62x0_write(struct tle62x0_state *st) +{ + unsigned char *buff = st->tx_buff; + unsigned int gpio_state = st->gpio_state; + + buff[0] = CMD_SET; + + if (st->nr_gpio == 16) { + buff[1] = gpio_state >> 8; + buff[2] = gpio_state; + } else { + buff[1] = gpio_state; + } + + dev_dbg(&st->us->dev, "buff %02x,%02x,%02x\n", + buff[0], buff[1], buff[2]); + + return spi_write(st->us, buff, (st->nr_gpio == 16) ? 
3 : 2); +} + +static inline int tle62x0_read(struct tle62x0_state *st) +{ + unsigned char *txbuff = st->tx_buff; + struct spi_transfer xfer = { + .tx_buf = txbuff, + .rx_buf = st->rx_buff, + .len = (st->nr_gpio * 2) / 8, + }; + struct spi_message msg; + + txbuff[0] = CMD_READ; + txbuff[1] = 0x00; + txbuff[2] = 0x00; + txbuff[3] = 0x00; + + spi_message_init(&msg); + spi_message_add_tail(&xfer, &msg); + + return spi_sync(st->us, &msg); +} + +static unsigned char *decode_fault(unsigned int fault_code) +{ + fault_code &= 3; + + switch (fault_code) { + case DIAG_NORMAL: + return "N"; + case DIAG_OVERLOAD: + return "V"; + case DIAG_OPEN: + return "O"; + case DIAG_SHORTGND: + return "G"; + } + + return "?"; +} + +static ssize_t tle62x0_status_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct tle62x0_state *st = dev_get_drvdata(dev); + char *bp = buf; + unsigned char *buff = st->rx_buff; + unsigned long fault = 0; + int ptr; + int ret; + + mutex_lock(&st->lock); + ret = tle62x0_read(st); + dev_dbg(dev, "tle62x0_read() returned %d\n", ret); + if (ret < 0) { + mutex_unlock(&st->lock); + return ret; + } + + for (ptr = 0; ptr < (st->nr_gpio * 2)/8; ptr += 1) { + fault <<= 8; + fault |= ((unsigned long)buff[ptr]); + + dev_dbg(dev, "byte %d is %02x\n", ptr, buff[ptr]); + } + + for (ptr = 0; ptr < st->nr_gpio; ptr++) { + bp += sprintf(bp, "%s ", decode_fault(fault >> (ptr * 2))); + } + + *bp++ = '\n'; + + mutex_unlock(&st->lock); + return bp - buf; +} + +static DEVICE_ATTR(status_show, S_IRUGO, tle62x0_status_show, NULL); + +static ssize_t tle62x0_gpio_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct tle62x0_state *st = dev_get_drvdata(dev); + int gpio_num = to_gpio_num(attr); + int value; + + mutex_lock(&st->lock); + value = (st->gpio_state >> gpio_num) & 1; + mutex_unlock(&st->lock); + + return snprintf(buf, PAGE_SIZE, "%d", value); +} + +static ssize_t tle62x0_gpio_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct tle62x0_state *st = dev_get_drvdata(dev); + int gpio_num = to_gpio_num(attr); + unsigned long val; + char *endp; + + val = simple_strtoul(buf, &endp, 0); + if (buf == endp) + return -EINVAL; + + dev_dbg(dev, "setting gpio %d to %ld\n", gpio_num, val); + + mutex_lock(&st->lock); + + if (val) + st->gpio_state |= 1 << gpio_num; + else + st->gpio_state &= ~(1 << gpio_num); + + tle62x0_write(st); + mutex_unlock(&st->lock); + + return len; +} + +static DEVICE_ATTR(gpio1, S_IWUSR|S_IRUGO, + tle62x0_gpio_show, tle62x0_gpio_store); +static DEVICE_ATTR(gpio2, S_IWUSR|S_IRUGO, + tle62x0_gpio_show, tle62x0_gpio_store); +static DEVICE_ATTR(gpio3, S_IWUSR|S_IRUGO, + tle62x0_gpio_show, tle62x0_gpio_store); +static DEVICE_ATTR(gpio4, S_IWUSR|S_IRUGO, + tle62x0_gpio_show, tle62x0_gpio_store); +static DEVICE_ATTR(gpio5, S_IWUSR|S_IRUGO, + tle62x0_gpio_show, tle62x0_gpio_store); +static DEVICE_ATTR(gpio6, S_IWUSR|S_IRUGO, + tle62x0_gpio_show, tle62x0_gpio_store); +static DEVICE_ATTR(gpio7, S_IWUSR|S_IRUGO, + tle62x0_gpio_show, tle62x0_gpio_store); +static DEVICE_ATTR(gpio8, S_IWUSR|S_IRUGO, + tle62x0_gpio_show, tle62x0_gpio_store); +static DEVICE_ATTR(gpio9, S_IWUSR|S_IRUGO, + tle62x0_gpio_show, tle62x0_gpio_store); +static DEVICE_ATTR(gpio10, S_IWUSR|S_IRUGO, + tle62x0_gpio_show, tle62x0_gpio_store); +static DEVICE_ATTR(gpio11, S_IWUSR|S_IRUGO, + tle62x0_gpio_show, tle62x0_gpio_store); +static DEVICE_ATTR(gpio12, S_IWUSR|S_IRUGO, + tle62x0_gpio_show, tle62x0_gpio_store); +static 
DEVICE_ATTR(gpio13, S_IWUSR|S_IRUGO, + tle62x0_gpio_show, tle62x0_gpio_store); +static DEVICE_ATTR(gpio14, S_IWUSR|S_IRUGO, + tle62x0_gpio_show, tle62x0_gpio_store); +static DEVICE_ATTR(gpio15, S_IWUSR|S_IRUGO, + tle62x0_gpio_show, tle62x0_gpio_store); +static DEVICE_ATTR(gpio16, S_IWUSR|S_IRUGO, + tle62x0_gpio_show, tle62x0_gpio_store); + +static struct device_attribute *gpio_attrs[] = { + [0] = &dev_attr_gpio1, + [1] = &dev_attr_gpio2, + [2] = &dev_attr_gpio3, + [3] = &dev_attr_gpio4, + [4] = &dev_attr_gpio5, + [5] = &dev_attr_gpio6, + [6] = &dev_attr_gpio7, + [7] = &dev_attr_gpio8, + [8] = &dev_attr_gpio9, + [9] = &dev_attr_gpio10, + [10] = &dev_attr_gpio11, + [11] = &dev_attr_gpio12, + [12] = &dev_attr_gpio13, + [13] = &dev_attr_gpio14, + [14] = &dev_attr_gpio15, + [15] = &dev_attr_gpio16 +}; + +static int to_gpio_num(struct device_attribute *attr) +{ + int ptr; + + for (ptr = 0; ptr < ARRAY_SIZE(gpio_attrs); ptr++) { + if (gpio_attrs[ptr] == attr) + return ptr; + } + + return -1; +} + +static int __devinit tle62x0_probe(struct spi_device *spi) +{ + struct tle62x0_state *st; + struct tle62x0_pdata *pdata; + int ptr; + int ret; + + pdata = spi->dev.platform_data; + if (pdata == NULL) { + dev_err(&spi->dev, "no device data specified\n"); + return -EINVAL; + } + + st = kzalloc(sizeof(struct tle62x0_state), GFP_KERNEL); + if (st == NULL) { + dev_err(&spi->dev, "no memory for device state\n"); + return -ENOMEM; + } + + st->us = spi; + st->nr_gpio = pdata->gpio_count; + st->gpio_state = pdata->init_state; + + mutex_init(&st->lock); + + ret = device_create_file(&spi->dev, &dev_attr_status_show); + if (ret) { + dev_err(&spi->dev, "cannot create status attribute\n"); + goto err_status; + } + + for (ptr = 0; ptr < pdata->gpio_count; ptr++) { + ret = device_create_file(&spi->dev, gpio_attrs[ptr]); + if (ret) { + dev_err(&spi->dev, "cannot create gpio attribute\n"); + goto err_gpios; + } + } + + /* tle62x0_write(st); */ + spi_set_drvdata(spi, st); + return 0; + + err_gpios: + while (--ptr >= 0) + device_remove_file(&spi->dev, gpio_attrs[ptr]); + + device_remove_file(&spi->dev, &dev_attr_status_show); + + err_status: + kfree(st); + return ret; +} + +static int __devexit tle62x0_remove(struct spi_device *spi) +{ + struct tle62x0_state *st = spi_get_drvdata(spi); + int ptr; + + for (ptr = 0; ptr < st->nr_gpio; ptr++) + device_remove_file(&spi->dev, gpio_attrs[ptr]); + + device_remove_file(&spi->dev, &dev_attr_status_show); + kfree(st); + return 0; +} + +static struct spi_driver tle62x0_driver = { + .driver = { + .name = "tle62x0", + .owner = THIS_MODULE, + }, + .probe = tle62x0_probe, + .remove = __devexit_p(tle62x0_remove), +}; + +static __init int tle62x0_init(void) +{ + return spi_register_driver(&tle62x0_driver); +} + +static __exit void tle62x0_exit(void) +{ + spi_unregister_driver(&tle62x0_driver); +} + +module_init(tle62x0_init); +module_exit(tle62x0_exit); + +MODULE_AUTHOR("Ben Dooks "); +MODULE_DESCRIPTION("TLE62x0 SPI driver"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("spi:tle62x0"); diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c new file mode 100644 index 0000000..79e48d4 --- /dev/null +++ b/drivers/spi/spi-topcliff-pch.c @@ -0,0 +1,1303 @@ +/* + * SPI bus driver for the Topcliff PCH used by Intel SoCs + * + * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Register offsets */ +#define PCH_SPCR 0x00 /* SPI control register */ +#define PCH_SPBRR 0x04 /* SPI baud rate register */ +#define PCH_SPSR 0x08 /* SPI status register */ +#define PCH_SPDWR 0x0C /* SPI write data register */ +#define PCH_SPDRR 0x10 /* SPI read data register */ +#define PCH_SSNXCR 0x18 /* SSN Expand Control Register */ +#define PCH_SRST 0x1C /* SPI reset register */ + +#define PCH_SPSR_TFD 0x000007C0 +#define PCH_SPSR_RFD 0x0000F800 + +#define PCH_READABLE(x) (((x) & PCH_SPSR_RFD)>>11) +#define PCH_WRITABLE(x) (((x) & PCH_SPSR_TFD)>>6) + +#define PCH_RX_THOLD 7 +#define PCH_RX_THOLD_MAX 15 + +#define PCH_MAX_BAUDRATE 5000000 +#define PCH_MAX_FIFO_DEPTH 16 + +#define STATUS_RUNNING 1 +#define STATUS_EXITING 2 +#define PCH_SLEEP_TIME 10 + +#define PCH_ADDRESS_SIZE 0x20 + +#define SSN_LOW 0x02U +#define SSN_NO_CONTROL 0x00U +#define PCH_MAX_CS 0xFF +#define PCI_DEVICE_ID_GE_SPI 0x8816 + +#define SPCR_SPE_BIT (1 << 0) +#define SPCR_MSTR_BIT (1 << 1) +#define SPCR_LSBF_BIT (1 << 4) +#define SPCR_CPHA_BIT (1 << 5) +#define SPCR_CPOL_BIT (1 << 6) +#define SPCR_TFIE_BIT (1 << 8) +#define SPCR_RFIE_BIT (1 << 9) +#define SPCR_FIE_BIT (1 << 10) +#define SPCR_ORIE_BIT (1 << 11) +#define SPCR_MDFIE_BIT (1 << 12) +#define SPCR_FICLR_BIT (1 << 24) +#define SPSR_TFI_BIT (1 << 0) +#define SPSR_RFI_BIT (1 << 1) +#define SPSR_FI_BIT (1 << 2) +#define SPBRR_SIZE_BIT (1 << 10) + +#define PCH_ALL (SPCR_TFIE_BIT|SPCR_RFIE_BIT|SPCR_FIE_BIT|SPCR_ORIE_BIT|SPCR_MDFIE_BIT) + +#define SPCR_RFIC_FIELD 20 +#define SPCR_TFIC_FIELD 16 + +#define SPSR_INT_BITS 0x1F +#define MASK_SPBRR_SPBR_BITS (~((1 << 10) - 1)) +#define MASK_RFIC_SPCR_BITS (~(0xf << 20)) +#define MASK_TFIC_SPCR_BITS (~(0xf000f << 12)) + +#define PCH_CLOCK_HZ 50000000 +#define PCH_MAX_SPBR 1023 + + +/** + * struct pch_spi_data - Holds the SPI channel specific details + * @io_remap_addr: The remapped PCI base address + * @master: Pointer to the SPI master structure + * @work: Reference to work queue handler + * @wk: Workqueue for carrying out execution of the + * requests + * @wait: Wait queue for waking up upon receiving an + * interrupt. 
+ * @transfer_complete: Status of SPI Transfer + * @bcurrent_msg_processing: Status flag for message processing + * @lock: Lock for protecting this structure + * @queue: SPI Message queue + * @status: Status of the SPI driver + * @bpw_len: Length of data to be transferred in bits per + * word + * @transfer_active: Flag showing active transfer + * @tx_index: Transmit data count; for bookkeeping during + * transfer + * @rx_index: Receive data count; for bookkeeping during + * transfer + * @tx_buff: Buffer for data to be transmitted + * @rx_index: Buffer for Received data + * @n_curnt_chip: The chip number that this SPI driver currently + * operates on + * @current_chip: Reference to the current chip that this SPI + * driver currently operates on + * @current_msg: The current message that this SPI driver is + * handling + * @cur_trans: The current transfer that this SPI driver is + * handling + * @board_dat: Reference to the SPI device data structure + */ +struct pch_spi_data { + void __iomem *io_remap_addr; + struct spi_master *master; + struct work_struct work; + struct workqueue_struct *wk; + wait_queue_head_t wait; + u8 transfer_complete; + u8 bcurrent_msg_processing; + spinlock_t lock; + struct list_head queue; + u8 status; + u32 bpw_len; + u8 transfer_active; + u32 tx_index; + u32 rx_index; + u16 *pkt_tx_buff; + u16 *pkt_rx_buff; + u8 n_curnt_chip; + struct spi_device *current_chip; + struct spi_message *current_msg; + struct spi_transfer *cur_trans; + struct pch_spi_board_data *board_dat; +}; + +/** + * struct pch_spi_board_data - Holds the SPI device specific details + * @pdev: Pointer to the PCI device + * @irq_reg_sts: Status of IRQ registration + * @pci_req_sts: Status of pci_request_regions + * @suspend_sts: Status of suspend + * @data: Pointer to SPI channel data structure + */ +struct pch_spi_board_data { + struct pci_dev *pdev; + u8 irq_reg_sts; + u8 pci_req_sts; + u8 suspend_sts; + struct pch_spi_data *data; +}; + +static struct pci_device_id pch_spi_pcidev_id[] = { + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_GE_SPI)}, + {0,} +}; + +/** + * pch_spi_writereg() - Performs register writes + * @master: Pointer to struct spi_master. + * @idx: Register offset. + * @val: Value to be written to register. + */ +static inline void pch_spi_writereg(struct spi_master *master, int idx, u32 val) +{ + struct pch_spi_data *data = spi_master_get_devdata(master); + iowrite32(val, (data->io_remap_addr + idx)); +} + +/** + * pch_spi_readreg() - Performs register reads + * @master: Pointer to struct spi_master. + * @idx: Register offset. + */ +static inline u32 pch_spi_readreg(struct spi_master *master, int idx) +{ + struct pch_spi_data *data = spi_master_get_devdata(master); + return ioread32(data->io_remap_addr + idx); +} + +static inline void pch_spi_setclr_reg(struct spi_master *master, int idx, + u32 set, u32 clr) +{ + u32 tmp = pch_spi_readreg(master, idx); + tmp = (tmp & ~clr) | set; + pch_spi_writereg(master, idx, tmp); +} + +static void pch_spi_set_master_mode(struct spi_master *master) +{ + pch_spi_setclr_reg(master, PCH_SPCR, SPCR_MSTR_BIT, 0); +} + +/** + * pch_spi_clear_fifo() - Clears the Transmit and Receive FIFOs + * @master: Pointer to struct spi_master. 
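+ * Both FIFOs are flushed by toggling the FICLR bit in SPCR to 1 and
+ * back to 0.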
+ */ +static void pch_spi_clear_fifo(struct spi_master *master) +{ + pch_spi_setclr_reg(master, PCH_SPCR, SPCR_FICLR_BIT, 0); + pch_spi_setclr_reg(master, PCH_SPCR, 0, SPCR_FICLR_BIT); +} + +static void pch_spi_handler_sub(struct pch_spi_data *data, u32 reg_spsr_val, + void __iomem *io_remap_addr) +{ + u32 n_read, tx_index, rx_index, bpw_len; + u16 *pkt_rx_buffer, *pkt_tx_buff; + int read_cnt; + u32 reg_spcr_val; + void __iomem *spsr; + void __iomem *spdrr; + void __iomem *spdwr; + + spsr = io_remap_addr + PCH_SPSR; + iowrite32(reg_spsr_val, spsr); + + if (data->transfer_active) { + rx_index = data->rx_index; + tx_index = data->tx_index; + bpw_len = data->bpw_len; + pkt_rx_buffer = data->pkt_rx_buff; + pkt_tx_buff = data->pkt_tx_buff; + + spdrr = io_remap_addr + PCH_SPDRR; + spdwr = io_remap_addr + PCH_SPDWR; + + n_read = PCH_READABLE(reg_spsr_val); + + for (read_cnt = 0; (read_cnt < n_read); read_cnt++) { + pkt_rx_buffer[rx_index++] = ioread32(spdrr); + if (tx_index < bpw_len) + iowrite32(pkt_tx_buff[tx_index++], spdwr); + } + + /* disable RFI if not needed */ + if ((bpw_len - rx_index) <= PCH_MAX_FIFO_DEPTH) { + reg_spcr_val = ioread32(io_remap_addr + PCH_SPCR); + reg_spcr_val &= ~SPCR_RFIE_BIT; /* disable RFI */ + + /* reset rx threshold */ + reg_spcr_val &= MASK_RFIC_SPCR_BITS; + reg_spcr_val |= (PCH_RX_THOLD_MAX << SPCR_RFIC_FIELD); + iowrite32(((reg_spcr_val) &= (~(SPCR_RFIE_BIT))), + (io_remap_addr + PCH_SPCR)); + } + + /* update counts */ + data->tx_index = tx_index; + data->rx_index = rx_index; + + } + + /* if transfer complete interrupt */ + if (reg_spsr_val & SPSR_FI_BIT) { + /* disable FI & RFI interrupts */ + pch_spi_setclr_reg(data->master, PCH_SPCR, 0, + SPCR_FIE_BIT | SPCR_RFIE_BIT); + + /* transfer is completed;inform pch_spi_process_messages */ + data->transfer_complete = true; + wake_up(&data->wait); + } +} + +/** + * pch_spi_handler() - Interrupt handler + * @irq: The interrupt number. + * @dev_id: Pointer to struct pch_spi_board_data. + */ +static irqreturn_t pch_spi_handler(int irq, void *dev_id) +{ + u32 reg_spsr_val; + struct pch_spi_data *data; + void __iomem *spsr; + void __iomem *io_remap_addr; + irqreturn_t ret = IRQ_NONE; + struct pch_spi_board_data *board_dat = dev_id; + + if (board_dat->suspend_sts) { + dev_dbg(&board_dat->pdev->dev, + "%s returning due to suspend\n", __func__); + return IRQ_NONE; + } + + data = board_dat->data; + io_remap_addr = data->io_remap_addr; + spsr = io_remap_addr + PCH_SPSR; + + reg_spsr_val = ioread32(spsr); + + /* Check if the interrupt is for SPI device */ + if (reg_spsr_val & (SPSR_FI_BIT | SPSR_RFI_BIT)) { + pch_spi_handler_sub(data, reg_spsr_val, io_remap_addr); + ret = IRQ_HANDLED; + } + + dev_dbg(&board_dat->pdev->dev, "%s EXIT return value=%d\n", + __func__, ret); + + return ret; +} + +/** + * pch_spi_set_baud_rate() - Sets SPBR field in SPBRR + * @master: Pointer to struct spi_master. + * @speed_hz: Baud rate. + */ +static void pch_spi_set_baud_rate(struct spi_master *master, u32 speed_hz) +{ + u32 n_spbr = PCH_CLOCK_HZ / (speed_hz * 2); + + /* if baud rate is less than we can support limit it */ + if (n_spbr > PCH_MAX_SPBR) + n_spbr = PCH_MAX_SPBR; + + pch_spi_setclr_reg(master, PCH_SPBRR, n_spbr, ~MASK_SPBRR_SPBR_BITS); +} + +/** + * pch_spi_set_bits_per_word() - Sets SIZE field in SPBRR + * @master: Pointer to struct spi_master. + * @bits_per_word: Bits per word for SPI transfer. 
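+ * Clearing the SIZE bit selects 8-bit words; setting it selects 16-bit
+ * words, the only two widths this controller accepts.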
+ */ +static void pch_spi_set_bits_per_word(struct spi_master *master, + u8 bits_per_word) +{ + if (bits_per_word == 8) + pch_spi_setclr_reg(master, PCH_SPBRR, 0, SPBRR_SIZE_BIT); + else + pch_spi_setclr_reg(master, PCH_SPBRR, SPBRR_SIZE_BIT, 0); +} + +/** + * pch_spi_setup_transfer() - Configures the PCH SPI hardware for transfer + * @spi: Pointer to struct spi_device. + */ +static void pch_spi_setup_transfer(struct spi_device *spi) +{ + u32 flags = 0; + + dev_dbg(&spi->dev, "%s SPBRR content =%x setting baud rate=%d\n", + __func__, pch_spi_readreg(spi->master, PCH_SPBRR), + spi->max_speed_hz); + pch_spi_set_baud_rate(spi->master, spi->max_speed_hz); + + /* set bits per word */ + pch_spi_set_bits_per_word(spi->master, spi->bits_per_word); + + if (!(spi->mode & SPI_LSB_FIRST)) + flags |= SPCR_LSBF_BIT; + if (spi->mode & SPI_CPOL) + flags |= SPCR_CPOL_BIT; + if (spi->mode & SPI_CPHA) + flags |= SPCR_CPHA_BIT; + pch_spi_setclr_reg(spi->master, PCH_SPCR, flags, + (SPCR_LSBF_BIT | SPCR_CPOL_BIT | SPCR_CPHA_BIT)); + + /* Clear the FIFO by toggling FICLR to 1 and back to 0 */ + pch_spi_clear_fifo(spi->master); +} + +/** + * pch_spi_reset() - Clears SPI registers + * @master: Pointer to struct spi_master. + */ +static void pch_spi_reset(struct spi_master *master) +{ + /* write 1 to reset SPI */ + pch_spi_writereg(master, PCH_SRST, 0x1); + + /* clear reset */ + pch_spi_writereg(master, PCH_SRST, 0x0); +} + +static int pch_spi_setup(struct spi_device *pspi) +{ + /* check bits per word */ + if (pspi->bits_per_word == 0) { + pspi->bits_per_word = 8; + dev_dbg(&pspi->dev, "%s 8 bits per word\n", __func__); + } + + if ((pspi->bits_per_word != 8) && (pspi->bits_per_word != 16)) { + dev_err(&pspi->dev, "%s Invalid bits per word\n", __func__); + return -EINVAL; + } + + /* Check baud rate setting */ + /* if baud rate of chip is greater than + max we can support,return error */ + if ((pspi->max_speed_hz) > PCH_MAX_BAUDRATE) + pspi->max_speed_hz = PCH_MAX_BAUDRATE; + + dev_dbg(&pspi->dev, "%s MODE = %x\n", __func__, + (pspi->mode) & (SPI_CPOL | SPI_CPHA)); + + return 0; +} + +static int pch_spi_transfer(struct spi_device *pspi, struct spi_message *pmsg) +{ + + struct spi_transfer *transfer; + struct pch_spi_data *data = spi_master_get_devdata(pspi->master); + int retval; + unsigned long flags; + + /* validate spi message and baud rate */ + if (unlikely(list_empty(&pmsg->transfers) == 1)) { + dev_err(&pspi->dev, "%s list empty\n", __func__); + retval = -EINVAL; + goto err_out; + } + + if (unlikely(pspi->max_speed_hz == 0)) { + dev_err(&pspi->dev, "%s pch_spi_tranfer maxspeed=%d\n", + __func__, pspi->max_speed_hz); + retval = -EINVAL; + goto err_out; + } + + dev_dbg(&pspi->dev, "%s Transfer List not empty. " + "Transfer Speed is set.\n", __func__); + + /* validate Tx/Rx buffers and Transfer length */ + list_for_each_entry(transfer, &pmsg->transfers, transfer_list) { + if (!transfer->tx_buf && !transfer->rx_buf) { + dev_err(&pspi->dev, + "%s Tx and Rx buffer NULL\n", __func__); + retval = -EINVAL; + goto err_out; + } + + if (!transfer->len) { + dev_err(&pspi->dev, "%s Transfer length invalid\n", + __func__); + retval = -EINVAL; + goto err_out; + } + + dev_dbg(&pspi->dev, "%s Tx/Rx buffer valid. 
Transfer length" + " valid\n", __func__); + + /* if baud rate hs been specified validate the same */ + if (transfer->speed_hz > PCH_MAX_BAUDRATE) + transfer->speed_hz = PCH_MAX_BAUDRATE; + + /* if bits per word has been specified validate the same */ + if (transfer->bits_per_word) { + if ((transfer->bits_per_word != 8) + && (transfer->bits_per_word != 16)) { + retval = -EINVAL; + dev_err(&pspi->dev, + "%s Invalid bits per word\n", __func__); + goto err_out; + } + } + } + + spin_lock_irqsave(&data->lock, flags); + + /* We won't process any messages if we have been asked to terminate */ + if (data->status == STATUS_EXITING) { + dev_err(&pspi->dev, "%s status = STATUS_EXITING.\n", __func__); + retval = -ESHUTDOWN; + goto err_return_spinlock; + } + + /* If suspended ,return -EINVAL */ + if (data->board_dat->suspend_sts) { + dev_err(&pspi->dev, "%s suspend; returning EINVAL\n", __func__); + retval = -EINVAL; + goto err_return_spinlock; + } + + /* set status of message */ + pmsg->actual_length = 0; + dev_dbg(&pspi->dev, "%s - pmsg->status =%d\n", __func__, pmsg->status); + + pmsg->status = -EINPROGRESS; + + /* add message to queue */ + list_add_tail(&pmsg->queue, &data->queue); + dev_dbg(&pspi->dev, "%s - Invoked list_add_tail\n", __func__); + + /* schedule work queue to run */ + queue_work(data->wk, &data->work); + dev_dbg(&pspi->dev, "%s - Invoked queue work\n", __func__); + + retval = 0; + +err_return_spinlock: + spin_unlock_irqrestore(&data->lock, flags); +err_out: + dev_dbg(&pspi->dev, "%s RETURN=%d\n", __func__, retval); + return retval; +} + +static inline void pch_spi_select_chip(struct pch_spi_data *data, + struct spi_device *pspi) +{ + if (data->current_chip != NULL) { + if (pspi->chip_select != data->n_curnt_chip) { + dev_dbg(&pspi->dev, "%s : different slave\n", __func__); + data->current_chip = NULL; + } + } + + data->current_chip = pspi; + + data->n_curnt_chip = data->current_chip->chip_select; + + dev_dbg(&pspi->dev, "%s :Invoking pch_spi_setup_transfer\n", __func__); + pch_spi_setup_transfer(pspi); +} + +static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw, + struct spi_message **ppmsg) +{ + int size; + u32 n_writes; + int j; + struct spi_message *pmsg; + const u8 *tx_buf; + const u16 *tx_sbuf; + + pmsg = *ppmsg; + + /* set baud rate if needed */ + if (data->cur_trans->speed_hz) { + dev_dbg(&data->master->dev, "%s:setting baud rate\n", __func__); + pch_spi_set_baud_rate(data->master, data->cur_trans->speed_hz); + } + + /* set bits per word if needed */ + if (data->cur_trans->bits_per_word && + (data->current_msg->spi->bits_per_word != data->cur_trans->bits_per_word)) { + dev_dbg(&data->master->dev, "%s:set bits per word\n", __func__); + pch_spi_set_bits_per_word(data->master, + data->cur_trans->bits_per_word); + *bpw = data->cur_trans->bits_per_word; + } else { + *bpw = data->current_msg->spi->bits_per_word; + } + + /* reset Tx/Rx index */ + data->tx_index = 0; + data->rx_index = 0; + + data->bpw_len = data->cur_trans->len / (*bpw / 8); + + /* find alloc size */ + size = data->cur_trans->len * sizeof(*data->pkt_tx_buff); + + /* allocate memory for pkt_tx_buff & pkt_rx_buffer */ + data->pkt_tx_buff = kzalloc(size, GFP_KERNEL); + if (data->pkt_tx_buff != NULL) { + data->pkt_rx_buff = kzalloc(size, GFP_KERNEL); + if (!data->pkt_rx_buff) + kfree(data->pkt_tx_buff); + } + + if (!data->pkt_rx_buff) { + /* flush queue and set status of all transfers to -ENOMEM */ + dev_err(&data->master->dev, "%s :kzalloc failed\n", __func__); + list_for_each_entry(pmsg, data->queue.next, 
queue) {
+			pmsg->status = -ENOMEM;
+
+			if (pmsg->complete != 0)
+				pmsg->complete(pmsg->context);
+
+			/* delete from queue */
+			list_del_init(&pmsg->queue);
+		}
+		return;
+	}
+
+	/* copy Tx Data */
+	if (data->cur_trans->tx_buf != NULL) {
+		if (*bpw == 8) {
+			tx_buf = data->cur_trans->tx_buf;
+			for (j = 0; j < data->bpw_len; j++)
+				data->pkt_tx_buff[j] = *tx_buf++;
+		} else {
+			tx_sbuf = data->cur_trans->tx_buf;
+			for (j = 0; j < data->bpw_len; j++)
+				data->pkt_tx_buff[j] = *tx_sbuf++;
+		}
+	}
+
+	/* if len greater than PCH_MAX_FIFO_DEPTH, write 16, else len bytes */
+	n_writes = data->bpw_len;
+	if (n_writes > PCH_MAX_FIFO_DEPTH)
+		n_writes = PCH_MAX_FIFO_DEPTH;
+
+	dev_dbg(&data->master->dev, "\n%s:Pulling down SSN low - writing "
+		"0x2 to SSNXCR\n", __func__);
+	pch_spi_writereg(data->master, PCH_SSNXCR, SSN_LOW);
+
+	for (j = 0; j < n_writes; j++)
+		pch_spi_writereg(data->master, PCH_SPDWR, data->pkt_tx_buff[j]);
+
+	/* update tx_index */
+	data->tx_index = j;
+
+	/* reset transfer complete flag */
+	data->transfer_complete = false;
+	data->transfer_active = true;
+}
+
+
+static void pch_spi_nomore_transfer(struct pch_spi_data *data,
+				    struct spi_message *pmsg)
+{
+	dev_dbg(&data->master->dev, "%s called\n", __func__);
+	/* Invoke complete callback
+	 * [to the SPI core, indicating end of transfer] */
+	data->current_msg->status = 0;
+
+	if (data->current_msg->complete != 0) {
+		dev_dbg(&data->master->dev,
+			"%s:Invoking callback of SPI core\n", __func__);
+		data->current_msg->complete(data->current_msg->context);
+	}
+
+	/* update status in global variable */
+	data->bcurrent_msg_processing = false;
+
+	dev_dbg(&data->master->dev,
+		"%s:data->bcurrent_msg_processing = false\n", __func__);
+
+	data->current_msg = NULL;
+	data->cur_trans = NULL;
+
+	/* check if we have items in the queue and are not suspending;
+	 * list_empty() returns 1 if the list is empty */
+	if ((list_empty(&data->queue) == 0) &&
+	    (!data->board_dat->suspend_sts) &&
+	    (data->status != STATUS_EXITING)) {
+		/* We have some more work to do (either there are more
+		 * transfer requests in the current message or there are
+		 * more messages)
+		 */
+		dev_dbg(&data->master->dev, "%s:Invoke queue_work\n", __func__);
+		queue_work(data->wk, &data->work);
+	} else if (data->board_dat->suspend_sts ||
+		   data->status == STATUS_EXITING) {
+		dev_dbg(&data->master->dev,
+			"%s suspend/remove initiated, flushing queue\n",
+			__func__);
+		list_for_each_entry(pmsg, data->queue.next, queue) {
+			pmsg->status = -EIO;
+
+			if (pmsg->complete)
+				pmsg->complete(pmsg->context);
+
+			/* delete from queue */
+			list_del_init(&pmsg->queue);
+		}
+	}
+}
+
+static void pch_spi_set_ir(struct pch_spi_data *data)
+{
+	/* enable interrupts */
+	if ((data->bpw_len) > PCH_MAX_FIFO_DEPTH) {
+		/* set receive threshold to PCH_RX_THOLD */
+		pch_spi_setclr_reg(data->master, PCH_SPCR,
+				   PCH_RX_THOLD << SPCR_RFIC_FIELD,
+				   ~MASK_RFIC_SPCR_BITS);
+		/* enable FI and RFI interrupts */
+		pch_spi_setclr_reg(data->master, PCH_SPCR,
+				   SPCR_RFIE_BIT | SPCR_FIE_BIT, 0);
+	} else {
+		/* set receive threshold to maximum */
+		pch_spi_setclr_reg(data->master, PCH_SPCR,
+				   PCH_RX_THOLD_MAX << SPCR_TFIC_FIELD,
+				   ~MASK_TFIC_SPCR_BITS);
+		/* enable FI interrupt */
+		pch_spi_setclr_reg(data->master, PCH_SPCR, SPCR_FIE_BIT, 0);
+	}
+
+	dev_dbg(&data->master->dev,
+		"%s:invoking pch_spi_set_enable to enable SPI\n", __func__);
+
+	/* SPI set enable */
+	pch_spi_setclr_reg(data->current_chip->master, PCH_SPCR, SPCR_SPE_BIT, 0);
+
+	/* Wait until the transfer completes; go to sleep after
+	   initiating the transfer.
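+	   The FI interrupt path (pch_spi_handler_sub) sets transfer_complete
+	   and wakes up this wait queue.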
*/ + dev_dbg(&data->master->dev, + "%s:waiting for transfer to get over\n", __func__); + + wait_event_interruptible(data->wait, data->transfer_complete); + + pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL); + dev_dbg(&data->master->dev, + "%s:no more control over SSN-writing 0 to SSNXCR.", __func__); + + data->transfer_active = false; + dev_dbg(&data->master->dev, + "%s set data->transfer_active = false\n", __func__); + + /* clear all interrupts */ + pch_spi_writereg(data->master, PCH_SPSR, + pch_spi_readreg(data->master, PCH_SPSR)); + /* disable interrupts */ + pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL); +} + +static void pch_spi_copy_rx_data(struct pch_spi_data *data, int bpw) +{ + int j; + u8 *rx_buf; + u16 *rx_sbuf; + + /* copy Rx Data */ + if (!data->cur_trans->rx_buf) + return; + + if (bpw == 8) { + rx_buf = data->cur_trans->rx_buf; + for (j = 0; j < data->bpw_len; j++) + *rx_buf++ = data->pkt_rx_buff[j] & 0xFF; + } else { + rx_sbuf = data->cur_trans->rx_buf; + for (j = 0; j < data->bpw_len; j++) + *rx_sbuf++ = data->pkt_rx_buff[j]; + } +} + + +static void pch_spi_process_messages(struct work_struct *pwork) +{ + struct spi_message *pmsg; + struct pch_spi_data *data; + int bpw; + + data = container_of(pwork, struct pch_spi_data, work); + dev_dbg(&data->master->dev, "%s data initialized\n", __func__); + + spin_lock(&data->lock); + + /* check if suspend has been initiated;if yes flush queue */ + if (data->board_dat->suspend_sts || (data->status == STATUS_EXITING)) { + dev_dbg(&data->master->dev, + "%s suspend/remove initiated,flushing queue\n", + __func__); + + list_for_each_entry(pmsg, data->queue.next, queue) { + pmsg->status = -EIO; + + if (pmsg->complete != 0) { + spin_unlock(&data->lock); + pmsg->complete(pmsg->context); + spin_lock(&data->lock); + } + + /* delete from queue */ + list_del_init(&pmsg->queue); + } + + spin_unlock(&data->lock); + return; + } + + data->bcurrent_msg_processing = true; + dev_dbg(&data->master->dev, + "%s Set data->bcurrent_msg_processing= true\n", __func__); + + /* Get the message from the queue and delete it from there. */ + data->current_msg = list_entry(data->queue.next, struct spi_message, + queue); + + list_del_init(&data->current_msg->queue); + + data->current_msg->status = 0; + + pch_spi_select_chip(data, data->current_msg->spi); + + spin_unlock(&data->lock); + + do { + /* If we are already processing a message get the next + transfer structure from the message otherwise retrieve + the 1st transfer request from the message. */ + spin_lock(&data->lock); + + if (data->cur_trans == NULL) { + data->cur_trans = + list_entry(data->current_msg->transfers. 
+ next, struct spi_transfer, + transfer_list); + dev_dbg(&data->master->dev, + "%s :Getting 1st transfer message\n", __func__); + } else { + data->cur_trans = + list_entry(data->cur_trans->transfer_list.next, + struct spi_transfer, + transfer_list); + dev_dbg(&data->master->dev, + "%s :Getting next transfer message\n", + __func__); + } + + spin_unlock(&data->lock); + + pch_spi_set_tx(data, &bpw, &pmsg); + + /* Control interrupt*/ + pch_spi_set_ir(data); + + /* Disable SPI transfer */ + pch_spi_setclr_reg(data->current_chip->master, PCH_SPCR, 0, + SPCR_SPE_BIT); + + /* clear FIFO */ + pch_spi_clear_fifo(data->master); + + /* copy Rx Data */ + pch_spi_copy_rx_data(data, bpw); + + /* free memory */ + kfree(data->pkt_rx_buff); + data->pkt_rx_buff = NULL; + + kfree(data->pkt_tx_buff); + data->pkt_tx_buff = NULL; + + /* increment message count */ + data->current_msg->actual_length += data->cur_trans->len; + + dev_dbg(&data->master->dev, + "%s:data->current_msg->actual_length=%d\n", + __func__, data->current_msg->actual_length); + + /* check for delay */ + if (data->cur_trans->delay_usecs) { + dev_dbg(&data->master->dev, "%s:" + "delay in usec=%d\n", __func__, + data->cur_trans->delay_usecs); + udelay(data->cur_trans->delay_usecs); + } + + spin_lock(&data->lock); + + /* No more transfer in this message. */ + if ((data->cur_trans->transfer_list.next) == + &(data->current_msg->transfers)) { + pch_spi_nomore_transfer(data, pmsg); + } + + spin_unlock(&data->lock); + + } while (data->cur_trans != NULL); +} + +static void pch_spi_free_resources(struct pch_spi_board_data *board_dat) +{ + dev_dbg(&board_dat->pdev->dev, "%s ENTRY\n", __func__); + + /* free workqueue */ + if (board_dat->data->wk != NULL) { + destroy_workqueue(board_dat->data->wk); + board_dat->data->wk = NULL; + dev_dbg(&board_dat->pdev->dev, + "%s destroy_workqueue invoked successfully\n", + __func__); + } + + /* disable interrupts & free IRQ */ + if (board_dat->irq_reg_sts) { + /* disable interrupts */ + pch_spi_setclr_reg(board_dat->data->master, PCH_SPCR, 0, + PCH_ALL); + + /* free IRQ */ + free_irq(board_dat->pdev->irq, board_dat); + + dev_dbg(&board_dat->pdev->dev, + "%s free_irq invoked successfully\n", __func__); + + board_dat->irq_reg_sts = false; + } + + /* unmap PCI base address */ + if (board_dat->data->io_remap_addr != 0) { + pci_iounmap(board_dat->pdev, board_dat->data->io_remap_addr); + + board_dat->data->io_remap_addr = 0; + + dev_dbg(&board_dat->pdev->dev, + "%s pci_iounmap invoked successfully\n", __func__); + } + + /* release PCI region */ + if (board_dat->pci_req_sts) { + pci_release_regions(board_dat->pdev); + dev_dbg(&board_dat->pdev->dev, + "%s pci_release_regions invoked successfully\n", + __func__); + board_dat->pci_req_sts = false; + } +} + +static int pch_spi_get_resources(struct pch_spi_board_data *board_dat) +{ + void __iomem *io_remap_addr; + int retval; + dev_dbg(&board_dat->pdev->dev, "%s ENTRY\n", __func__); + + /* create workqueue */ + board_dat->data->wk = create_singlethread_workqueue(KBUILD_MODNAME); + if (!board_dat->data->wk) { + dev_err(&board_dat->pdev->dev, + "%s create_singlet hread_workqueue failed\n", __func__); + retval = -EBUSY; + goto err_return; + } + + dev_dbg(&board_dat->pdev->dev, + "%s create_singlethread_workqueue success\n", __func__); + + retval = pci_request_regions(board_dat->pdev, KBUILD_MODNAME); + if (retval != 0) { + dev_err(&board_dat->pdev->dev, + "%s request_region failed\n", __func__); + goto err_return; + } + + board_dat->pci_req_sts = true; + + io_remap_addr = 
pci_iomap(board_dat->pdev, 1, 0); + if (io_remap_addr == 0) { + dev_err(&board_dat->pdev->dev, + "%s pci_iomap failed\n", __func__); + retval = -ENOMEM; + goto err_return; + } + + /* calculate base address for all channels */ + board_dat->data->io_remap_addr = io_remap_addr; + + /* reset PCH SPI h/w */ + pch_spi_reset(board_dat->data->master); + dev_dbg(&board_dat->pdev->dev, + "%s pch_spi_reset invoked successfully\n", __func__); + + /* register IRQ */ + retval = request_irq(board_dat->pdev->irq, pch_spi_handler, + IRQF_SHARED, KBUILD_MODNAME, board_dat); + if (retval != 0) { + dev_err(&board_dat->pdev->dev, + "%s request_irq failed\n", __func__); + goto err_return; + } + + dev_dbg(&board_dat->pdev->dev, "%s request_irq returned=%d\n", + __func__, retval); + + board_dat->irq_reg_sts = true; + dev_dbg(&board_dat->pdev->dev, "%s data->irq_reg_sts=true\n", __func__); + +err_return: + if (retval != 0) { + dev_err(&board_dat->pdev->dev, + "%s FAIL:invoking pch_spi_free_resources\n", __func__); + pch_spi_free_resources(board_dat); + } + + dev_dbg(&board_dat->pdev->dev, "%s Return=%d\n", __func__, retval); + + return retval; +} + +static int pch_spi_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + + struct spi_master *master; + + struct pch_spi_board_data *board_dat; + int retval; + + dev_dbg(&pdev->dev, "%s ENTRY\n", __func__); + + /* allocate memory for private data */ + board_dat = kzalloc(sizeof(struct pch_spi_board_data), GFP_KERNEL); + if (board_dat == NULL) { + dev_err(&pdev->dev, + " %s memory allocation for private data failed\n", + __func__); + retval = -ENOMEM; + goto err_kmalloc; + } + + dev_dbg(&pdev->dev, + "%s memory allocation for private data success\n", __func__); + + /* enable PCI device */ + retval = pci_enable_device(pdev); + if (retval != 0) { + dev_err(&pdev->dev, "%s pci_enable_device FAILED\n", __func__); + + goto err_pci_en_device; + } + + dev_dbg(&pdev->dev, "%s pci_enable_device returned=%d\n", + __func__, retval); + + board_dat->pdev = pdev; + + /* alllocate memory for SPI master */ + master = spi_alloc_master(&pdev->dev, sizeof(struct pch_spi_data)); + if (master == NULL) { + retval = -ENOMEM; + dev_err(&pdev->dev, "%s Fail.\n", __func__); + goto err_spi_alloc_master; + } + + dev_dbg(&pdev->dev, + "%s spi_alloc_master returned non NULL\n", __func__); + + /* initialize members of SPI master */ + master->bus_num = -1; + master->num_chipselect = PCH_MAX_CS; + master->setup = pch_spi_setup; + master->transfer = pch_spi_transfer; + dev_dbg(&pdev->dev, + "%s transfer member of SPI master initialized\n", __func__); + + board_dat->data = spi_master_get_devdata(master); + + board_dat->data->master = master; + board_dat->data->n_curnt_chip = 255; + board_dat->data->board_dat = board_dat; + board_dat->data->status = STATUS_RUNNING; + + INIT_LIST_HEAD(&board_dat->data->queue); + spin_lock_init(&board_dat->data->lock); + INIT_WORK(&board_dat->data->work, pch_spi_process_messages); + init_waitqueue_head(&board_dat->data->wait); + + /* allocate resources for PCH SPI */ + retval = pch_spi_get_resources(board_dat); + if (retval) { + dev_err(&pdev->dev, "%s fail(retval=%d)\n", __func__, retval); + goto err_spi_get_resources; + } + + dev_dbg(&pdev->dev, "%s pch_spi_get_resources returned=%d\n", + __func__, retval); + + /* save private data in dev */ + pci_set_drvdata(pdev, board_dat); + dev_dbg(&pdev->dev, "%s invoked pci_set_drvdata\n", __func__); + + /* set master mode */ + pch_spi_set_master_mode(master); + dev_dbg(&pdev->dev, + "%s invoked 
pch_spi_set_master_mode\n", __func__); + + /* Register the controller with the SPI core. */ + retval = spi_register_master(master); + if (retval != 0) { + dev_err(&pdev->dev, + "%s spi_register_master FAILED\n", __func__); + goto err_spi_reg_master; + } + + dev_dbg(&pdev->dev, "%s spi_register_master returned=%d\n", + __func__, retval); + + + return 0; + +err_spi_reg_master: + spi_unregister_master(master); +err_spi_get_resources: +err_spi_alloc_master: + spi_master_put(master); + pci_disable_device(pdev); +err_pci_en_device: + kfree(board_dat); +err_kmalloc: + return retval; +} + +static void pch_spi_remove(struct pci_dev *pdev) +{ + struct pch_spi_board_data *board_dat = pci_get_drvdata(pdev); + int count; + + dev_dbg(&pdev->dev, "%s ENTRY\n", __func__); + + if (!board_dat) { + dev_err(&pdev->dev, + "%s pci_get_drvdata returned NULL\n", __func__); + return; + } + + /* check for any pending messages; no action is taken if the queue + * is still full; but at least we tried. Unload anyway */ + count = 500; + spin_lock(&board_dat->data->lock); + board_dat->data->status = STATUS_EXITING; + while ((list_empty(&board_dat->data->queue) == 0) && --count) { + dev_dbg(&board_dat->pdev->dev, "%s :queue not empty\n", + __func__); + spin_unlock(&board_dat->data->lock); + msleep(PCH_SLEEP_TIME); + spin_lock(&board_dat->data->lock); + } + spin_unlock(&board_dat->data->lock); + + /* Free resources allocated for PCH SPI */ + pch_spi_free_resources(board_dat); + + spi_unregister_master(board_dat->data->master); + + /* free memory for private data */ + kfree(board_dat); + + pci_set_drvdata(pdev, NULL); + + /* disable PCI device */ + pci_disable_device(pdev); + + dev_dbg(&pdev->dev, "%s invoked pci_disable_device\n", __func__); +} + +#ifdef CONFIG_PM +static int pch_spi_suspend(struct pci_dev *pdev, pm_message_t state) +{ + u8 count; + int retval; + + struct pch_spi_board_data *board_dat = pci_get_drvdata(pdev); + + dev_dbg(&pdev->dev, "%s ENTRY\n", __func__); + + if (!board_dat) { + dev_err(&pdev->dev, + "%s pci_get_drvdata returned NULL\n", __func__); + return -EFAULT; + } + + retval = 0; + board_dat->suspend_sts = true; + + /* check if the current message is processed: + Only after thats done the transfer will be suspended */ + count = 255; + while ((--count) > 0) { + if (!(board_dat->data->bcurrent_msg_processing)) { + dev_dbg(&pdev->dev, "%s board_dat->data->bCurrent_" + "msg_processing = false\n", __func__); + break; + } else { + dev_dbg(&pdev->dev, "%s board_dat->data->bCurrent_msg_" + "processing = true\n", __func__); + } + msleep(PCH_SLEEP_TIME); + } + + /* Free IRQ */ + if (board_dat->irq_reg_sts) { + /* disable all interrupts */ + pch_spi_setclr_reg(board_dat->data->master, PCH_SPCR, 0, + PCH_ALL); + pch_spi_reset(board_dat->data->master); + + free_irq(board_dat->pdev->irq, board_dat); + + board_dat->irq_reg_sts = false; + dev_dbg(&pdev->dev, + "%s free_irq invoked successfully.\n", __func__); + } + + /* save config space */ + retval = pci_save_state(pdev); + + if (retval == 0) { + dev_dbg(&pdev->dev, "%s pci_save_state returned=%d\n", + __func__, retval); + /* disable PM notifications */ + pci_enable_wake(pdev, PCI_D3hot, 0); + dev_dbg(&pdev->dev, + "%s pci_enable_wake invoked successfully\n", __func__); + /* disable PCI device */ + pci_disable_device(pdev); + dev_dbg(&pdev->dev, + "%s pci_disable_device invoked successfully\n", + __func__); + /* move device to D3hot state */ + pci_set_power_state(pdev, PCI_D3hot); + dev_dbg(&pdev->dev, + "%s pci_set_power_state invoked successfully\n", + 
__func__); + } else { + dev_err(&pdev->dev, "%s pci_save_state failed\n", __func__); + } + + dev_dbg(&pdev->dev, "%s return=%d\n", __func__, retval); + + return retval; +} + +static int pch_spi_resume(struct pci_dev *pdev) +{ + int retval; + + struct pch_spi_board_data *board = pci_get_drvdata(pdev); + dev_dbg(&pdev->dev, "%s ENTRY\n", __func__); + + if (!board) { + dev_err(&pdev->dev, + "%s pci_get_drvdata returned NULL\n", __func__); + return -EFAULT; + } + + /* move device to DO power state */ + pci_set_power_state(pdev, PCI_D0); + + /* restore state */ + pci_restore_state(pdev); + + retval = pci_enable_device(pdev); + if (retval < 0) { + dev_err(&pdev->dev, + "%s pci_enable_device failed\n", __func__); + } else { + /* disable PM notifications */ + pci_enable_wake(pdev, PCI_D3hot, 0); + + /* register IRQ handler */ + if (!board->irq_reg_sts) { + /* register IRQ */ + retval = request_irq(board->pdev->irq, pch_spi_handler, + IRQF_SHARED, KBUILD_MODNAME, + board); + if (retval < 0) { + dev_err(&pdev->dev, + "%s request_irq failed\n", __func__); + return retval; + } + board->irq_reg_sts = true; + + /* reset PCH SPI h/w */ + pch_spi_reset(board->data->master); + pch_spi_set_master_mode(board->data->master); + + /* set suspend status to false */ + board->suspend_sts = false; + + } + } + + dev_dbg(&pdev->dev, "%s returning=%d\n", __func__, retval); + + return retval; +} +#else +#define pch_spi_suspend NULL +#define pch_spi_resume NULL + +#endif + +static struct pci_driver pch_spi_pcidev = { + .name = "pch_spi", + .id_table = pch_spi_pcidev_id, + .probe = pch_spi_probe, + .remove = pch_spi_remove, + .suspend = pch_spi_suspend, + .resume = pch_spi_resume, +}; + +static int __init pch_spi_init(void) +{ + return pci_register_driver(&pch_spi_pcidev); +} +module_init(pch_spi_init); + +static void __exit pch_spi_exit(void) +{ + pci_unregister_driver(&pch_spi_pcidev); +} +module_exit(pch_spi_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Topcliff PCH SPI PCI Driver"); diff --git a/drivers/spi/spi-txx9.c b/drivers/spi/spi-txx9.c new file mode 100644 index 0000000..f0a2ab0 --- /dev/null +++ b/drivers/spi/spi-txx9.c @@ -0,0 +1,472 @@ +/* + * TXx9 SPI controller driver. + * + * Based on linux/arch/mips/tx4938/toshiba_rbtx4938/spi_txx9.c + * Copyright (C) 2000-2001 Toshiba Corporation + * + * 2003-2005 (c) MontaVista Software, Inc. This file is licensed under the + * terms of the GNU General Public License version 2. This program is + * licensed "as is" without any warranty of any kind, whether express + * or implied. + * + * Support for TX4938 in 2.6 - Manish Lachwani (mlachwani@mvista.com) + * + * Convert to generic SPI framework - Atsushi Nemoto (anemo@mba.ocn.ne.jp) + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#define SPI_FIFO_SIZE 4 +#define SPI_MAX_DIVIDER 0xff /* Max. value for SPCR1.SER */ +#define SPI_MIN_DIVIDER 1 /* Min. 
value for SPCR1.SER */ + +#define TXx9_SPMCR 0x00 +#define TXx9_SPCR0 0x04 +#define TXx9_SPCR1 0x08 +#define TXx9_SPFS 0x0c +#define TXx9_SPSR 0x14 +#define TXx9_SPDR 0x18 + +/* SPMCR : SPI Master Control */ +#define TXx9_SPMCR_OPMODE 0xc0 +#define TXx9_SPMCR_CONFIG 0x40 +#define TXx9_SPMCR_ACTIVE 0x80 +#define TXx9_SPMCR_SPSTP 0x02 +#define TXx9_SPMCR_BCLR 0x01 + +/* SPCR0 : SPI Control 0 */ +#define TXx9_SPCR0_TXIFL_MASK 0xc000 +#define TXx9_SPCR0_RXIFL_MASK 0x3000 +#define TXx9_SPCR0_SIDIE 0x0800 +#define TXx9_SPCR0_SOEIE 0x0400 +#define TXx9_SPCR0_RBSIE 0x0200 +#define TXx9_SPCR0_TBSIE 0x0100 +#define TXx9_SPCR0_IFSPSE 0x0010 +#define TXx9_SPCR0_SBOS 0x0004 +#define TXx9_SPCR0_SPHA 0x0002 +#define TXx9_SPCR0_SPOL 0x0001 + +/* SPSR : SPI Status */ +#define TXx9_SPSR_TBSI 0x8000 +#define TXx9_SPSR_RBSI 0x4000 +#define TXx9_SPSR_TBS_MASK 0x3800 +#define TXx9_SPSR_RBS_MASK 0x0700 +#define TXx9_SPSR_SPOE 0x0080 +#define TXx9_SPSR_IFSD 0x0008 +#define TXx9_SPSR_SIDLE 0x0004 +#define TXx9_SPSR_STRDY 0x0002 +#define TXx9_SPSR_SRRDY 0x0001 + + +struct txx9spi { + struct workqueue_struct *workqueue; + struct work_struct work; + spinlock_t lock; /* protect 'queue' */ + struct list_head queue; + wait_queue_head_t waitq; + void __iomem *membase; + int baseclk; + struct clk *clk; + u32 max_speed_hz, min_speed_hz; + int last_chipselect; + int last_chipselect_val; +}; + +static u32 txx9spi_rd(struct txx9spi *c, int reg) +{ + return __raw_readl(c->membase + reg); +} +static void txx9spi_wr(struct txx9spi *c, u32 val, int reg) +{ + __raw_writel(val, c->membase + reg); +} + +static void txx9spi_cs_func(struct spi_device *spi, struct txx9spi *c, + int on, unsigned int cs_delay) +{ + int val = (spi->mode & SPI_CS_HIGH) ? on : !on; + if (on) { + /* deselect the chip with cs_change hint in last transfer */ + if (c->last_chipselect >= 0) + gpio_set_value(c->last_chipselect, + !c->last_chipselect_val); + c->last_chipselect = spi->chip_select; + c->last_chipselect_val = val; + } else { + c->last_chipselect = -1; + ndelay(cs_delay); /* CS Hold Time */ + } + gpio_set_value(spi->chip_select, val); + ndelay(cs_delay); /* CS Setup Time / CS Recovery Time */ +} + +static int txx9spi_setup(struct spi_device *spi) +{ + struct txx9spi *c = spi_master_get_devdata(spi->master); + u8 bits_per_word; + + if (!spi->max_speed_hz + || spi->max_speed_hz > c->max_speed_hz + || spi->max_speed_hz < c->min_speed_hz) + return -EINVAL; + + bits_per_word = spi->bits_per_word; + if (bits_per_word != 8 && bits_per_word != 16) + return -EINVAL; + + if (gpio_direction_output(spi->chip_select, + !(spi->mode & SPI_CS_HIGH))) { + dev_err(&spi->dev, "Cannot setup GPIO for chipselect.\n"); + return -EINVAL; + } + + /* deselect chip */ + spin_lock(&c->lock); + txx9spi_cs_func(spi, c, 0, (NSEC_PER_SEC / 2) / spi->max_speed_hz); + spin_unlock(&c->lock); + + return 0; +} + +static irqreturn_t txx9spi_interrupt(int irq, void *dev_id) +{ + struct txx9spi *c = dev_id; + + /* disable rx intr */ + txx9spi_wr(c, txx9spi_rd(c, TXx9_SPCR0) & ~TXx9_SPCR0_RBSIE, + TXx9_SPCR0); + wake_up(&c->waitq); + return IRQ_HANDLED; +} + +static void txx9spi_work_one(struct txx9spi *c, struct spi_message *m) +{ + struct spi_device *spi = m->spi; + struct spi_transfer *t; + unsigned int cs_delay; + unsigned int cs_change = 1; + int status = 0; + u32 mcr; + u32 prev_speed_hz = 0; + u8 prev_bits_per_word = 0; + + /* CS setup/hold/recovery time in nsec */ + cs_delay = 100 + (NSEC_PER_SEC / 2) / spi->max_speed_hz; + + mcr = txx9spi_rd(c, TXx9_SPMCR); + if (unlikely((mcr & 
TXx9_SPMCR_OPMODE) == TXx9_SPMCR_ACTIVE)) { + dev_err(&spi->dev, "Bad mode.\n"); + status = -EIO; + goto exit; + } + mcr &= ~(TXx9_SPMCR_OPMODE | TXx9_SPMCR_SPSTP | TXx9_SPMCR_BCLR); + + /* enter config mode */ + txx9spi_wr(c, mcr | TXx9_SPMCR_CONFIG | TXx9_SPMCR_BCLR, TXx9_SPMCR); + txx9spi_wr(c, TXx9_SPCR0_SBOS + | ((spi->mode & SPI_CPOL) ? TXx9_SPCR0_SPOL : 0) + | ((spi->mode & SPI_CPHA) ? TXx9_SPCR0_SPHA : 0) + | 0x08, + TXx9_SPCR0); + + list_for_each_entry (t, &m->transfers, transfer_list) { + const void *txbuf = t->tx_buf; + void *rxbuf = t->rx_buf; + u32 data; + unsigned int len = t->len; + unsigned int wsize; + u32 speed_hz = t->speed_hz ? : spi->max_speed_hz; + u8 bits_per_word = t->bits_per_word ? : spi->bits_per_word; + + bits_per_word = bits_per_word ? : 8; + wsize = bits_per_word >> 3; /* in bytes */ + + if (prev_speed_hz != speed_hz + || prev_bits_per_word != bits_per_word) { + int n = DIV_ROUND_UP(c->baseclk, speed_hz) - 1; + n = clamp(n, SPI_MIN_DIVIDER, SPI_MAX_DIVIDER); + /* enter config mode */ + txx9spi_wr(c, mcr | TXx9_SPMCR_CONFIG | TXx9_SPMCR_BCLR, + TXx9_SPMCR); + txx9spi_wr(c, (n << 8) | bits_per_word, TXx9_SPCR1); + /* enter active mode */ + txx9spi_wr(c, mcr | TXx9_SPMCR_ACTIVE, TXx9_SPMCR); + + prev_speed_hz = speed_hz; + prev_bits_per_word = bits_per_word; + } + + if (cs_change) + txx9spi_cs_func(spi, c, 1, cs_delay); + cs_change = t->cs_change; + while (len) { + unsigned int count = SPI_FIFO_SIZE; + int i; + u32 cr0; + + if (len < count * wsize) + count = len / wsize; + /* now tx must be idle... */ + while (!(txx9spi_rd(c, TXx9_SPSR) & TXx9_SPSR_SIDLE)) + cpu_relax(); + cr0 = txx9spi_rd(c, TXx9_SPCR0); + cr0 &= ~TXx9_SPCR0_RXIFL_MASK; + cr0 |= (count - 1) << 12; + /* enable rx intr */ + cr0 |= TXx9_SPCR0_RBSIE; + txx9spi_wr(c, cr0, TXx9_SPCR0); + /* send */ + for (i = 0; i < count; i++) { + if (txbuf) { + data = (wsize == 1) + ? *(const u8 *)txbuf + : *(const u16 *)txbuf; + txx9spi_wr(c, data, TXx9_SPDR); + txbuf += wsize; + } else + txx9spi_wr(c, 0, TXx9_SPDR); + } + /* wait all rx data */ + wait_event(c->waitq, + txx9spi_rd(c, TXx9_SPSR) & TXx9_SPSR_RBSI); + /* receive */ + for (i = 0; i < count; i++) { + data = txx9spi_rd(c, TXx9_SPDR); + if (rxbuf) { + if (wsize == 1) + *(u8 *)rxbuf = data; + else + *(u16 *)rxbuf = data; + rxbuf += wsize; + } + } + len -= count * wsize; + } + m->actual_length += t->len; + if (t->delay_usecs) + udelay(t->delay_usecs); + + if (!cs_change) + continue; + if (t->transfer_list.next == &m->transfers) + break; + /* sometimes a short mid-message deselect of the chip + * may be needed to terminate a mode or command + */ + txx9spi_cs_func(spi, c, 0, cs_delay); + } + +exit: + m->status = status; + m->complete(m->context); + + /* normally deactivate chipselect ... unless no error and + * cs_change has hinted that the next message will probably + * be for this chip too. 
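+	 * If the next message is actually for a different chip,
+	 * txx9spi_cs_func() deselects this one before asserting the new
+	 * chip select.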
+ */ + if (!(status == 0 && cs_change)) + txx9spi_cs_func(spi, c, 0, cs_delay); + + /* enter config mode */ + txx9spi_wr(c, mcr | TXx9_SPMCR_CONFIG | TXx9_SPMCR_BCLR, TXx9_SPMCR); +} + +static void txx9spi_work(struct work_struct *work) +{ + struct txx9spi *c = container_of(work, struct txx9spi, work); + unsigned long flags; + + spin_lock_irqsave(&c->lock, flags); + while (!list_empty(&c->queue)) { + struct spi_message *m; + + m = container_of(c->queue.next, struct spi_message, queue); + list_del_init(&m->queue); + spin_unlock_irqrestore(&c->lock, flags); + + txx9spi_work_one(c, m); + + spin_lock_irqsave(&c->lock, flags); + } + spin_unlock_irqrestore(&c->lock, flags); +} + +static int txx9spi_transfer(struct spi_device *spi, struct spi_message *m) +{ + struct spi_master *master = spi->master; + struct txx9spi *c = spi_master_get_devdata(master); + struct spi_transfer *t; + unsigned long flags; + + m->actual_length = 0; + + /* check each transfer's parameters */ + list_for_each_entry (t, &m->transfers, transfer_list) { + u32 speed_hz = t->speed_hz ? : spi->max_speed_hz; + u8 bits_per_word = t->bits_per_word ? : spi->bits_per_word; + + bits_per_word = bits_per_word ? : 8; + if (!t->tx_buf && !t->rx_buf && t->len) + return -EINVAL; + if (bits_per_word != 8 && bits_per_word != 16) + return -EINVAL; + if (t->len & ((bits_per_word >> 3) - 1)) + return -EINVAL; + if (speed_hz < c->min_speed_hz || speed_hz > c->max_speed_hz) + return -EINVAL; + } + + spin_lock_irqsave(&c->lock, flags); + list_add_tail(&m->queue, &c->queue); + queue_work(c->workqueue, &c->work); + spin_unlock_irqrestore(&c->lock, flags); + + return 0; +} + +static int __init txx9spi_probe(struct platform_device *dev) +{ + struct spi_master *master; + struct txx9spi *c; + struct resource *res; + int ret = -ENODEV; + u32 mcr; + int irq; + + master = spi_alloc_master(&dev->dev, sizeof(*c)); + if (!master) + return ret; + c = spi_master_get_devdata(master); + platform_set_drvdata(dev, master); + + INIT_WORK(&c->work, txx9spi_work); + spin_lock_init(&c->lock); + INIT_LIST_HEAD(&c->queue); + init_waitqueue_head(&c->waitq); + + c->clk = clk_get(&dev->dev, "spi-baseclk"); + if (IS_ERR(c->clk)) { + ret = PTR_ERR(c->clk); + c->clk = NULL; + goto exit; + } + ret = clk_enable(c->clk); + if (ret) { + clk_put(c->clk); + c->clk = NULL; + goto exit; + } + c->baseclk = clk_get_rate(c->clk); + c->min_speed_hz = DIV_ROUND_UP(c->baseclk, SPI_MAX_DIVIDER + 1); + c->max_speed_hz = c->baseclk / (SPI_MIN_DIVIDER + 1); + + res = platform_get_resource(dev, IORESOURCE_MEM, 0); + if (!res) + goto exit_busy; + if (!devm_request_mem_region(&dev->dev, res->start, resource_size(res), + "spi_txx9")) + goto exit_busy; + c->membase = devm_ioremap(&dev->dev, res->start, resource_size(res)); + if (!c->membase) + goto exit_busy; + + /* enter config mode */ + mcr = txx9spi_rd(c, TXx9_SPMCR); + mcr &= ~(TXx9_SPMCR_OPMODE | TXx9_SPMCR_SPSTP | TXx9_SPMCR_BCLR); + txx9spi_wr(c, mcr | TXx9_SPMCR_CONFIG | TXx9_SPMCR_BCLR, TXx9_SPMCR); + + irq = platform_get_irq(dev, 0); + if (irq < 0) + goto exit_busy; + ret = devm_request_irq(&dev->dev, irq, txx9spi_interrupt, 0, + "spi_txx9", c); + if (ret) + goto exit; + + c->workqueue = create_singlethread_workqueue( + dev_name(master->dev.parent)); + if (!c->workqueue) + goto exit_busy; + c->last_chipselect = -1; + + dev_info(&dev->dev, "at %#llx, irq %d, %dMHz\n", + (unsigned long long)res->start, irq, + (c->baseclk + 500000) / 1000000); + + /* the spi->mode bits understood by this driver: */ + master->mode_bits = SPI_CS_HIGH | SPI_CPOL 
| SPI_CPHA; + + master->bus_num = dev->id; + master->setup = txx9spi_setup; + master->transfer = txx9spi_transfer; + master->num_chipselect = (u16)UINT_MAX; /* any GPIO numbers */ + + ret = spi_register_master(master); + if (ret) + goto exit; + return 0; +exit_busy: + ret = -EBUSY; +exit: + if (c->workqueue) + destroy_workqueue(c->workqueue); + if (c->clk) { + clk_disable(c->clk); + clk_put(c->clk); + } + platform_set_drvdata(dev, NULL); + spi_master_put(master); + return ret; +} + +static int __exit txx9spi_remove(struct platform_device *dev) +{ + struct spi_master *master = spi_master_get(platform_get_drvdata(dev)); + struct txx9spi *c = spi_master_get_devdata(master); + + spi_unregister_master(master); + platform_set_drvdata(dev, NULL); + destroy_workqueue(c->workqueue); + clk_disable(c->clk); + clk_put(c->clk); + spi_master_put(master); + return 0; +} + +/* work with hotplug and coldplug */ +MODULE_ALIAS("platform:spi_txx9"); + +static struct platform_driver txx9spi_driver = { + .remove = __exit_p(txx9spi_remove), + .driver = { + .name = "spi_txx9", + .owner = THIS_MODULE, + }, +}; + +static int __init txx9spi_init(void) +{ + return platform_driver_probe(&txx9spi_driver, txx9spi_probe); +} +subsys_initcall(txx9spi_init); + +static void __exit txx9spi_exit(void) +{ + platform_driver_unregister(&txx9spi_driver); +} +module_exit(txx9spi_exit); + +MODULE_DESCRIPTION("TXx9 SPI Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c new file mode 100644 index 0000000..4d2c75d --- /dev/null +++ b/drivers/spi/spi-xilinx.c @@ -0,0 +1,556 @@ +/* + * Xilinx SPI controller driver (master mode only) + * + * Author: MontaVista Software, Inc. + * source@mvista.com + * + * Copyright (c) 2010 Secret Lab Technologies, Ltd. + * Copyright (c) 2009 Intel Corporation + * 2002-2007 (c) MontaVista Software, Inc. + + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define XILINX_SPI_NAME "xilinx_spi" + +/* Register definitions as per "OPB Serial Peripheral Interface (SPI) (v1.00e) + * Product Specification", DS464 + */ +#define XSPI_CR_OFFSET 0x60 /* Control Register */ + +#define XSPI_CR_ENABLE 0x02 +#define XSPI_CR_MASTER_MODE 0x04 +#define XSPI_CR_CPOL 0x08 +#define XSPI_CR_CPHA 0x10 +#define XSPI_CR_MODE_MASK (XSPI_CR_CPHA | XSPI_CR_CPOL) +#define XSPI_CR_TXFIFO_RESET 0x20 +#define XSPI_CR_RXFIFO_RESET 0x40 +#define XSPI_CR_MANUAL_SSELECT 0x80 +#define XSPI_CR_TRANS_INHIBIT 0x100 +#define XSPI_CR_LSB_FIRST 0x200 + +#define XSPI_SR_OFFSET 0x64 /* Status Register */ + +#define XSPI_SR_RX_EMPTY_MASK 0x01 /* Receive FIFO is empty */ +#define XSPI_SR_RX_FULL_MASK 0x02 /* Receive FIFO is full */ +#define XSPI_SR_TX_EMPTY_MASK 0x04 /* Transmit FIFO is empty */ +#define XSPI_SR_TX_FULL_MASK 0x08 /* Transmit FIFO is full */ +#define XSPI_SR_MODE_FAULT_MASK 0x10 /* Mode fault error */ + +#define XSPI_TXD_OFFSET 0x68 /* Data Transmit Register */ +#define XSPI_RXD_OFFSET 0x6c /* Data Receive Register */ + +#define XSPI_SSR_OFFSET 0x70 /* 32-bit Slave Select Register */ + +/* Register definitions as per "OPB IPIF (v3.01c) Product Specification", DS414 + * IPIF registers are 32 bit + */ +#define XIPIF_V123B_DGIER_OFFSET 0x1c /* IPIF global int enable reg */ +#define XIPIF_V123B_GINTR_ENABLE 0x80000000 + +#define XIPIF_V123B_IISR_OFFSET 0x20 /* IPIF interrupt status reg */ +#define XIPIF_V123B_IIER_OFFSET 0x28 /* IPIF interrupt enable reg */ + +#define XSPI_INTR_MODE_FAULT 0x01 /* Mode fault error */ +#define XSPI_INTR_SLAVE_MODE_FAULT 0x02 /* Selected as slave while + * disabled */ +#define XSPI_INTR_TX_EMPTY 0x04 /* TxFIFO is empty */ +#define XSPI_INTR_TX_UNDERRUN 0x08 /* TxFIFO was underrun */ +#define XSPI_INTR_RX_FULL 0x10 /* RxFIFO is full */ +#define XSPI_INTR_RX_OVERRUN 0x20 /* RxFIFO was overrun */ +#define XSPI_INTR_TX_HALF_EMPTY 0x40 /* TxFIFO is half empty */ + +#define XIPIF_V123B_RESETR_OFFSET 0x40 /* IPIF reset register */ +#define XIPIF_V123B_RESET_MASK 0x0a /* the value to write */ + +struct xilinx_spi { + /* bitbang has to be first */ + struct spi_bitbang bitbang; + struct completion done; + struct resource mem; /* phys mem */ + void __iomem *regs; /* virt. 
address of the control registers */
+
+	u32 irq;
+
+	u8 *rx_ptr;		/* pointer in the Rx buffer */
+	const u8 *tx_ptr;	/* pointer in the Tx buffer */
+	int remaining_bytes;	/* the number of bytes left to transfer */
+	u8 bits_per_word;
+	unsigned int (*read_fn) (void __iomem *);
+	void (*write_fn) (u32, void __iomem *);
+	void (*tx_fn) (struct xilinx_spi *);
+	void (*rx_fn) (struct xilinx_spi *);
+};
+
+static void xspi_write32(u32 val, void __iomem *addr)
+{
+	iowrite32(val, addr);
+}
+
+static unsigned int xspi_read32(void __iomem *addr)
+{
+	return ioread32(addr);
+}
+
+static void xspi_write32_be(u32 val, void __iomem *addr)
+{
+	iowrite32be(val, addr);
+}
+
+static unsigned int xspi_read32_be(void __iomem *addr)
+{
+	return ioread32be(addr);
+}
+
+static void xspi_tx8(struct xilinx_spi *xspi)
+{
+	xspi->write_fn(*xspi->tx_ptr, xspi->regs + XSPI_TXD_OFFSET);
+	xspi->tx_ptr++;
+}
+
+static void xspi_tx16(struct xilinx_spi *xspi)
+{
+	xspi->write_fn(*(u16 *)(xspi->tx_ptr), xspi->regs + XSPI_TXD_OFFSET);
+	xspi->tx_ptr += 2;
+}
+
+static void xspi_tx32(struct xilinx_spi *xspi)
+{
+	xspi->write_fn(*(u32 *)(xspi->tx_ptr), xspi->regs + XSPI_TXD_OFFSET);
+	xspi->tx_ptr += 4;
+}
+
+static void xspi_rx8(struct xilinx_spi *xspi)
+{
+	u32 data = xspi->read_fn(xspi->regs + XSPI_RXD_OFFSET);
+	if (xspi->rx_ptr) {
+		*xspi->rx_ptr = data & 0xff;
+		xspi->rx_ptr++;
+	}
+}
+
+static void xspi_rx16(struct xilinx_spi *xspi)
+{
+	u32 data = xspi->read_fn(xspi->regs + XSPI_RXD_OFFSET);
+	if (xspi->rx_ptr) {
+		*(u16 *)(xspi->rx_ptr) = data & 0xffff;
+		xspi->rx_ptr += 2;
+	}
+}
+
+static void xspi_rx32(struct xilinx_spi *xspi)
+{
+	u32 data = xspi->read_fn(xspi->regs + XSPI_RXD_OFFSET);
+	if (xspi->rx_ptr) {
+		*(u32 *)(xspi->rx_ptr) = data;
+		xspi->rx_ptr += 4;
+	}
+}
+
+static void xspi_init_hw(struct xilinx_spi *xspi)
+{
+	void __iomem *regs_base = xspi->regs;
+
+	/* Reset the SPI device */
+	xspi->write_fn(XIPIF_V123B_RESET_MASK,
+		       regs_base + XIPIF_V123B_RESETR_OFFSET);
+	/* Disable all the interrupts just in case */
+	xspi->write_fn(0, regs_base + XIPIF_V123B_IIER_OFFSET);
+	/* Enable the global IPIF interrupt */
+	xspi->write_fn(XIPIF_V123B_GINTR_ENABLE,
+		       regs_base + XIPIF_V123B_DGIER_OFFSET);
+	/* Deselect the slave on the SPI bus */
+	xspi->write_fn(0xffff, regs_base + XSPI_SSR_OFFSET);
+	/* Disable the transmitter, enable Manual Slave Select Assertion,
+	 * put SPI controller into master mode, and enable it */
+	xspi->write_fn(XSPI_CR_TRANS_INHIBIT | XSPI_CR_MANUAL_SSELECT |
+		       XSPI_CR_MASTER_MODE | XSPI_CR_ENABLE | XSPI_CR_TXFIFO_RESET |
+		       XSPI_CR_RXFIFO_RESET, regs_base + XSPI_CR_OFFSET);
+}
+
+static void xilinx_spi_chipselect(struct spi_device *spi, int is_on)
+{
+	struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
+
+	if (is_on == BITBANG_CS_INACTIVE) {
+		/* Deselect the slave on the SPI bus */
+		xspi->write_fn(0xffff, xspi->regs + XSPI_SSR_OFFSET);
+	} else if (is_on == BITBANG_CS_ACTIVE) {
+		/* Set the SPI clock phase and polarity */
+		u16 cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET)
+			& ~XSPI_CR_MODE_MASK;
+		if (spi->mode & SPI_CPHA)
+			cr |= XSPI_CR_CPHA;
+		if (spi->mode & SPI_CPOL)
+			cr |= XSPI_CR_CPOL;
+		xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
+
+		/* We do not check spi->max_speed_hz here as the SPI clock
+		 * frequency is not software programmable (the IP block design
+		 * parameter)
+		 */
+
+		/* Activate the chip select */
+		xspi->write_fn(~(0x0001 << spi->chip_select),
+			       xspi->regs + XSPI_SSR_OFFSET);
+	}
+}
+
+/* spi_bitbang requires custom setup_transfer() to be
defined if there is a + * custom txrx_bufs(). We have nothing to setup here as the SPI IP block + * supports 8 or 16 bits per word which cannot be changed in software. + * SPI clock can't be changed in software either. + * Check for correct bits per word. Chip select delay calculations could be + * added here as soon as bitbang_work() can be made aware of the delay value. + */ +static int xilinx_spi_setup_transfer(struct spi_device *spi, + struct spi_transfer *t) +{ + struct xilinx_spi *xspi = spi_master_get_devdata(spi->master); + u8 bits_per_word; + + bits_per_word = (t && t->bits_per_word) + ? t->bits_per_word : spi->bits_per_word; + if (bits_per_word != xspi->bits_per_word) { + dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n", + __func__, bits_per_word); + return -EINVAL; + } + + return 0; +} + +static int xilinx_spi_setup(struct spi_device *spi) +{ + /* always return 0, we can not check the number of bits. + * There are cases when SPI setup is called before any driver is + * there, in that case the SPI core defaults to 8 bits, which we + * do not support in some cases. But if we return an error, the + * SPI device would not be registered and no driver can get hold of it + * When the driver is there, it will call SPI setup again with the + * correct number of bits per transfer. + * If a driver setups with the wrong bit number, it will fail when + * it tries to do a transfer + */ + return 0; +} + +static void xilinx_spi_fill_tx_fifo(struct xilinx_spi *xspi) +{ + u8 sr; + + /* Fill the Tx FIFO with as many bytes as possible */ + sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET); + while ((sr & XSPI_SR_TX_FULL_MASK) == 0 && xspi->remaining_bytes > 0) { + if (xspi->tx_ptr) + xspi->tx_fn(xspi); + else + xspi->write_fn(0, xspi->regs + XSPI_TXD_OFFSET); + xspi->remaining_bytes -= xspi->bits_per_word / 8; + sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET); + } +} + +static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t) +{ + struct xilinx_spi *xspi = spi_master_get_devdata(spi->master); + u32 ipif_ier; + u16 cr; + + /* We get here with transmitter inhibited */ + + xspi->tx_ptr = t->tx_buf; + xspi->rx_ptr = t->rx_buf; + xspi->remaining_bytes = t->len; + INIT_COMPLETION(xspi->done); + + xilinx_spi_fill_tx_fifo(xspi); + + /* Enable the transmit empty interrupt, which we use to determine + * progress on the transmission. + */ + ipif_ier = xspi->read_fn(xspi->regs + XIPIF_V123B_IIER_OFFSET); + xspi->write_fn(ipif_ier | XSPI_INTR_TX_EMPTY, + xspi->regs + XIPIF_V123B_IIER_OFFSET); + + /* Start the transfer by not inhibiting the transmitter any longer */ + cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET) & + ~XSPI_CR_TRANS_INHIBIT; + xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET); + + wait_for_completion(&xspi->done); + + /* Disable the transmit empty interrupt */ + xspi->write_fn(ipif_ier, xspi->regs + XIPIF_V123B_IIER_OFFSET); + + return t->len - xspi->remaining_bytes; +} + + +/* This driver supports single master mode only. Hence Tx FIFO Empty + * is the only interrupt we care about. + * Receive FIFO Overrun, Transmit FIFO Underrun, Mode Fault, and Slave Mode + * Fault are not to happen. 
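+ * On Tx FIFO Empty the handler inhibits the transmitter, drains the Rx FIFO,
+ * and then either refills the Tx FIFO or completes the transfer.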
+ */ +static irqreturn_t xilinx_spi_irq(int irq, void *dev_id) +{ + struct xilinx_spi *xspi = dev_id; + u32 ipif_isr; + + /* Get the IPIF interrupts, and clear them immediately */ + ipif_isr = xspi->read_fn(xspi->regs + XIPIF_V123B_IISR_OFFSET); + xspi->write_fn(ipif_isr, xspi->regs + XIPIF_V123B_IISR_OFFSET); + + if (ipif_isr & XSPI_INTR_TX_EMPTY) { /* Transmission completed */ + u16 cr; + u8 sr; + + /* A transmit has just completed. Process received data and + * check for more data to transmit. Always inhibit the + * transmitter while the Isr refills the transmit register/FIFO, + * or make sure it is stopped if we're done. + */ + cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET); + xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT, + xspi->regs + XSPI_CR_OFFSET); + + /* Read out all the data from the Rx FIFO */ + sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET); + while ((sr & XSPI_SR_RX_EMPTY_MASK) == 0) { + xspi->rx_fn(xspi); + sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET); + } + + /* See if there is more data to send */ + if (xspi->remaining_bytes > 0) { + xilinx_spi_fill_tx_fifo(xspi); + /* Start the transfer by not inhibiting the + * transmitter any longer + */ + xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET); + } else { + /* No more data to send. + * Indicate the transfer is completed. + */ + complete(&xspi->done); + } + } + + return IRQ_HANDLED; +} + +static const struct of_device_id xilinx_spi_of_match[] = { + { .compatible = "xlnx,xps-spi-2.00.a", }, + { .compatible = "xlnx,xps-spi-2.00.b", }, + {} +}; +MODULE_DEVICE_TABLE(of, xilinx_spi_of_match); + +struct spi_master *xilinx_spi_init(struct device *dev, struct resource *mem, + u32 irq, s16 bus_num, int num_cs, int little_endian, int bits_per_word) +{ + struct spi_master *master; + struct xilinx_spi *xspi; + int ret; + + master = spi_alloc_master(dev, sizeof(struct xilinx_spi)); + if (!master) + return NULL; + + /* the spi->mode bits understood by this driver: */ + master->mode_bits = SPI_CPOL | SPI_CPHA; + + xspi = spi_master_get_devdata(master); + xspi->bitbang.master = spi_master_get(master); + xspi->bitbang.chipselect = xilinx_spi_chipselect; + xspi->bitbang.setup_transfer = xilinx_spi_setup_transfer; + xspi->bitbang.txrx_bufs = xilinx_spi_txrx_bufs; + xspi->bitbang.master->setup = xilinx_spi_setup; + init_completion(&xspi->done); + + if (!request_mem_region(mem->start, resource_size(mem), + XILINX_SPI_NAME)) + goto put_master; + + xspi->regs = ioremap(mem->start, resource_size(mem)); + if (xspi->regs == NULL) { + dev_warn(dev, "ioremap failure\n"); + goto map_failed; + } + + master->bus_num = bus_num; + master->num_chipselect = num_cs; + master->dev.of_node = dev->of_node; + + xspi->mem = *mem; + xspi->irq = irq; + if (little_endian) { + xspi->read_fn = xspi_read32; + xspi->write_fn = xspi_write32; + } else { + xspi->read_fn = xspi_read32_be; + xspi->write_fn = xspi_write32_be; + } + xspi->bits_per_word = bits_per_word; + if (xspi->bits_per_word == 8) { + xspi->tx_fn = xspi_tx8; + xspi->rx_fn = xspi_rx8; + } else if (xspi->bits_per_word == 16) { + xspi->tx_fn = xspi_tx16; + xspi->rx_fn = xspi_rx16; + } else if (xspi->bits_per_word == 32) { + xspi->tx_fn = xspi_tx32; + xspi->rx_fn = xspi_rx32; + } else + goto unmap_io; + + + /* SPI controller initializations */ + xspi_init_hw(xspi); + + /* Register for SPI Interrupt */ + ret = request_irq(xspi->irq, xilinx_spi_irq, 0, XILINX_SPI_NAME, xspi); + if (ret) + goto unmap_io; + + ret = spi_bitbang_start(&xspi->bitbang); + if (ret) { + dev_err(dev, "spi_bitbang_start FAILED\n"); + goto 
free_irq; + } + + dev_info(dev, "at 0x%08llX mapped to 0x%p, irq=%d\n", + (unsigned long long)mem->start, xspi->regs, xspi->irq); + return master; + +free_irq: + free_irq(xspi->irq, xspi); +unmap_io: + iounmap(xspi->regs); +map_failed: + release_mem_region(mem->start, resource_size(mem)); +put_master: + spi_master_put(master); + return NULL; +} +EXPORT_SYMBOL(xilinx_spi_init); + +void xilinx_spi_deinit(struct spi_master *master) +{ + struct xilinx_spi *xspi; + + xspi = spi_master_get_devdata(master); + + spi_bitbang_stop(&xspi->bitbang); + free_irq(xspi->irq, xspi); + iounmap(xspi->regs); + + release_mem_region(xspi->mem.start, resource_size(&xspi->mem)); + spi_master_put(xspi->bitbang.master); +} +EXPORT_SYMBOL(xilinx_spi_deinit); + +static int __devinit xilinx_spi_probe(struct platform_device *dev) +{ + struct xspi_platform_data *pdata; + struct resource *r; + int irq, num_cs = 0, little_endian = 0, bits_per_word = 8; + struct spi_master *master; + u8 i; + + pdata = dev->dev.platform_data; + if (pdata) { + num_cs = pdata->num_chipselect; + little_endian = pdata->little_endian; + bits_per_word = pdata->bits_per_word; + } + +#ifdef CONFIG_OF + if (dev->dev.of_node) { + const __be32 *prop; + int len; + + /* number of slave select bits is required */ + prop = of_get_property(dev->dev.of_node, "xlnx,num-ss-bits", + &len); + if (prop && len >= sizeof(*prop)) + num_cs = __be32_to_cpup(prop); + } +#endif + + if (!num_cs) { + dev_err(&dev->dev, "Missing slave select configuration data\n"); + return -EINVAL; + } + + + r = platform_get_resource(dev, IORESOURCE_MEM, 0); + if (!r) + return -ENODEV; + + irq = platform_get_irq(dev, 0); + if (irq < 0) + return -ENXIO; + + master = xilinx_spi_init(&dev->dev, r, irq, dev->id, num_cs, + little_endian, bits_per_word); + if (!master) + return -ENODEV; + + if (pdata) { + for (i = 0; i < pdata->num_devices; i++) + spi_new_device(master, pdata->devices + i); + } + + platform_set_drvdata(dev, master); + return 0; +} + +static int __devexit xilinx_spi_remove(struct platform_device *dev) +{ + xilinx_spi_deinit(platform_get_drvdata(dev)); + platform_set_drvdata(dev, 0); + + return 0; +} + +/* work with hotplug and coldplug */ +MODULE_ALIAS("platform:" XILINX_SPI_NAME); + +static struct platform_driver xilinx_spi_driver = { + .probe = xilinx_spi_probe, + .remove = __devexit_p(xilinx_spi_remove), + .driver = { + .name = XILINX_SPI_NAME, + .owner = THIS_MODULE, + .of_match_table = xilinx_spi_of_match, + }, +}; + +static int __init xilinx_spi_pltfm_init(void) +{ + return platform_driver_register(&xilinx_spi_driver); +} +module_init(xilinx_spi_pltfm_init); + +static void __exit xilinx_spi_pltfm_exit(void) +{ + platform_driver_unregister(&xilinx_spi_driver); +} +module_exit(xilinx_spi_pltfm_exit); + +MODULE_AUTHOR("MontaVista Software, Inc. 
"); +MODULE_DESCRIPTION("Xilinx SPI driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 2e13a14..4d1b9f5 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -1,5 +1,5 @@ /* - * spi.c - SPI init/core code + * SPI init/core code * * Copyright (C) 2005 David Brownell * diff --git a/drivers/spi/spi_altera.c b/drivers/spi/spi_altera.c deleted file mode 100644 index 4813a63..0000000 --- a/drivers/spi/spi_altera.c +++ /dev/null @@ -1,339 +0,0 @@ -/* - * Altera SPI driver - * - * Copyright (C) 2008 Thomas Chou - * - * Based on spi_s3c24xx.c, which is: - * Copyright (c) 2006 Ben Dooks - * Copyright (c) 2006 Simtec Electronics - * Ben Dooks - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#define DRV_NAME "spi_altera" - -#define ALTERA_SPI_RXDATA 0 -#define ALTERA_SPI_TXDATA 4 -#define ALTERA_SPI_STATUS 8 -#define ALTERA_SPI_CONTROL 12 -#define ALTERA_SPI_SLAVE_SEL 20 - -#define ALTERA_SPI_STATUS_ROE_MSK 0x8 -#define ALTERA_SPI_STATUS_TOE_MSK 0x10 -#define ALTERA_SPI_STATUS_TMT_MSK 0x20 -#define ALTERA_SPI_STATUS_TRDY_MSK 0x40 -#define ALTERA_SPI_STATUS_RRDY_MSK 0x80 -#define ALTERA_SPI_STATUS_E_MSK 0x100 - -#define ALTERA_SPI_CONTROL_IROE_MSK 0x8 -#define ALTERA_SPI_CONTROL_ITOE_MSK 0x10 -#define ALTERA_SPI_CONTROL_ITRDY_MSK 0x40 -#define ALTERA_SPI_CONTROL_IRRDY_MSK 0x80 -#define ALTERA_SPI_CONTROL_IE_MSK 0x100 -#define ALTERA_SPI_CONTROL_SSO_MSK 0x400 - -struct altera_spi { - /* bitbang has to be first */ - struct spi_bitbang bitbang; - struct completion done; - - void __iomem *base; - int irq; - int len; - int count; - int bytes_per_word; - unsigned long imr; - - /* data buffers */ - const unsigned char *tx; - unsigned char *rx; -}; - -static inline struct altera_spi *altera_spi_to_hw(struct spi_device *sdev) -{ - return spi_master_get_devdata(sdev->master); -} - -static void altera_spi_chipsel(struct spi_device *spi, int value) -{ - struct altera_spi *hw = altera_spi_to_hw(spi); - - if (spi->mode & SPI_CS_HIGH) { - switch (value) { - case BITBANG_CS_INACTIVE: - writel(1 << spi->chip_select, - hw->base + ALTERA_SPI_SLAVE_SEL); - hw->imr |= ALTERA_SPI_CONTROL_SSO_MSK; - writel(hw->imr, hw->base + ALTERA_SPI_CONTROL); - break; - - case BITBANG_CS_ACTIVE: - hw->imr &= ~ALTERA_SPI_CONTROL_SSO_MSK; - writel(hw->imr, hw->base + ALTERA_SPI_CONTROL); - writel(0, hw->base + ALTERA_SPI_SLAVE_SEL); - break; - } - } else { - switch (value) { - case BITBANG_CS_INACTIVE: - hw->imr &= ~ALTERA_SPI_CONTROL_SSO_MSK; - writel(hw->imr, hw->base + ALTERA_SPI_CONTROL); - break; - - case BITBANG_CS_ACTIVE: - writel(1 << spi->chip_select, - hw->base + ALTERA_SPI_SLAVE_SEL); - hw->imr |= ALTERA_SPI_CONTROL_SSO_MSK; - writel(hw->imr, hw->base + ALTERA_SPI_CONTROL); - break; - } - } -} - -static int altera_spi_setupxfer(struct spi_device *spi, struct spi_transfer *t) -{ - return 0; -} - -static int altera_spi_setup(struct spi_device *spi) -{ - return 0; -} - -static inline unsigned int hw_txbyte(struct altera_spi *hw, int count) -{ - if (hw->tx) { - switch (hw->bytes_per_word) { - case 1: - return hw->tx[count]; - case 2: - return (hw->tx[count * 2] - | (hw->tx[count * 2 + 1] << 8)); - } - } - return 0; -} - -static int altera_spi_txrx(struct spi_device *spi, struct spi_transfer *t) -{ - struct altera_spi *hw = altera_spi_to_hw(spi); - - hw->tx = 
t->tx_buf; - hw->rx = t->rx_buf; - hw->count = 0; - hw->bytes_per_word = (t->bits_per_word ? : spi->bits_per_word) / 8; - hw->len = t->len / hw->bytes_per_word; - - if (hw->irq >= 0) { - /* enable receive interrupt */ - hw->imr |= ALTERA_SPI_CONTROL_IRRDY_MSK; - writel(hw->imr, hw->base + ALTERA_SPI_CONTROL); - - /* send the first byte */ - writel(hw_txbyte(hw, 0), hw->base + ALTERA_SPI_TXDATA); - - wait_for_completion(&hw->done); - /* disable receive interrupt */ - hw->imr &= ~ALTERA_SPI_CONTROL_IRRDY_MSK; - writel(hw->imr, hw->base + ALTERA_SPI_CONTROL); - } else { - /* send the first byte */ - writel(hw_txbyte(hw, 0), hw->base + ALTERA_SPI_TXDATA); - - while (1) { - unsigned int rxd; - - while (!(readl(hw->base + ALTERA_SPI_STATUS) & - ALTERA_SPI_STATUS_RRDY_MSK)) - cpu_relax(); - - rxd = readl(hw->base + ALTERA_SPI_RXDATA); - if (hw->rx) { - switch (hw->bytes_per_word) { - case 1: - hw->rx[hw->count] = rxd; - break; - case 2: - hw->rx[hw->count * 2] = rxd; - hw->rx[hw->count * 2 + 1] = rxd >> 8; - break; - } - } - - hw->count++; - - if (hw->count < hw->len) - writel(hw_txbyte(hw, hw->count), - hw->base + ALTERA_SPI_TXDATA); - else - break; - } - - } - - return hw->count * hw->bytes_per_word; -} - -static irqreturn_t altera_spi_irq(int irq, void *dev) -{ - struct altera_spi *hw = dev; - unsigned int rxd; - - rxd = readl(hw->base + ALTERA_SPI_RXDATA); - if (hw->rx) { - switch (hw->bytes_per_word) { - case 1: - hw->rx[hw->count] = rxd; - break; - case 2: - hw->rx[hw->count * 2] = rxd; - hw->rx[hw->count * 2 + 1] = rxd >> 8; - break; - } - } - - hw->count++; - - if (hw->count < hw->len) - writel(hw_txbyte(hw, hw->count), hw->base + ALTERA_SPI_TXDATA); - else - complete(&hw->done); - - return IRQ_HANDLED; -} - -static int __devinit altera_spi_probe(struct platform_device *pdev) -{ - struct altera_spi_platform_data *platp = pdev->dev.platform_data; - struct altera_spi *hw; - struct spi_master *master; - struct resource *res; - int err = -ENODEV; - - master = spi_alloc_master(&pdev->dev, sizeof(struct altera_spi)); - if (!master) - return err; - - /* setup the master state. 
*/ - master->bus_num = pdev->id; - master->num_chipselect = 16; - master->mode_bits = SPI_CS_HIGH; - master->setup = altera_spi_setup; - - hw = spi_master_get_devdata(master); - platform_set_drvdata(pdev, hw); - - /* setup the state for the bitbang driver */ - hw->bitbang.master = spi_master_get(master); - if (!hw->bitbang.master) - return err; - hw->bitbang.setup_transfer = altera_spi_setupxfer; - hw->bitbang.chipselect = altera_spi_chipsel; - hw->bitbang.txrx_bufs = altera_spi_txrx; - - /* find and map our resources */ - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) - goto exit_busy; - if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res), - pdev->name)) - goto exit_busy; - hw->base = devm_ioremap_nocache(&pdev->dev, res->start, - resource_size(res)); - if (!hw->base) - goto exit_busy; - /* program defaults into the registers */ - hw->imr = 0; /* disable spi interrupts */ - writel(hw->imr, hw->base + ALTERA_SPI_CONTROL); - writel(0, hw->base + ALTERA_SPI_STATUS); /* clear status reg */ - if (readl(hw->base + ALTERA_SPI_STATUS) & ALTERA_SPI_STATUS_RRDY_MSK) - readl(hw->base + ALTERA_SPI_RXDATA); /* flush rxdata */ - /* irq is optional */ - hw->irq = platform_get_irq(pdev, 0); - if (hw->irq >= 0) { - init_completion(&hw->done); - err = devm_request_irq(&pdev->dev, hw->irq, altera_spi_irq, 0, - pdev->name, hw); - if (err) - goto exit; - } - /* find platform data */ - if (!platp) - hw->bitbang.master->dev.of_node = pdev->dev.of_node; - - /* register our spi controller */ - err = spi_bitbang_start(&hw->bitbang); - if (err) - goto exit; - dev_info(&pdev->dev, "base %p, irq %d\n", hw->base, hw->irq); - - return 0; - -exit_busy: - err = -EBUSY; -exit: - platform_set_drvdata(pdev, NULL); - spi_master_put(master); - return err; -} - -static int __devexit altera_spi_remove(struct platform_device *dev) -{ - struct altera_spi *hw = platform_get_drvdata(dev); - struct spi_master *master = hw->bitbang.master; - - spi_bitbang_stop(&hw->bitbang); - platform_set_drvdata(dev, NULL); - spi_master_put(master); - return 0; -} - -#ifdef CONFIG_OF -static const struct of_device_id altera_spi_match[] = { - { .compatible = "ALTR,spi-1.0", }, - {}, -}; -MODULE_DEVICE_TABLE(of, altera_spi_match); -#else /* CONFIG_OF */ -#define altera_spi_match NULL -#endif /* CONFIG_OF */ - -static struct platform_driver altera_spi_driver = { - .probe = altera_spi_probe, - .remove = __devexit_p(altera_spi_remove), - .driver = { - .name = DRV_NAME, - .owner = THIS_MODULE, - .pm = NULL, - .of_match_table = altera_spi_match, - }, -}; - -static int __init altera_spi_init(void) -{ - return platform_driver_register(&altera_spi_driver); -} -module_init(altera_spi_init); - -static void __exit altera_spi_exit(void) -{ - platform_driver_unregister(&altera_spi_driver); -} -module_exit(altera_spi_exit); - -MODULE_DESCRIPTION("Altera SPI driver"); -MODULE_AUTHOR("Thomas Chou "); -MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:" DRV_NAME); diff --git a/drivers/spi/spi_bfin5xx.c b/drivers/spi/spi_bfin5xx.c deleted file mode 100644 index f706dba..0000000 --- a/drivers/spi/spi_bfin5xx.c +++ /dev/null @@ -1,1530 +0,0 @@ -/* - * Blackfin On-Chip SPI Driver - * - * Copyright 2004-2010 Analog Devices Inc. - * - * Enter bugs at http://blackfin.uclinux.org/ - * - * Licensed under the GPL-2 or later. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#define DRV_NAME "bfin-spi" -#define DRV_AUTHOR "Bryan Wu, Luke Yang" -#define DRV_DESC "Blackfin on-chip SPI Controller Driver" -#define DRV_VERSION "1.0" - -MODULE_AUTHOR(DRV_AUTHOR); -MODULE_DESCRIPTION(DRV_DESC); -MODULE_LICENSE("GPL"); - -#define START_STATE ((void *)0) -#define RUNNING_STATE ((void *)1) -#define DONE_STATE ((void *)2) -#define ERROR_STATE ((void *)-1) - -struct bfin_spi_master_data; - -struct bfin_spi_transfer_ops { - void (*write) (struct bfin_spi_master_data *); - void (*read) (struct bfin_spi_master_data *); - void (*duplex) (struct bfin_spi_master_data *); -}; - -struct bfin_spi_master_data { - /* Driver model hookup */ - struct platform_device *pdev; - - /* SPI framework hookup */ - struct spi_master *master; - - /* Regs base of SPI controller */ - void __iomem *regs_base; - - /* Pin request list */ - u16 *pin_req; - - /* BFIN hookup */ - struct bfin5xx_spi_master *master_info; - - /* Driver message queue */ - struct workqueue_struct *workqueue; - struct work_struct pump_messages; - spinlock_t lock; - struct list_head queue; - int busy; - bool running; - - /* Message Transfer pump */ - struct tasklet_struct pump_transfers; - - /* Current message transfer state info */ - struct spi_message *cur_msg; - struct spi_transfer *cur_transfer; - struct bfin_spi_slave_data *cur_chip; - size_t len_in_bytes; - size_t len; - void *tx; - void *tx_end; - void *rx; - void *rx_end; - - /* DMA stuffs */ - int dma_channel; - int dma_mapped; - int dma_requested; - dma_addr_t rx_dma; - dma_addr_t tx_dma; - - int irq_requested; - int spi_irq; - - size_t rx_map_len; - size_t tx_map_len; - u8 n_bytes; - u16 ctrl_reg; - u16 flag_reg; - - int cs_change; - const struct bfin_spi_transfer_ops *ops; -}; - -struct bfin_spi_slave_data { - u16 ctl_reg; - u16 baud; - u16 flag; - - u8 chip_select_num; - u8 enable_dma; - u16 cs_chg_udelay; /* Some devices require > 255usec delay */ - u32 cs_gpio; - u16 idle_tx_val; - u8 pio_interrupt; /* use spi data irq */ - const struct bfin_spi_transfer_ops *ops; -}; - -#define DEFINE_SPI_REG(reg, off) \ -static inline u16 read_##reg(struct bfin_spi_master_data *drv_data) \ - { return bfin_read16(drv_data->regs_base + off); } \ -static inline void write_##reg(struct bfin_spi_master_data *drv_data, u16 v) \ - { bfin_write16(drv_data->regs_base + off, v); } - -DEFINE_SPI_REG(CTRL, 0x00) -DEFINE_SPI_REG(FLAG, 0x04) -DEFINE_SPI_REG(STAT, 0x08) -DEFINE_SPI_REG(TDBR, 0x0C) -DEFINE_SPI_REG(RDBR, 0x10) -DEFINE_SPI_REG(BAUD, 0x14) -DEFINE_SPI_REG(SHAW, 0x18) - -static void bfin_spi_enable(struct bfin_spi_master_data *drv_data) -{ - u16 cr; - - cr = read_CTRL(drv_data); - write_CTRL(drv_data, (cr | BIT_CTL_ENABLE)); -} - -static void bfin_spi_disable(struct bfin_spi_master_data *drv_data) -{ - u16 cr; - - cr = read_CTRL(drv_data); - write_CTRL(drv_data, (cr & (~BIT_CTL_ENABLE))); -} - -/* Caculate the SPI_BAUD register value based on input HZ */ -static u16 hz_to_spi_baud(u32 speed_hz) -{ - u_long sclk = get_sclk(); - u16 spi_baud = (sclk / (2 * speed_hz)); - - if ((sclk % (2 * speed_hz)) > 0) - spi_baud++; - - if (spi_baud < MIN_SPI_BAUD_VAL) - spi_baud = MIN_SPI_BAUD_VAL; - - return spi_baud; -} - -static int bfin_spi_flush(struct bfin_spi_master_data *drv_data) -{ - unsigned long limit = loops_per_jiffy << 1; - - /* wait for stop and clear stat */ - while (!(read_STAT(drv_data) & 
BIT_STAT_SPIF) && --limit) - cpu_relax(); - - write_STAT(drv_data, BIT_STAT_CLR); - - return limit; -} - -/* Chip select operation functions for cs_change flag */ -static void bfin_spi_cs_active(struct bfin_spi_master_data *drv_data, struct bfin_spi_slave_data *chip) -{ - if (likely(chip->chip_select_num < MAX_CTRL_CS)) { - u16 flag = read_FLAG(drv_data); - - flag &= ~chip->flag; - - write_FLAG(drv_data, flag); - } else { - gpio_set_value(chip->cs_gpio, 0); - } -} - -static void bfin_spi_cs_deactive(struct bfin_spi_master_data *drv_data, - struct bfin_spi_slave_data *chip) -{ - if (likely(chip->chip_select_num < MAX_CTRL_CS)) { - u16 flag = read_FLAG(drv_data); - - flag |= chip->flag; - - write_FLAG(drv_data, flag); - } else { - gpio_set_value(chip->cs_gpio, 1); - } - - /* Move delay here for consistency */ - if (chip->cs_chg_udelay) - udelay(chip->cs_chg_udelay); -} - -/* enable or disable the pin muxed by GPIO and SPI CS to work as SPI CS */ -static inline void bfin_spi_cs_enable(struct bfin_spi_master_data *drv_data, - struct bfin_spi_slave_data *chip) -{ - if (chip->chip_select_num < MAX_CTRL_CS) { - u16 flag = read_FLAG(drv_data); - - flag |= (chip->flag >> 8); - - write_FLAG(drv_data, flag); - } -} - -static inline void bfin_spi_cs_disable(struct bfin_spi_master_data *drv_data, - struct bfin_spi_slave_data *chip) -{ - if (chip->chip_select_num < MAX_CTRL_CS) { - u16 flag = read_FLAG(drv_data); - - flag &= ~(chip->flag >> 8); - - write_FLAG(drv_data, flag); - } -} - -/* stop controller and re-config current chip*/ -static void bfin_spi_restore_state(struct bfin_spi_master_data *drv_data) -{ - struct bfin_spi_slave_data *chip = drv_data->cur_chip; - - /* Clear status and disable clock */ - write_STAT(drv_data, BIT_STAT_CLR); - bfin_spi_disable(drv_data); - dev_dbg(&drv_data->pdev->dev, "restoring spi ctl state\n"); - - SSYNC(); - - /* Load the registers */ - write_CTRL(drv_data, chip->ctl_reg); - write_BAUD(drv_data, chip->baud); - - bfin_spi_enable(drv_data); - bfin_spi_cs_active(drv_data, chip); -} - -/* used to kick off transfer in rx mode and read unwanted RX data */ -static inline void bfin_spi_dummy_read(struct bfin_spi_master_data *drv_data) -{ - (void) read_RDBR(drv_data); -} - -static void bfin_spi_u8_writer(struct bfin_spi_master_data *drv_data) -{ - /* clear RXS (we check for RXS inside the loop) */ - bfin_spi_dummy_read(drv_data); - - while (drv_data->tx < drv_data->tx_end) { - write_TDBR(drv_data, (*(u8 *) (drv_data->tx++))); - /* wait until transfer finished. 
- checking SPIF or TXS may not guarantee transfer completion */ - while (!(read_STAT(drv_data) & BIT_STAT_RXS)) - cpu_relax(); - /* discard RX data and clear RXS */ - bfin_spi_dummy_read(drv_data); - } -} - -static void bfin_spi_u8_reader(struct bfin_spi_master_data *drv_data) -{ - u16 tx_val = drv_data->cur_chip->idle_tx_val; - - /* discard old RX data and clear RXS */ - bfin_spi_dummy_read(drv_data); - - while (drv_data->rx < drv_data->rx_end) { - write_TDBR(drv_data, tx_val); - while (!(read_STAT(drv_data) & BIT_STAT_RXS)) - cpu_relax(); - *(u8 *) (drv_data->rx++) = read_RDBR(drv_data); - } -} - -static void bfin_spi_u8_duplex(struct bfin_spi_master_data *drv_data) -{ - /* discard old RX data and clear RXS */ - bfin_spi_dummy_read(drv_data); - - while (drv_data->rx < drv_data->rx_end) { - write_TDBR(drv_data, (*(u8 *) (drv_data->tx++))); - while (!(read_STAT(drv_data) & BIT_STAT_RXS)) - cpu_relax(); - *(u8 *) (drv_data->rx++) = read_RDBR(drv_data); - } -} - -static const struct bfin_spi_transfer_ops bfin_bfin_spi_transfer_ops_u8 = { - .write = bfin_spi_u8_writer, - .read = bfin_spi_u8_reader, - .duplex = bfin_spi_u8_duplex, -}; - -static void bfin_spi_u16_writer(struct bfin_spi_master_data *drv_data) -{ - /* clear RXS (we check for RXS inside the loop) */ - bfin_spi_dummy_read(drv_data); - - while (drv_data->tx < drv_data->tx_end) { - write_TDBR(drv_data, (*(u16 *) (drv_data->tx))); - drv_data->tx += 2; - /* wait until transfer finished. - checking SPIF or TXS may not guarantee transfer completion */ - while (!(read_STAT(drv_data) & BIT_STAT_RXS)) - cpu_relax(); - /* discard RX data and clear RXS */ - bfin_spi_dummy_read(drv_data); - } -} - -static void bfin_spi_u16_reader(struct bfin_spi_master_data *drv_data) -{ - u16 tx_val = drv_data->cur_chip->idle_tx_val; - - /* discard old RX data and clear RXS */ - bfin_spi_dummy_read(drv_data); - - while (drv_data->rx < drv_data->rx_end) { - write_TDBR(drv_data, tx_val); - while (!(read_STAT(drv_data) & BIT_STAT_RXS)) - cpu_relax(); - *(u16 *) (drv_data->rx) = read_RDBR(drv_data); - drv_data->rx += 2; - } -} - -static void bfin_spi_u16_duplex(struct bfin_spi_master_data *drv_data) -{ - /* discard old RX data and clear RXS */ - bfin_spi_dummy_read(drv_data); - - while (drv_data->rx < drv_data->rx_end) { - write_TDBR(drv_data, (*(u16 *) (drv_data->tx))); - drv_data->tx += 2; - while (!(read_STAT(drv_data) & BIT_STAT_RXS)) - cpu_relax(); - *(u16 *) (drv_data->rx) = read_RDBR(drv_data); - drv_data->rx += 2; - } -} - -static const struct bfin_spi_transfer_ops bfin_bfin_spi_transfer_ops_u16 = { - .write = bfin_spi_u16_writer, - .read = bfin_spi_u16_reader, - .duplex = bfin_spi_u16_duplex, -}; - -/* test if there is more transfer to be done */ -static void *bfin_spi_next_transfer(struct bfin_spi_master_data *drv_data) -{ - struct spi_message *msg = drv_data->cur_msg; - struct spi_transfer *trans = drv_data->cur_transfer; - - /* Move to next transfer */ - if (trans->transfer_list.next != &msg->transfers) { - drv_data->cur_transfer = - list_entry(trans->transfer_list.next, - struct spi_transfer, transfer_list); - return RUNNING_STATE; - } else - return DONE_STATE; -} - -/* - * caller already set message->status; - * dma and pio irqs are blocked give finished message back - */ -static void bfin_spi_giveback(struct bfin_spi_master_data *drv_data) -{ - struct bfin_spi_slave_data *chip = drv_data->cur_chip; - struct spi_transfer *last_transfer; - unsigned long flags; - struct spi_message *msg; - - spin_lock_irqsave(&drv_data->lock, flags); - msg = 
drv_data->cur_msg; - drv_data->cur_msg = NULL; - drv_data->cur_transfer = NULL; - drv_data->cur_chip = NULL; - queue_work(drv_data->workqueue, &drv_data->pump_messages); - spin_unlock_irqrestore(&drv_data->lock, flags); - - last_transfer = list_entry(msg->transfers.prev, - struct spi_transfer, transfer_list); - - msg->state = NULL; - - if (!drv_data->cs_change) - bfin_spi_cs_deactive(drv_data, chip); - - /* Not stop spi in autobuffer mode */ - if (drv_data->tx_dma != 0xFFFF) - bfin_spi_disable(drv_data); - - if (msg->complete) - msg->complete(msg->context); -} - -/* spi data irq handler */ -static irqreturn_t bfin_spi_pio_irq_handler(int irq, void *dev_id) -{ - struct bfin_spi_master_data *drv_data = dev_id; - struct bfin_spi_slave_data *chip = drv_data->cur_chip; - struct spi_message *msg = drv_data->cur_msg; - int n_bytes = drv_data->n_bytes; - int loop = 0; - - /* wait until transfer finished. */ - while (!(read_STAT(drv_data) & BIT_STAT_RXS)) - cpu_relax(); - - if ((drv_data->tx && drv_data->tx >= drv_data->tx_end) || - (drv_data->rx && drv_data->rx >= (drv_data->rx_end - n_bytes))) { - /* last read */ - if (drv_data->rx) { - dev_dbg(&drv_data->pdev->dev, "last read\n"); - if (n_bytes % 2) { - u16 *buf = (u16 *)drv_data->rx; - for (loop = 0; loop < n_bytes / 2; loop++) - *buf++ = read_RDBR(drv_data); - } else { - u8 *buf = (u8 *)drv_data->rx; - for (loop = 0; loop < n_bytes; loop++) - *buf++ = read_RDBR(drv_data); - } - drv_data->rx += n_bytes; - } - - msg->actual_length += drv_data->len_in_bytes; - if (drv_data->cs_change) - bfin_spi_cs_deactive(drv_data, chip); - /* Move to next transfer */ - msg->state = bfin_spi_next_transfer(drv_data); - - disable_irq_nosync(drv_data->spi_irq); - - /* Schedule transfer tasklet */ - tasklet_schedule(&drv_data->pump_transfers); - return IRQ_HANDLED; - } - - if (drv_data->rx && drv_data->tx) { - /* duplex */ - dev_dbg(&drv_data->pdev->dev, "duplex: write_TDBR\n"); - if (n_bytes % 2) { - u16 *buf = (u16 *)drv_data->rx; - u16 *buf2 = (u16 *)drv_data->tx; - for (loop = 0; loop < n_bytes / 2; loop++) { - *buf++ = read_RDBR(drv_data); - write_TDBR(drv_data, *buf2++); - } - } else { - u8 *buf = (u8 *)drv_data->rx; - u8 *buf2 = (u8 *)drv_data->tx; - for (loop = 0; loop < n_bytes; loop++) { - *buf++ = read_RDBR(drv_data); - write_TDBR(drv_data, *buf2++); - } - } - } else if (drv_data->rx) { - /* read */ - dev_dbg(&drv_data->pdev->dev, "read: write_TDBR\n"); - if (n_bytes % 2) { - u16 *buf = (u16 *)drv_data->rx; - for (loop = 0; loop < n_bytes / 2; loop++) { - *buf++ = read_RDBR(drv_data); - write_TDBR(drv_data, chip->idle_tx_val); - } - } else { - u8 *buf = (u8 *)drv_data->rx; - for (loop = 0; loop < n_bytes; loop++) { - *buf++ = read_RDBR(drv_data); - write_TDBR(drv_data, chip->idle_tx_val); - } - } - } else if (drv_data->tx) { - /* write */ - dev_dbg(&drv_data->pdev->dev, "write: write_TDBR\n"); - if (n_bytes % 2) { - u16 *buf = (u16 *)drv_data->tx; - for (loop = 0; loop < n_bytes / 2; loop++) { - read_RDBR(drv_data); - write_TDBR(drv_data, *buf++); - } - } else { - u8 *buf = (u8 *)drv_data->tx; - for (loop = 0; loop < n_bytes; loop++) { - read_RDBR(drv_data); - write_TDBR(drv_data, *buf++); - } - } - } - - if (drv_data->tx) - drv_data->tx += n_bytes; - if (drv_data->rx) - drv_data->rx += n_bytes; - - return IRQ_HANDLED; -} - -static irqreturn_t bfin_spi_dma_irq_handler(int irq, void *dev_id) -{ - struct bfin_spi_master_data *drv_data = dev_id; - struct bfin_spi_slave_data *chip = drv_data->cur_chip; - struct spi_message *msg = drv_data->cur_msg; - 
unsigned long timeout; - unsigned short dmastat = get_dma_curr_irqstat(drv_data->dma_channel); - u16 spistat = read_STAT(drv_data); - - dev_dbg(&drv_data->pdev->dev, - "in dma_irq_handler dmastat:0x%x spistat:0x%x\n", - dmastat, spistat); - - if (drv_data->rx != NULL) { - u16 cr = read_CTRL(drv_data); - /* discard old RX data and clear RXS */ - bfin_spi_dummy_read(drv_data); - write_CTRL(drv_data, cr & ~BIT_CTL_ENABLE); /* Disable SPI */ - write_CTRL(drv_data, cr & ~BIT_CTL_TIMOD); /* Restore State */ - write_STAT(drv_data, BIT_STAT_CLR); /* Clear Status */ - } - - clear_dma_irqstat(drv_data->dma_channel); - - /* - * wait for the last transaction shifted out. HRM states: - * at this point there may still be data in the SPI DMA FIFO waiting - * to be transmitted ... software needs to poll TXS in the SPI_STAT - * register until it goes low for 2 successive reads - */ - if (drv_data->tx != NULL) { - while ((read_STAT(drv_data) & BIT_STAT_TXS) || - (read_STAT(drv_data) & BIT_STAT_TXS)) - cpu_relax(); - } - - dev_dbg(&drv_data->pdev->dev, - "in dma_irq_handler dmastat:0x%x spistat:0x%x\n", - dmastat, read_STAT(drv_data)); - - timeout = jiffies + HZ; - while (!(read_STAT(drv_data) & BIT_STAT_SPIF)) - if (!time_before(jiffies, timeout)) { - dev_warn(&drv_data->pdev->dev, "timeout waiting for SPIF"); - break; - } else - cpu_relax(); - - if ((dmastat & DMA_ERR) && (spistat & BIT_STAT_RBSY)) { - msg->state = ERROR_STATE; - dev_err(&drv_data->pdev->dev, "dma receive: fifo/buffer overflow\n"); - } else { - msg->actual_length += drv_data->len_in_bytes; - - if (drv_data->cs_change) - bfin_spi_cs_deactive(drv_data, chip); - - /* Move to next transfer */ - msg->state = bfin_spi_next_transfer(drv_data); - } - - /* Schedule transfer tasklet */ - tasklet_schedule(&drv_data->pump_transfers); - - /* free the irq handler before next transfer */ - dev_dbg(&drv_data->pdev->dev, - "disable dma channel irq%d\n", - drv_data->dma_channel); - dma_disable_irq_nosync(drv_data->dma_channel); - - return IRQ_HANDLED; -} - -static void bfin_spi_pump_transfers(unsigned long data) -{ - struct bfin_spi_master_data *drv_data = (struct bfin_spi_master_data *)data; - struct spi_message *message = NULL; - struct spi_transfer *transfer = NULL; - struct spi_transfer *previous = NULL; - struct bfin_spi_slave_data *chip = NULL; - unsigned int bits_per_word; - u16 cr, cr_width, dma_width, dma_config; - u32 tranf_success = 1; - u8 full_duplex = 0; - - /* Get current state information */ - message = drv_data->cur_msg; - transfer = drv_data->cur_transfer; - chip = drv_data->cur_chip; - - /* - * if msg is error or done, report it back using complete() callback - */ - - /* Handle for abort */ - if (message->state == ERROR_STATE) { - dev_dbg(&drv_data->pdev->dev, "transfer: we've hit an error\n"); - message->status = -EIO; - bfin_spi_giveback(drv_data); - return; - } - - /* Handle end of message */ - if (message->state == DONE_STATE) { - dev_dbg(&drv_data->pdev->dev, "transfer: all done!\n"); - message->status = 0; - bfin_spi_giveback(drv_data); - return; - } - - /* Delay if requested at end of transfer */ - if (message->state == RUNNING_STATE) { - dev_dbg(&drv_data->pdev->dev, "transfer: still running ...\n"); - previous = list_entry(transfer->transfer_list.prev, - struct spi_transfer, transfer_list); - if (previous->delay_usecs) - udelay(previous->delay_usecs); - } - - /* Flush any existing transfers that may be sitting in the hardware */ - if (bfin_spi_flush(drv_data) == 0) { - dev_err(&drv_data->pdev->dev, "pump_transfers: flush 
failed\n"); - message->status = -EIO; - bfin_spi_giveback(drv_data); - return; - } - - if (transfer->len == 0) { - /* Move to next transfer of this msg */ - message->state = bfin_spi_next_transfer(drv_data); - /* Schedule next transfer tasklet */ - tasklet_schedule(&drv_data->pump_transfers); - return; - } - - if (transfer->tx_buf != NULL) { - drv_data->tx = (void *)transfer->tx_buf; - drv_data->tx_end = drv_data->tx + transfer->len; - dev_dbg(&drv_data->pdev->dev, "tx_buf is %p, tx_end is %p\n", - transfer->tx_buf, drv_data->tx_end); - } else { - drv_data->tx = NULL; - } - - if (transfer->rx_buf != NULL) { - full_duplex = transfer->tx_buf != NULL; - drv_data->rx = transfer->rx_buf; - drv_data->rx_end = drv_data->rx + transfer->len; - dev_dbg(&drv_data->pdev->dev, "rx_buf is %p, rx_end is %p\n", - transfer->rx_buf, drv_data->rx_end); - } else { - drv_data->rx = NULL; - } - - drv_data->rx_dma = transfer->rx_dma; - drv_data->tx_dma = transfer->tx_dma; - drv_data->len_in_bytes = transfer->len; - drv_data->cs_change = transfer->cs_change; - - /* Bits per word setup */ - bits_per_word = transfer->bits_per_word ? : message->spi->bits_per_word; - if ((bits_per_word > 0) && (bits_per_word % 16 == 0)) { - drv_data->n_bytes = bits_per_word/8; - drv_data->len = (transfer->len) >> 1; - cr_width = BIT_CTL_WORDSIZE; - drv_data->ops = &bfin_bfin_spi_transfer_ops_u16; - } else if ((bits_per_word > 0) && (bits_per_word % 8 == 0)) { - drv_data->n_bytes = bits_per_word/8; - drv_data->len = transfer->len; - cr_width = 0; - drv_data->ops = &bfin_bfin_spi_transfer_ops_u8; - } else { - dev_err(&drv_data->pdev->dev, "transfer: unsupported bits_per_word\n"); - message->status = -EINVAL; - bfin_spi_giveback(drv_data); - return; - } - cr = read_CTRL(drv_data) & ~(BIT_CTL_TIMOD | BIT_CTL_WORDSIZE); - cr |= cr_width; - write_CTRL(drv_data, cr); - - dev_dbg(&drv_data->pdev->dev, - "transfer: drv_data->ops is %p, chip->ops is %p, u8_ops is %p\n", - drv_data->ops, chip->ops, &bfin_bfin_spi_transfer_ops_u8); - - message->state = RUNNING_STATE; - dma_config = 0; - - /* Speed setup (surely valid because already checked) */ - if (transfer->speed_hz) - write_BAUD(drv_data, hz_to_spi_baud(transfer->speed_hz)); - else - write_BAUD(drv_data, chip->baud); - - write_STAT(drv_data, BIT_STAT_CLR); - bfin_spi_cs_active(drv_data, chip); - - dev_dbg(&drv_data->pdev->dev, - "now pumping a transfer: width is %d, len is %d\n", - cr_width, transfer->len); - - /* - * Try to map dma buffer and do a dma transfer. If successful use, - * different way to r/w according to the enable_dma settings and if - * we are not doing a full duplex transfer (since the hardware does - * not support full duplex DMA transfers). 
- */ - if (!full_duplex && drv_data->cur_chip->enable_dma - && drv_data->len > 6) { - - unsigned long dma_start_addr, flags; - - disable_dma(drv_data->dma_channel); - clear_dma_irqstat(drv_data->dma_channel); - - /* config dma channel */ - dev_dbg(&drv_data->pdev->dev, "doing dma transfer\n"); - set_dma_x_count(drv_data->dma_channel, drv_data->len); - if (cr_width == BIT_CTL_WORDSIZE) { - set_dma_x_modify(drv_data->dma_channel, 2); - dma_width = WDSIZE_16; - } else { - set_dma_x_modify(drv_data->dma_channel, 1); - dma_width = WDSIZE_8; - } - - /* poll for SPI completion before start */ - while (!(read_STAT(drv_data) & BIT_STAT_SPIF)) - cpu_relax(); - - /* dirty hack for autobuffer DMA mode */ - if (drv_data->tx_dma == 0xFFFF) { - dev_dbg(&drv_data->pdev->dev, - "doing autobuffer DMA out.\n"); - - /* no irq in autobuffer mode */ - dma_config = - (DMAFLOW_AUTO | RESTART | dma_width | DI_EN); - set_dma_config(drv_data->dma_channel, dma_config); - set_dma_start_addr(drv_data->dma_channel, - (unsigned long)drv_data->tx); - enable_dma(drv_data->dma_channel); - - /* start SPI transfer */ - write_CTRL(drv_data, cr | BIT_CTL_TIMOD_DMA_TX); - - /* just return here, there can only be one transfer - * in this mode - */ - message->status = 0; - bfin_spi_giveback(drv_data); - return; - } - - /* In dma mode, rx or tx must be NULL in one transfer */ - dma_config = (RESTART | dma_width | DI_EN); - if (drv_data->rx != NULL) { - /* set transfer mode, and enable SPI */ - dev_dbg(&drv_data->pdev->dev, "doing DMA in to %p (size %zx)\n", - drv_data->rx, drv_data->len_in_bytes); - - /* invalidate caches, if needed */ - if (bfin_addr_dcacheable((unsigned long) drv_data->rx)) - invalidate_dcache_range((unsigned long) drv_data->rx, - (unsigned long) (drv_data->rx + - drv_data->len_in_bytes)); - - dma_config |= WNR; - dma_start_addr = (unsigned long)drv_data->rx; - cr |= BIT_CTL_TIMOD_DMA_RX | BIT_CTL_SENDOPT; - - } else if (drv_data->tx != NULL) { - dev_dbg(&drv_data->pdev->dev, "doing DMA out.\n"); - - /* flush caches, if needed */ - if (bfin_addr_dcacheable((unsigned long) drv_data->tx)) - flush_dcache_range((unsigned long) drv_data->tx, - (unsigned long) (drv_data->tx + - drv_data->len_in_bytes)); - - dma_start_addr = (unsigned long)drv_data->tx; - cr |= BIT_CTL_TIMOD_DMA_TX; - - } else - BUG(); - - /* oh man, here there be monsters ... and i dont mean the - * fluffy cute ones from pixar, i mean the kind that'll eat - * your data, kick your dog, and love it all. do *not* try - * and change these lines unless you (1) heavily test DMA - * with SPI flashes on a loaded system (e.g. ping floods), - * (2) know just how broken the DMA engine interaction with - * the SPI peripheral is, and (3) have someone else to blame - * when you screw it all up anyways. - */ - set_dma_start_addr(drv_data->dma_channel, dma_start_addr); - set_dma_config(drv_data->dma_channel, dma_config); - local_irq_save(flags); - SSYNC(); - write_CTRL(drv_data, cr); - enable_dma(drv_data->dma_channel); - dma_enable_irq(drv_data->dma_channel); - local_irq_restore(flags); - - return; - } - - /* - * We always use SPI_WRITE mode (transfer starts with TDBR write). - * SPI_READ mode (transfer starts with RDBR read) seems to have - * problems with setting up the output value in TDBR prior to the - * start of the transfer. 
- */ - write_CTRL(drv_data, cr | BIT_CTL_TXMOD); - - if (chip->pio_interrupt) { - /* SPI irq should have been disabled by now */ - - /* discard old RX data and clear RXS */ - bfin_spi_dummy_read(drv_data); - - /* start transfer */ - if (drv_data->tx == NULL) - write_TDBR(drv_data, chip->idle_tx_val); - else { - int loop; - if (bits_per_word % 16 == 0) { - u16 *buf = (u16 *)drv_data->tx; - for (loop = 0; loop < bits_per_word / 16; - loop++) { - write_TDBR(drv_data, *buf++); - } - } else if (bits_per_word % 8 == 0) { - u8 *buf = (u8 *)drv_data->tx; - for (loop = 0; loop < bits_per_word / 8; loop++) - write_TDBR(drv_data, *buf++); - } - - drv_data->tx += drv_data->n_bytes; - } - - /* once TDBR is empty, interrupt is triggered */ - enable_irq(drv_data->spi_irq); - return; - } - - /* IO mode */ - dev_dbg(&drv_data->pdev->dev, "doing IO transfer\n"); - - if (full_duplex) { - /* full duplex mode */ - BUG_ON((drv_data->tx_end - drv_data->tx) != - (drv_data->rx_end - drv_data->rx)); - dev_dbg(&drv_data->pdev->dev, - "IO duplex: cr is 0x%x\n", cr); - - drv_data->ops->duplex(drv_data); - - if (drv_data->tx != drv_data->tx_end) - tranf_success = 0; - } else if (drv_data->tx != NULL) { - /* write only half duplex */ - dev_dbg(&drv_data->pdev->dev, - "IO write: cr is 0x%x\n", cr); - - drv_data->ops->write(drv_data); - - if (drv_data->tx != drv_data->tx_end) - tranf_success = 0; - } else if (drv_data->rx != NULL) { - /* read only half duplex */ - dev_dbg(&drv_data->pdev->dev, - "IO read: cr is 0x%x\n", cr); - - drv_data->ops->read(drv_data); - if (drv_data->rx != drv_data->rx_end) - tranf_success = 0; - } - - if (!tranf_success) { - dev_dbg(&drv_data->pdev->dev, - "IO write error!\n"); - message->state = ERROR_STATE; - } else { - /* Update total byte transferred */ - message->actual_length += drv_data->len_in_bytes; - /* Move to next transfer of this msg */ - message->state = bfin_spi_next_transfer(drv_data); - if (drv_data->cs_change) - bfin_spi_cs_deactive(drv_data, chip); - } - - /* Schedule next transfer tasklet */ - tasklet_schedule(&drv_data->pump_transfers); -} - -/* pop a msg from queue and kick off real transfer */ -static void bfin_spi_pump_messages(struct work_struct *work) -{ - struct bfin_spi_master_data *drv_data; - unsigned long flags; - - drv_data = container_of(work, struct bfin_spi_master_data, pump_messages); - - /* Lock queue and check for queue work */ - spin_lock_irqsave(&drv_data->lock, flags); - if (list_empty(&drv_data->queue) || !drv_data->running) { - /* pumper kicked off but no work to do */ - drv_data->busy = 0; - spin_unlock_irqrestore(&drv_data->lock, flags); - return; - } - - /* Make sure we are not already running a message */ - if (drv_data->cur_msg) { - spin_unlock_irqrestore(&drv_data->lock, flags); - return; - } - - /* Extract head of queue */ - drv_data->cur_msg = list_entry(drv_data->queue.next, - struct spi_message, queue); - - /* Setup the SSP using the per chip configuration */ - drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi); - bfin_spi_restore_state(drv_data); - - list_del_init(&drv_data->cur_msg->queue); - - /* Initial message state */ - drv_data->cur_msg->state = START_STATE; - drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next, - struct spi_transfer, transfer_list); - - dev_dbg(&drv_data->pdev->dev, "got a message to pump, " - "state is set to: baud %d, flag 0x%x, ctl 0x%x\n", - drv_data->cur_chip->baud, drv_data->cur_chip->flag, - drv_data->cur_chip->ctl_reg); - - dev_dbg(&drv_data->pdev->dev, - "the first transfer len is 
%d\n", - drv_data->cur_transfer->len); - - /* Mark as busy and launch transfers */ - tasklet_schedule(&drv_data->pump_transfers); - - drv_data->busy = 1; - spin_unlock_irqrestore(&drv_data->lock, flags); -} - -/* - * got a msg to transfer, queue it in drv_data->queue. - * And kick off message pumper - */ -static int bfin_spi_transfer(struct spi_device *spi, struct spi_message *msg) -{ - struct bfin_spi_master_data *drv_data = spi_master_get_devdata(spi->master); - unsigned long flags; - - spin_lock_irqsave(&drv_data->lock, flags); - - if (!drv_data->running) { - spin_unlock_irqrestore(&drv_data->lock, flags); - return -ESHUTDOWN; - } - - msg->actual_length = 0; - msg->status = -EINPROGRESS; - msg->state = START_STATE; - - dev_dbg(&spi->dev, "adding an msg in transfer() \n"); - list_add_tail(&msg->queue, &drv_data->queue); - - if (drv_data->running && !drv_data->busy) - queue_work(drv_data->workqueue, &drv_data->pump_messages); - - spin_unlock_irqrestore(&drv_data->lock, flags); - - return 0; -} - -#define MAX_SPI_SSEL 7 - -static u16 ssel[][MAX_SPI_SSEL] = { - {P_SPI0_SSEL1, P_SPI0_SSEL2, P_SPI0_SSEL3, - P_SPI0_SSEL4, P_SPI0_SSEL5, - P_SPI0_SSEL6, P_SPI0_SSEL7}, - - {P_SPI1_SSEL1, P_SPI1_SSEL2, P_SPI1_SSEL3, - P_SPI1_SSEL4, P_SPI1_SSEL5, - P_SPI1_SSEL6, P_SPI1_SSEL7}, - - {P_SPI2_SSEL1, P_SPI2_SSEL2, P_SPI2_SSEL3, - P_SPI2_SSEL4, P_SPI2_SSEL5, - P_SPI2_SSEL6, P_SPI2_SSEL7}, -}; - -/* setup for devices (may be called multiple times -- not just first setup) */ -static int bfin_spi_setup(struct spi_device *spi) -{ - struct bfin5xx_spi_chip *chip_info; - struct bfin_spi_slave_data *chip = NULL; - struct bfin_spi_master_data *drv_data = spi_master_get_devdata(spi->master); - u16 bfin_ctl_reg; - int ret = -EINVAL; - - /* Only alloc (or use chip_info) on first setup */ - chip_info = NULL; - chip = spi_get_ctldata(spi); - if (chip == NULL) { - chip = kzalloc(sizeof(*chip), GFP_KERNEL); - if (!chip) { - dev_err(&spi->dev, "cannot allocate chip data\n"); - ret = -ENOMEM; - goto error; - } - - chip->enable_dma = 0; - chip_info = spi->controller_data; - } - - /* Let people set non-standard bits directly */ - bfin_ctl_reg = BIT_CTL_OPENDRAIN | BIT_CTL_EMISO | - BIT_CTL_PSSE | BIT_CTL_GM | BIT_CTL_SZ; - - /* chip_info isn't always needed */ - if (chip_info) { - /* Make sure people stop trying to set fields via ctl_reg - * when they should actually be using common SPI framework. - * Currently we let through: WOM EMISO PSSE GM SZ. - * Not sure if a user actually needs/uses any of these, - * but let's assume (for now) they do. 
- */ - if (chip_info->ctl_reg & ~bfin_ctl_reg) { - dev_err(&spi->dev, "do not set bits in ctl_reg " - "that the SPI framework manages\n"); - goto error; - } - chip->enable_dma = chip_info->enable_dma != 0 - && drv_data->master_info->enable_dma; - chip->ctl_reg = chip_info->ctl_reg; - chip->cs_chg_udelay = chip_info->cs_chg_udelay; - chip->idle_tx_val = chip_info->idle_tx_val; - chip->pio_interrupt = chip_info->pio_interrupt; - spi->bits_per_word = chip_info->bits_per_word; - } else { - /* force a default base state */ - chip->ctl_reg &= bfin_ctl_reg; - } - - if (spi->bits_per_word % 8) { - dev_err(&spi->dev, "%d bits_per_word is not supported\n", - spi->bits_per_word); - goto error; - } - - /* translate common spi framework into our register */ - if (spi->mode & ~(SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST)) { - dev_err(&spi->dev, "unsupported spi modes detected\n"); - goto error; - } - if (spi->mode & SPI_CPOL) - chip->ctl_reg |= BIT_CTL_CPOL; - if (spi->mode & SPI_CPHA) - chip->ctl_reg |= BIT_CTL_CPHA; - if (spi->mode & SPI_LSB_FIRST) - chip->ctl_reg |= BIT_CTL_LSBF; - /* we dont support running in slave mode (yet?) */ - chip->ctl_reg |= BIT_CTL_MASTER; - - /* - * Notice: for blackfin, the speed_hz is the value of register - * SPI_BAUD, not the real baudrate - */ - chip->baud = hz_to_spi_baud(spi->max_speed_hz); - chip->chip_select_num = spi->chip_select; - if (chip->chip_select_num < MAX_CTRL_CS) { - if (!(spi->mode & SPI_CPHA)) - dev_warn(&spi->dev, "Warning: SPI CPHA not set:" - " Slave Select not under software control!\n" - " See Documentation/blackfin/bfin-spi-notes.txt"); - - chip->flag = (1 << spi->chip_select) << 8; - } else - chip->cs_gpio = chip->chip_select_num - MAX_CTRL_CS; - - if (chip->enable_dma && chip->pio_interrupt) { - dev_err(&spi->dev, "enable_dma is set, " - "do not set pio_interrupt\n"); - goto error; - } - /* - * if any one SPI chip is registered and wants DMA, request the - * DMA channel for it - */ - if (chip->enable_dma && !drv_data->dma_requested) { - /* register dma irq handler */ - ret = request_dma(drv_data->dma_channel, "BFIN_SPI_DMA"); - if (ret) { - dev_err(&spi->dev, - "Unable to request BlackFin SPI DMA channel\n"); - goto error; - } - drv_data->dma_requested = 1; - - ret = set_dma_callback(drv_data->dma_channel, - bfin_spi_dma_irq_handler, drv_data); - if (ret) { - dev_err(&spi->dev, "Unable to set dma callback\n"); - goto error; - } - dma_disable_irq(drv_data->dma_channel); - } - - if (chip->pio_interrupt && !drv_data->irq_requested) { - ret = request_irq(drv_data->spi_irq, bfin_spi_pio_irq_handler, - IRQF_DISABLED, "BFIN_SPI", drv_data); - if (ret) { - dev_err(&spi->dev, "Unable to register spi IRQ\n"); - goto error; - } - drv_data->irq_requested = 1; - /* we use write mode, spi irq has to be disabled here */ - disable_irq(drv_data->spi_irq); - } - - if (chip->chip_select_num >= MAX_CTRL_CS) { - /* Only request on first setup */ - if (spi_get_ctldata(spi) == NULL) { - ret = gpio_request(chip->cs_gpio, spi->modalias); - if (ret) { - dev_err(&spi->dev, "gpio_request() error\n"); - goto pin_error; - } - gpio_direction_output(chip->cs_gpio, 1); - } - } - - dev_dbg(&spi->dev, "setup spi chip %s, width is %d, dma is %d\n", - spi->modalias, spi->bits_per_word, chip->enable_dma); - dev_dbg(&spi->dev, "ctl_reg is 0x%x, flag_reg is 0x%x\n", - chip->ctl_reg, chip->flag); - - spi_set_ctldata(spi, chip); - - dev_dbg(&spi->dev, "chip select number is %d\n", chip->chip_select_num); - if (chip->chip_select_num < MAX_CTRL_CS) { - ret = 
peripheral_request(ssel[spi->master->bus_num] - [chip->chip_select_num-1], spi->modalias); - if (ret) { - dev_err(&spi->dev, "peripheral_request() error\n"); - goto pin_error; - } - } - - bfin_spi_cs_enable(drv_data, chip); - bfin_spi_cs_deactive(drv_data, chip); - - return 0; - - pin_error: - if (chip->chip_select_num >= MAX_CTRL_CS) - gpio_free(chip->cs_gpio); - else - peripheral_free(ssel[spi->master->bus_num] - [chip->chip_select_num - 1]); - error: - if (chip) { - if (drv_data->dma_requested) - free_dma(drv_data->dma_channel); - drv_data->dma_requested = 0; - - kfree(chip); - /* prevent free 'chip' twice */ - spi_set_ctldata(spi, NULL); - } - - return ret; -} - -/* - * callback for spi framework. - * clean driver specific data - */ -static void bfin_spi_cleanup(struct spi_device *spi) -{ - struct bfin_spi_slave_data *chip = spi_get_ctldata(spi); - struct bfin_spi_master_data *drv_data = spi_master_get_devdata(spi->master); - - if (!chip) - return; - - if (chip->chip_select_num < MAX_CTRL_CS) { - peripheral_free(ssel[spi->master->bus_num] - [chip->chip_select_num-1]); - bfin_spi_cs_disable(drv_data, chip); - } else - gpio_free(chip->cs_gpio); - - kfree(chip); - /* prevent free 'chip' twice */ - spi_set_ctldata(spi, NULL); -} - -static inline int bfin_spi_init_queue(struct bfin_spi_master_data *drv_data) -{ - INIT_LIST_HEAD(&drv_data->queue); - spin_lock_init(&drv_data->lock); - - drv_data->running = false; - drv_data->busy = 0; - - /* init transfer tasklet */ - tasklet_init(&drv_data->pump_transfers, - bfin_spi_pump_transfers, (unsigned long)drv_data); - - /* init messages workqueue */ - INIT_WORK(&drv_data->pump_messages, bfin_spi_pump_messages); - drv_data->workqueue = create_singlethread_workqueue( - dev_name(drv_data->master->dev.parent)); - if (drv_data->workqueue == NULL) - return -EBUSY; - - return 0; -} - -static inline int bfin_spi_start_queue(struct bfin_spi_master_data *drv_data) -{ - unsigned long flags; - - spin_lock_irqsave(&drv_data->lock, flags); - - if (drv_data->running || drv_data->busy) { - spin_unlock_irqrestore(&drv_data->lock, flags); - return -EBUSY; - } - - drv_data->running = true; - drv_data->cur_msg = NULL; - drv_data->cur_transfer = NULL; - drv_data->cur_chip = NULL; - spin_unlock_irqrestore(&drv_data->lock, flags); - - queue_work(drv_data->workqueue, &drv_data->pump_messages); - - return 0; -} - -static inline int bfin_spi_stop_queue(struct bfin_spi_master_data *drv_data) -{ - unsigned long flags; - unsigned limit = 500; - int status = 0; - - spin_lock_irqsave(&drv_data->lock, flags); - - /* - * This is a bit lame, but is optimized for the common execution path. - * A wait_queue on the drv_data->busy could be used, but then the common - * execution path (pump_messages) would be required to call wake_up or - * friends on every SPI message. 
Do this instead - */ - drv_data->running = false; - while ((!list_empty(&drv_data->queue) || drv_data->busy) && limit--) { - spin_unlock_irqrestore(&drv_data->lock, flags); - msleep(10); - spin_lock_irqsave(&drv_data->lock, flags); - } - - if (!list_empty(&drv_data->queue) || drv_data->busy) - status = -EBUSY; - - spin_unlock_irqrestore(&drv_data->lock, flags); - - return status; -} - -static inline int bfin_spi_destroy_queue(struct bfin_spi_master_data *drv_data) -{ - int status; - - status = bfin_spi_stop_queue(drv_data); - if (status != 0) - return status; - - destroy_workqueue(drv_data->workqueue); - - return 0; -} - -static int __init bfin_spi_probe(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - struct bfin5xx_spi_master *platform_info; - struct spi_master *master; - struct bfin_spi_master_data *drv_data; - struct resource *res; - int status = 0; - - platform_info = dev->platform_data; - - /* Allocate master with space for drv_data */ - master = spi_alloc_master(dev, sizeof(*drv_data)); - if (!master) { - dev_err(&pdev->dev, "can not alloc spi_master\n"); - return -ENOMEM; - } - - drv_data = spi_master_get_devdata(master); - drv_data->master = master; - drv_data->master_info = platform_info; - drv_data->pdev = pdev; - drv_data->pin_req = platform_info->pin_req; - - /* the spi->mode bits supported by this driver: */ - master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST; - - master->bus_num = pdev->id; - master->num_chipselect = platform_info->num_chipselect; - master->cleanup = bfin_spi_cleanup; - master->setup = bfin_spi_setup; - master->transfer = bfin_spi_transfer; - - /* Find and map our resources */ - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (res == NULL) { - dev_err(dev, "Cannot get IORESOURCE_MEM\n"); - status = -ENOENT; - goto out_error_get_res; - } - - drv_data->regs_base = ioremap(res->start, resource_size(res)); - if (drv_data->regs_base == NULL) { - dev_err(dev, "Cannot map IO\n"); - status = -ENXIO; - goto out_error_ioremap; - } - - res = platform_get_resource(pdev, IORESOURCE_DMA, 0); - if (res == NULL) { - dev_err(dev, "No DMA channel specified\n"); - status = -ENOENT; - goto out_error_free_io; - } - drv_data->dma_channel = res->start; - - drv_data->spi_irq = platform_get_irq(pdev, 0); - if (drv_data->spi_irq < 0) { - dev_err(dev, "No spi pio irq specified\n"); - status = -ENOENT; - goto out_error_free_io; - } - - /* Initial and start queue */ - status = bfin_spi_init_queue(drv_data); - if (status != 0) { - dev_err(dev, "problem initializing queue\n"); - goto out_error_queue_alloc; - } - - status = bfin_spi_start_queue(drv_data); - if (status != 0) { - dev_err(dev, "problem starting queue\n"); - goto out_error_queue_alloc; - } - - status = peripheral_request_list(drv_data->pin_req, DRV_NAME); - if (status != 0) { - dev_err(&pdev->dev, ": Requesting Peripherals failed\n"); - goto out_error_queue_alloc; - } - - /* Reset SPI registers. If these registers were used by the boot loader, - * the sky may fall on your head if you enable the dma controller. 
- */ - write_CTRL(drv_data, BIT_CTL_CPHA | BIT_CTL_MASTER); - write_FLAG(drv_data, 0xFF00); - - /* Register with the SPI framework */ - platform_set_drvdata(pdev, drv_data); - status = spi_register_master(master); - if (status != 0) { - dev_err(dev, "problem registering spi master\n"); - goto out_error_queue_alloc; - } - - dev_info(dev, "%s, Version %s, regs_base@%p, dma channel@%d\n", - DRV_DESC, DRV_VERSION, drv_data->regs_base, - drv_data->dma_channel); - return status; - -out_error_queue_alloc: - bfin_spi_destroy_queue(drv_data); -out_error_free_io: - iounmap((void *) drv_data->regs_base); -out_error_ioremap: -out_error_get_res: - spi_master_put(master); - - return status; -} - -/* stop hardware and remove the driver */ -static int __devexit bfin_spi_remove(struct platform_device *pdev) -{ - struct bfin_spi_master_data *drv_data = platform_get_drvdata(pdev); - int status = 0; - - if (!drv_data) - return 0; - - /* Remove the queue */ - status = bfin_spi_destroy_queue(drv_data); - if (status != 0) - return status; - - /* Disable the SSP at the peripheral and SOC level */ - bfin_spi_disable(drv_data); - - /* Release DMA */ - if (drv_data->master_info->enable_dma) { - if (dma_channel_active(drv_data->dma_channel)) - free_dma(drv_data->dma_channel); - } - - if (drv_data->irq_requested) { - free_irq(drv_data->spi_irq, drv_data); - drv_data->irq_requested = 0; - } - - /* Disconnect from the SPI framework */ - spi_unregister_master(drv_data->master); - - peripheral_free_list(drv_data->pin_req); - - /* Prevent double remove */ - platform_set_drvdata(pdev, NULL); - - return 0; -} - -#ifdef CONFIG_PM -static int bfin_spi_suspend(struct platform_device *pdev, pm_message_t state) -{ - struct bfin_spi_master_data *drv_data = platform_get_drvdata(pdev); - int status = 0; - - status = bfin_spi_stop_queue(drv_data); - if (status != 0) - return status; - - drv_data->ctrl_reg = read_CTRL(drv_data); - drv_data->flag_reg = read_FLAG(drv_data); - - /* - * reset SPI_CTL and SPI_FLG registers - */ - write_CTRL(drv_data, BIT_CTL_CPHA | BIT_CTL_MASTER); - write_FLAG(drv_data, 0xFF00); - - return 0; -} - -static int bfin_spi_resume(struct platform_device *pdev) -{ - struct bfin_spi_master_data *drv_data = platform_get_drvdata(pdev); - int status = 0; - - write_CTRL(drv_data, drv_data->ctrl_reg); - write_FLAG(drv_data, drv_data->flag_reg); - - /* Start the queue running */ - status = bfin_spi_start_queue(drv_data); - if (status != 0) { - dev_err(&pdev->dev, "problem starting queue (%d)\n", status); - return status; - } - - return 0; -} -#else -#define bfin_spi_suspend NULL -#define bfin_spi_resume NULL -#endif /* CONFIG_PM */ - -MODULE_ALIAS("platform:bfin-spi"); -static struct platform_driver bfin_spi_driver = { - .driver = { - .name = DRV_NAME, - .owner = THIS_MODULE, - }, - .suspend = bfin_spi_suspend, - .resume = bfin_spi_resume, - .remove = __devexit_p(bfin_spi_remove), -}; - -static int __init bfin_spi_init(void) -{ - return platform_driver_probe(&bfin_spi_driver, bfin_spi_probe); -} -subsys_initcall(bfin_spi_init); - -static void __exit bfin_spi_exit(void) -{ - platform_driver_unregister(&bfin_spi_driver); -} -module_exit(bfin_spi_exit); diff --git a/drivers/spi/spi_bfin_sport.c b/drivers/spi/spi_bfin_sport.c deleted file mode 100644 index e557ff6..0000000 --- a/drivers/spi/spi_bfin_sport.c +++ /dev/null @@ -1,952 +0,0 @@ -/* - * SPI bus via the Blackfin SPORT peripheral - * - * Enter bugs at http://blackfin.uclinux.org/ - * - * Copyright 2009-2011 Analog Devices Inc. 
- * - * Licensed under the GPL-2 or later. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -#define DRV_NAME "bfin-sport-spi" -#define DRV_DESC "SPI bus via the Blackfin SPORT" - -MODULE_AUTHOR("Cliff Cai"); -MODULE_DESCRIPTION(DRV_DESC); -MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:bfin-sport-spi"); - -enum bfin_sport_spi_state { - START_STATE, - RUNNING_STATE, - DONE_STATE, - ERROR_STATE, -}; - -struct bfin_sport_spi_master_data; - -struct bfin_sport_transfer_ops { - void (*write) (struct bfin_sport_spi_master_data *); - void (*read) (struct bfin_sport_spi_master_data *); - void (*duplex) (struct bfin_sport_spi_master_data *); -}; - -struct bfin_sport_spi_master_data { - /* Driver model hookup */ - struct device *dev; - - /* SPI framework hookup */ - struct spi_master *master; - - /* Regs base of SPI controller */ - struct sport_register __iomem *regs; - int err_irq; - - /* Pin request list */ - u16 *pin_req; - - /* Driver message queue */ - struct workqueue_struct *workqueue; - struct work_struct pump_messages; - spinlock_t lock; - struct list_head queue; - int busy; - bool run; - - /* Message Transfer pump */ - struct tasklet_struct pump_transfers; - - /* Current message transfer state info */ - enum bfin_sport_spi_state state; - struct spi_message *cur_msg; - struct spi_transfer *cur_transfer; - struct bfin_sport_spi_slave_data *cur_chip; - union { - void *tx; - u8 *tx8; - u16 *tx16; - }; - void *tx_end; - union { - void *rx; - u8 *rx8; - u16 *rx16; - }; - void *rx_end; - - int cs_change; - struct bfin_sport_transfer_ops *ops; -}; - -struct bfin_sport_spi_slave_data { - u16 ctl_reg; - u16 baud; - u16 cs_chg_udelay; /* Some devices require > 255usec delay */ - u32 cs_gpio; - u16 idle_tx_val; - struct bfin_sport_transfer_ops *ops; -}; - -static void -bfin_sport_spi_enable(struct bfin_sport_spi_master_data *drv_data) -{ - bfin_write_or(&drv_data->regs->tcr1, TSPEN); - bfin_write_or(&drv_data->regs->rcr1, TSPEN); - SSYNC(); -} - -static void -bfin_sport_spi_disable(struct bfin_sport_spi_master_data *drv_data) -{ - bfin_write_and(&drv_data->regs->tcr1, ~TSPEN); - bfin_write_and(&drv_data->regs->rcr1, ~TSPEN); - SSYNC(); -} - -/* Caculate the SPI_BAUD register value based on input HZ */ -static u16 -bfin_sport_hz_to_spi_baud(u32 speed_hz) -{ - u_long clk, sclk = get_sclk(); - int div = (sclk / (2 * speed_hz)) - 1; - - if (div < 0) - div = 0; - - clk = sclk / (2 * (div + 1)); - - if (clk > speed_hz) - div++; - - return div; -} - -/* Chip select operation functions for cs_change flag */ -static void -bfin_sport_spi_cs_active(struct bfin_sport_spi_slave_data *chip) -{ - gpio_direction_output(chip->cs_gpio, 0); -} - -static void -bfin_sport_spi_cs_deactive(struct bfin_sport_spi_slave_data *chip) -{ - gpio_direction_output(chip->cs_gpio, 1); - /* Move delay here for consistency */ - if (chip->cs_chg_udelay) - udelay(chip->cs_chg_udelay); -} - -static void -bfin_sport_spi_stat_poll_complete(struct bfin_sport_spi_master_data *drv_data) -{ - unsigned long timeout = jiffies + HZ; - while (!(bfin_read(&drv_data->regs->stat) & RXNE)) { - if (!time_before(jiffies, timeout)) - break; - } -} - -static void -bfin_sport_spi_u8_writer(struct bfin_sport_spi_master_data *drv_data) -{ - u16 dummy; - - while (drv_data->tx < drv_data->tx_end) { - bfin_write(&drv_data->regs->tx16, *drv_data->tx8++); - bfin_sport_spi_stat_poll_complete(drv_data); - dummy = 
bfin_read(&drv_data->regs->rx16); - } -} - -static void -bfin_sport_spi_u8_reader(struct bfin_sport_spi_master_data *drv_data) -{ - u16 tx_val = drv_data->cur_chip->idle_tx_val; - - while (drv_data->rx < drv_data->rx_end) { - bfin_write(&drv_data->regs->tx16, tx_val); - bfin_sport_spi_stat_poll_complete(drv_data); - *drv_data->rx8++ = bfin_read(&drv_data->regs->rx16); - } -} - -static void -bfin_sport_spi_u8_duplex(struct bfin_sport_spi_master_data *drv_data) -{ - while (drv_data->rx < drv_data->rx_end) { - bfin_write(&drv_data->regs->tx16, *drv_data->tx8++); - bfin_sport_spi_stat_poll_complete(drv_data); - *drv_data->rx8++ = bfin_read(&drv_data->regs->rx16); - } -} - -static struct bfin_sport_transfer_ops bfin_sport_transfer_ops_u8 = { - .write = bfin_sport_spi_u8_writer, - .read = bfin_sport_spi_u8_reader, - .duplex = bfin_sport_spi_u8_duplex, -}; - -static void -bfin_sport_spi_u16_writer(struct bfin_sport_spi_master_data *drv_data) -{ - u16 dummy; - - while (drv_data->tx < drv_data->tx_end) { - bfin_write(&drv_data->regs->tx16, *drv_data->tx16++); - bfin_sport_spi_stat_poll_complete(drv_data); - dummy = bfin_read(&drv_data->regs->rx16); - } -} - -static void -bfin_sport_spi_u16_reader(struct bfin_sport_spi_master_data *drv_data) -{ - u16 tx_val = drv_data->cur_chip->idle_tx_val; - - while (drv_data->rx < drv_data->rx_end) { - bfin_write(&drv_data->regs->tx16, tx_val); - bfin_sport_spi_stat_poll_complete(drv_data); - *drv_data->rx16++ = bfin_read(&drv_data->regs->rx16); - } -} - -static void -bfin_sport_spi_u16_duplex(struct bfin_sport_spi_master_data *drv_data) -{ - while (drv_data->rx < drv_data->rx_end) { - bfin_write(&drv_data->regs->tx16, *drv_data->tx16++); - bfin_sport_spi_stat_poll_complete(drv_data); - *drv_data->rx16++ = bfin_read(&drv_data->regs->rx16); - } -} - -static struct bfin_sport_transfer_ops bfin_sport_transfer_ops_u16 = { - .write = bfin_sport_spi_u16_writer, - .read = bfin_sport_spi_u16_reader, - .duplex = bfin_sport_spi_u16_duplex, -}; - -/* stop controller and re-config current chip */ -static void -bfin_sport_spi_restore_state(struct bfin_sport_spi_master_data *drv_data) -{ - struct bfin_sport_spi_slave_data *chip = drv_data->cur_chip; - unsigned int bits = (drv_data->ops == &bfin_sport_transfer_ops_u8 ? 
7 : 15); - - bfin_sport_spi_disable(drv_data); - dev_dbg(drv_data->dev, "restoring spi ctl state\n"); - - bfin_write(&drv_data->regs->tcr1, chip->ctl_reg); - bfin_write(&drv_data->regs->tcr2, bits); - bfin_write(&drv_data->regs->tclkdiv, chip->baud); - bfin_write(&drv_data->regs->tfsdiv, bits); - SSYNC(); - - bfin_write(&drv_data->regs->rcr1, chip->ctl_reg & ~(ITCLK | ITFS)); - bfin_write(&drv_data->regs->rcr2, bits); - SSYNC(); - - bfin_sport_spi_cs_active(chip); -} - -/* test if there is more transfer to be done */ -static enum bfin_sport_spi_state -bfin_sport_spi_next_transfer(struct bfin_sport_spi_master_data *drv_data) -{ - struct spi_message *msg = drv_data->cur_msg; - struct spi_transfer *trans = drv_data->cur_transfer; - - /* Move to next transfer */ - if (trans->transfer_list.next != &msg->transfers) { - drv_data->cur_transfer = - list_entry(trans->transfer_list.next, - struct spi_transfer, transfer_list); - return RUNNING_STATE; - } - - return DONE_STATE; -} - -/* - * caller already set message->status; - * dma and pio irqs are blocked give finished message back - */ -static void -bfin_sport_spi_giveback(struct bfin_sport_spi_master_data *drv_data) -{ - struct bfin_sport_spi_slave_data *chip = drv_data->cur_chip; - unsigned long flags; - struct spi_message *msg; - - spin_lock_irqsave(&drv_data->lock, flags); - msg = drv_data->cur_msg; - drv_data->state = START_STATE; - drv_data->cur_msg = NULL; - drv_data->cur_transfer = NULL; - drv_data->cur_chip = NULL; - queue_work(drv_data->workqueue, &drv_data->pump_messages); - spin_unlock_irqrestore(&drv_data->lock, flags); - - if (!drv_data->cs_change) - bfin_sport_spi_cs_deactive(chip); - - if (msg->complete) - msg->complete(msg->context); -} - -static irqreturn_t -sport_err_handler(int irq, void *dev_id) -{ - struct bfin_sport_spi_master_data *drv_data = dev_id; - u16 status; - - dev_dbg(drv_data->dev, "%s enter\n", __func__); - status = bfin_read(&drv_data->regs->stat) & (TOVF | TUVF | ROVF | RUVF); - - if (status) { - bfin_write(&drv_data->regs->stat, status); - SSYNC(); - - bfin_sport_spi_disable(drv_data); - dev_err(drv_data->dev, "status error:%s%s%s%s\n", - status & TOVF ? " TOVF" : "", - status & TUVF ? " TUVF" : "", - status & ROVF ? " ROVF" : "", - status & RUVF ? 
" RUVF" : ""); - } - - return IRQ_HANDLED; -} - -static void -bfin_sport_spi_pump_transfers(unsigned long data) -{ - struct bfin_sport_spi_master_data *drv_data = (void *)data; - struct spi_message *message = NULL; - struct spi_transfer *transfer = NULL; - struct spi_transfer *previous = NULL; - struct bfin_sport_spi_slave_data *chip = NULL; - unsigned int bits_per_word; - u32 tranf_success = 1; - u32 transfer_speed; - u8 full_duplex = 0; - - /* Get current state information */ - message = drv_data->cur_msg; - transfer = drv_data->cur_transfer; - chip = drv_data->cur_chip; - - if (transfer->speed_hz) - transfer_speed = bfin_sport_hz_to_spi_baud(transfer->speed_hz); - else - transfer_speed = chip->baud; - bfin_write(&drv_data->regs->tclkdiv, transfer_speed); - SSYNC(); - - /* - * if msg is error or done, report it back using complete() callback - */ - - /* Handle for abort */ - if (drv_data->state == ERROR_STATE) { - dev_dbg(drv_data->dev, "transfer: we've hit an error\n"); - message->status = -EIO; - bfin_sport_spi_giveback(drv_data); - return; - } - - /* Handle end of message */ - if (drv_data->state == DONE_STATE) { - dev_dbg(drv_data->dev, "transfer: all done!\n"); - message->status = 0; - bfin_sport_spi_giveback(drv_data); - return; - } - - /* Delay if requested at end of transfer */ - if (drv_data->state == RUNNING_STATE) { - dev_dbg(drv_data->dev, "transfer: still running ...\n"); - previous = list_entry(transfer->transfer_list.prev, - struct spi_transfer, transfer_list); - if (previous->delay_usecs) - udelay(previous->delay_usecs); - } - - if (transfer->len == 0) { - /* Move to next transfer of this msg */ - drv_data->state = bfin_sport_spi_next_transfer(drv_data); - /* Schedule next transfer tasklet */ - tasklet_schedule(&drv_data->pump_transfers); - } - - if (transfer->tx_buf != NULL) { - drv_data->tx = (void *)transfer->tx_buf; - drv_data->tx_end = drv_data->tx + transfer->len; - dev_dbg(drv_data->dev, "tx_buf is %p, tx_end is %p\n", - transfer->tx_buf, drv_data->tx_end); - } else - drv_data->tx = NULL; - - if (transfer->rx_buf != NULL) { - full_duplex = transfer->tx_buf != NULL; - drv_data->rx = transfer->rx_buf; - drv_data->rx_end = drv_data->rx + transfer->len; - dev_dbg(drv_data->dev, "rx_buf is %p, rx_end is %p\n", - transfer->rx_buf, drv_data->rx_end); - } else - drv_data->rx = NULL; - - drv_data->cs_change = transfer->cs_change; - - /* Bits per word setup */ - bits_per_word = transfer->bits_per_word ? 
: message->spi->bits_per_word; - if (bits_per_word == 8) - drv_data->ops = &bfin_sport_transfer_ops_u8; - else - drv_data->ops = &bfin_sport_transfer_ops_u16; - - drv_data->state = RUNNING_STATE; - - if (drv_data->cs_change) - bfin_sport_spi_cs_active(chip); - - dev_dbg(drv_data->dev, - "now pumping a transfer: width is %d, len is %d\n", - bits_per_word, transfer->len); - - /* PIO mode write then read */ - dev_dbg(drv_data->dev, "doing IO transfer\n"); - - bfin_sport_spi_enable(drv_data); - if (full_duplex) { - /* full duplex mode */ - BUG_ON((drv_data->tx_end - drv_data->tx) != - (drv_data->rx_end - drv_data->rx)); - drv_data->ops->duplex(drv_data); - - if (drv_data->tx != drv_data->tx_end) - tranf_success = 0; - } else if (drv_data->tx != NULL) { - /* write only half duplex */ - - drv_data->ops->write(drv_data); - - if (drv_data->tx != drv_data->tx_end) - tranf_success = 0; - } else if (drv_data->rx != NULL) { - /* read only half duplex */ - - drv_data->ops->read(drv_data); - if (drv_data->rx != drv_data->rx_end) - tranf_success = 0; - } - bfin_sport_spi_disable(drv_data); - - if (!tranf_success) { - dev_dbg(drv_data->dev, "IO write error!\n"); - drv_data->state = ERROR_STATE; - } else { - /* Update total byte transfered */ - message->actual_length += transfer->len; - /* Move to next transfer of this msg */ - drv_data->state = bfin_sport_spi_next_transfer(drv_data); - if (drv_data->cs_change) - bfin_sport_spi_cs_deactive(chip); - } - - /* Schedule next transfer tasklet */ - tasklet_schedule(&drv_data->pump_transfers); -} - -/* pop a msg from queue and kick off real transfer */ -static void -bfin_sport_spi_pump_messages(struct work_struct *work) -{ - struct bfin_sport_spi_master_data *drv_data; - unsigned long flags; - struct spi_message *next_msg; - - drv_data = container_of(work, struct bfin_sport_spi_master_data, pump_messages); - - /* Lock queue and check for queue work */ - spin_lock_irqsave(&drv_data->lock, flags); - if (list_empty(&drv_data->queue) || !drv_data->run) { - /* pumper kicked off but no work to do */ - drv_data->busy = 0; - spin_unlock_irqrestore(&drv_data->lock, flags); - return; - } - - /* Make sure we are not already running a message */ - if (drv_data->cur_msg) { - spin_unlock_irqrestore(&drv_data->lock, flags); - return; - } - - /* Extract head of queue */ - next_msg = list_entry(drv_data->queue.next, - struct spi_message, queue); - - drv_data->cur_msg = next_msg; - - /* Setup the SSP using the per chip configuration */ - drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi); - - list_del_init(&drv_data->cur_msg->queue); - - /* Initialize message state */ - drv_data->cur_msg->state = START_STATE; - drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next, - struct spi_transfer, transfer_list); - bfin_sport_spi_restore_state(drv_data); - dev_dbg(drv_data->dev, "got a message to pump, " - "state is set to: baud %d, cs_gpio %i, ctl 0x%x\n", - drv_data->cur_chip->baud, drv_data->cur_chip->cs_gpio, - drv_data->cur_chip->ctl_reg); - - dev_dbg(drv_data->dev, - "the first transfer len is %d\n", - drv_data->cur_transfer->len); - - /* Mark as busy and launch transfers */ - tasklet_schedule(&drv_data->pump_transfers); - - drv_data->busy = 1; - spin_unlock_irqrestore(&drv_data->lock, flags); -} - -/* - * got a msg to transfer, queue it in drv_data->queue. 
- * And kick off message pumper - */ -static int -bfin_sport_spi_transfer(struct spi_device *spi, struct spi_message *msg) -{ - struct bfin_sport_spi_master_data *drv_data = spi_master_get_devdata(spi->master); - unsigned long flags; - - spin_lock_irqsave(&drv_data->lock, flags); - - if (!drv_data->run) { - spin_unlock_irqrestore(&drv_data->lock, flags); - return -ESHUTDOWN; - } - - msg->actual_length = 0; - msg->status = -EINPROGRESS; - msg->state = START_STATE; - - dev_dbg(&spi->dev, "adding an msg in transfer()\n"); - list_add_tail(&msg->queue, &drv_data->queue); - - if (drv_data->run && !drv_data->busy) - queue_work(drv_data->workqueue, &drv_data->pump_messages); - - spin_unlock_irqrestore(&drv_data->lock, flags); - - return 0; -} - -/* Called every time common spi devices change state */ -static int -bfin_sport_spi_setup(struct spi_device *spi) -{ - struct bfin_sport_spi_slave_data *chip, *first = NULL; - int ret; - - /* Only alloc (or use chip_info) on first setup */ - chip = spi_get_ctldata(spi); - if (chip == NULL) { - struct bfin5xx_spi_chip *chip_info; - - chip = first = kzalloc(sizeof(*chip), GFP_KERNEL); - if (!chip) - return -ENOMEM; - - /* platform chip_info isn't required */ - chip_info = spi->controller_data; - if (chip_info) { - /* - * DITFS and TDTYPE are only thing we don't set, but - * they probably shouldn't be changed by people. - */ - if (chip_info->ctl_reg || chip_info->enable_dma) { - ret = -EINVAL; - dev_err(&spi->dev, "don't set ctl_reg/enable_dma fields"); - goto error; - } - chip->cs_chg_udelay = chip_info->cs_chg_udelay; - chip->idle_tx_val = chip_info->idle_tx_val; - spi->bits_per_word = chip_info->bits_per_word; - } - } - - if (spi->bits_per_word != 8 && spi->bits_per_word != 16) { - ret = -EINVAL; - goto error; - } - - /* translate common spi framework into our register - * following configure contents are same for tx and rx. - */ - - if (spi->mode & SPI_CPHA) - chip->ctl_reg &= ~TCKFE; - else - chip->ctl_reg |= TCKFE; - - if (spi->mode & SPI_LSB_FIRST) - chip->ctl_reg |= TLSBIT; - else - chip->ctl_reg &= ~TLSBIT; - - /* Sport in master mode */ - chip->ctl_reg |= ITCLK | ITFS | TFSR | LATFS | LTFS; - - chip->baud = bfin_sport_hz_to_spi_baud(spi->max_speed_hz); - - chip->cs_gpio = spi->chip_select; - ret = gpio_request(chip->cs_gpio, spi->modalias); - if (ret) - goto error; - - dev_dbg(&spi->dev, "setup spi chip %s, width is %d\n", - spi->modalias, spi->bits_per_word); - dev_dbg(&spi->dev, "ctl_reg is 0x%x, GPIO is %i\n", - chip->ctl_reg, spi->chip_select); - - spi_set_ctldata(spi, chip); - - bfin_sport_spi_cs_deactive(chip); - - return ret; - - error: - kfree(first); - return ret; -} - -/* - * callback for spi framework. 
- * clean driver specific data - */ -static void -bfin_sport_spi_cleanup(struct spi_device *spi) -{ - struct bfin_sport_spi_slave_data *chip = spi_get_ctldata(spi); - - if (!chip) - return; - - gpio_free(chip->cs_gpio); - - kfree(chip); -} - -static int -bfin_sport_spi_init_queue(struct bfin_sport_spi_master_data *drv_data) -{ - INIT_LIST_HEAD(&drv_data->queue); - spin_lock_init(&drv_data->lock); - - drv_data->run = false; - drv_data->busy = 0; - - /* init transfer tasklet */ - tasklet_init(&drv_data->pump_transfers, - bfin_sport_spi_pump_transfers, (unsigned long)drv_data); - - /* init messages workqueue */ - INIT_WORK(&drv_data->pump_messages, bfin_sport_spi_pump_messages); - drv_data->workqueue = - create_singlethread_workqueue(dev_name(drv_data->master->dev.parent)); - if (drv_data->workqueue == NULL) - return -EBUSY; - - return 0; -} - -static int -bfin_sport_spi_start_queue(struct bfin_sport_spi_master_data *drv_data) -{ - unsigned long flags; - - spin_lock_irqsave(&drv_data->lock, flags); - - if (drv_data->run || drv_data->busy) { - spin_unlock_irqrestore(&drv_data->lock, flags); - return -EBUSY; - } - - drv_data->run = true; - drv_data->cur_msg = NULL; - drv_data->cur_transfer = NULL; - drv_data->cur_chip = NULL; - spin_unlock_irqrestore(&drv_data->lock, flags); - - queue_work(drv_data->workqueue, &drv_data->pump_messages); - - return 0; -} - -static inline int -bfin_sport_spi_stop_queue(struct bfin_sport_spi_master_data *drv_data) -{ - unsigned long flags; - unsigned limit = 500; - int status = 0; - - spin_lock_irqsave(&drv_data->lock, flags); - - /* - * This is a bit lame, but is optimized for the common execution path. - * A wait_queue on the drv_data->busy could be used, but then the common - * execution path (pump_messages) would be required to call wake_up or - * friends on every SPI message. 
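
For illustration only, the wait_queue alternative that the comment above alludes to could look roughly like the sketch below; it assumes a wait_queue_head_t added to the driver and a matching wake_up() in pump_messages() once the queue goes idle, neither of which exists in this driver.

static DECLARE_WAIT_QUEUE_HEAD(sport_spi_idle_wq);	/* would really live in drv_data */

static int
bfin_sport_spi_stop_queue_waitq(struct bfin_sport_spi_master_data *drv_data)
{
	unsigned long flags;

	spin_lock_irqsave(&drv_data->lock, flags);
	drv_data->run = false;
	spin_unlock_irqrestore(&drv_data->lock, flags);

	/* pump_messages() would need to wake_up(&sport_spi_idle_wq) when idle */
	if (!wait_event_timeout(sport_spi_idle_wq,
				list_empty(&drv_data->queue) && !drv_data->busy,
				msecs_to_jiffies(5000)))
		return -EBUSY;

	return 0;
}
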
Do this instead - */ - drv_data->run = false; - while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) { - spin_unlock_irqrestore(&drv_data->lock, flags); - msleep(10); - spin_lock_irqsave(&drv_data->lock, flags); - } - - if (!list_empty(&drv_data->queue) || drv_data->busy) - status = -EBUSY; - - spin_unlock_irqrestore(&drv_data->lock, flags); - - return status; -} - -static inline int -bfin_sport_spi_destroy_queue(struct bfin_sport_spi_master_data *drv_data) -{ - int status; - - status = bfin_sport_spi_stop_queue(drv_data); - if (status) - return status; - - destroy_workqueue(drv_data->workqueue); - - return 0; -} - -static int __devinit -bfin_sport_spi_probe(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - struct bfin5xx_spi_master *platform_info; - struct spi_master *master; - struct resource *res, *ires; - struct bfin_sport_spi_master_data *drv_data; - int status; - - platform_info = dev->platform_data; - - /* Allocate master with space for drv_data */ - master = spi_alloc_master(dev, sizeof(*master) + 16); - if (!master) { - dev_err(dev, "cannot alloc spi_master\n"); - return -ENOMEM; - } - - drv_data = spi_master_get_devdata(master); - drv_data->master = master; - drv_data->dev = dev; - drv_data->pin_req = platform_info->pin_req; - - master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST; - master->bus_num = pdev->id; - master->num_chipselect = platform_info->num_chipselect; - master->cleanup = bfin_sport_spi_cleanup; - master->setup = bfin_sport_spi_setup; - master->transfer = bfin_sport_spi_transfer; - - /* Find and map our resources */ - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (res == NULL) { - dev_err(dev, "cannot get IORESOURCE_MEM\n"); - status = -ENOENT; - goto out_error_get_res; - } - - drv_data->regs = ioremap(res->start, resource_size(res)); - if (drv_data->regs == NULL) { - dev_err(dev, "cannot map registers\n"); - status = -ENXIO; - goto out_error_ioremap; - } - - ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (!ires) { - dev_err(dev, "cannot get IORESOURCE_IRQ\n"); - status = -ENODEV; - goto out_error_get_ires; - } - drv_data->err_irq = ires->start; - - /* Initial and start queue */ - status = bfin_sport_spi_init_queue(drv_data); - if (status) { - dev_err(dev, "problem initializing queue\n"); - goto out_error_queue_alloc; - } - - status = bfin_sport_spi_start_queue(drv_data); - if (status) { - dev_err(dev, "problem starting queue\n"); - goto out_error_queue_alloc; - } - - status = request_irq(drv_data->err_irq, sport_err_handler, - 0, "sport_spi_err", drv_data); - if (status) { - dev_err(dev, "unable to request sport err irq\n"); - goto out_error_irq; - } - - status = peripheral_request_list(drv_data->pin_req, DRV_NAME); - if (status) { - dev_err(dev, "requesting peripherals failed\n"); - goto out_error_peripheral; - } - - /* Register with the SPI framework */ - platform_set_drvdata(pdev, drv_data); - status = spi_register_master(master); - if (status) { - dev_err(dev, "problem registering spi master\n"); - goto out_error_master; - } - - dev_info(dev, "%s, regs_base@%p\n", DRV_DESC, drv_data->regs); - return 0; - - out_error_master: - peripheral_free_list(drv_data->pin_req); - out_error_peripheral: - free_irq(drv_data->err_irq, drv_data); - out_error_irq: - out_error_queue_alloc: - bfin_sport_spi_destroy_queue(drv_data); - out_error_get_ires: - iounmap(drv_data->regs); - out_error_ioremap: - out_error_get_res: - spi_master_put(master); - - return status; -} - -/* stop hardware and remove the driver */ 
-static int __devexit -bfin_sport_spi_remove(struct platform_device *pdev) -{ - struct bfin_sport_spi_master_data *drv_data = platform_get_drvdata(pdev); - int status = 0; - - if (!drv_data) - return 0; - - /* Remove the queue */ - status = bfin_sport_spi_destroy_queue(drv_data); - if (status) - return status; - - /* Disable the SSP at the peripheral and SOC level */ - bfin_sport_spi_disable(drv_data); - - /* Disconnect from the SPI framework */ - spi_unregister_master(drv_data->master); - - peripheral_free_list(drv_data->pin_req); - - /* Prevent double remove */ - platform_set_drvdata(pdev, NULL); - - return 0; -} - -#ifdef CONFIG_PM -static int -bfin_sport_spi_suspend(struct platform_device *pdev, pm_message_t state) -{ - struct bfin_sport_spi_master_data *drv_data = platform_get_drvdata(pdev); - int status; - - status = bfin_sport_spi_stop_queue(drv_data); - if (status) - return status; - - /* stop hardware */ - bfin_sport_spi_disable(drv_data); - - return status; -} - -static int -bfin_sport_spi_resume(struct platform_device *pdev) -{ - struct bfin_sport_spi_master_data *drv_data = platform_get_drvdata(pdev); - int status; - - /* Enable the SPI interface */ - bfin_sport_spi_enable(drv_data); - - /* Start the queue running */ - status = bfin_sport_spi_start_queue(drv_data); - if (status) - dev_err(drv_data->dev, "problem resuming queue\n"); - - return status; -} -#else -# define bfin_sport_spi_suspend NULL -# define bfin_sport_spi_resume NULL -#endif - -static struct platform_driver bfin_sport_spi_driver = { - .driver = { - .name = DRV_NAME, - .owner = THIS_MODULE, - }, - .probe = bfin_sport_spi_probe, - .remove = __devexit_p(bfin_sport_spi_remove), - .suspend = bfin_sport_spi_suspend, - .resume = bfin_sport_spi_resume, -}; - -static int __init bfin_sport_spi_init(void) -{ - return platform_driver_register(&bfin_sport_spi_driver); -} -module_init(bfin_sport_spi_init); - -static void __exit bfin_sport_spi_exit(void) -{ - platform_driver_unregister(&bfin_sport_spi_driver); -} -module_exit(bfin_sport_spi_exit); diff --git a/drivers/spi/spi_bitbang.c b/drivers/spi/spi_bitbang.c deleted file mode 100644 index bb38c83..0000000 --- a/drivers/spi/spi_bitbang.c +++ /dev/null @@ -1,505 +0,0 @@ -/* - * spi_bitbang.c - polling/bitbanging SPI master controller driver utilities - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - - -/*----------------------------------------------------------------------*/ - -/* - * FIRST PART (OPTIONAL): word-at-a-time spi_transfer support. - * Use this for GPIO or shift-register level hardware APIs. - * - * spi_bitbang_cs is in spi_device->controller_state, which is unavailable - * to glue code. 
These bitbang setup() and cleanup() routines are always - * used, though maybe they're called from controller-aware code. - * - * chipselect() and friends may use use spi_device->controller_data and - * controller registers as appropriate. - * - * - * NOTE: SPI controller pins can often be used as GPIO pins instead, - * which means you could use a bitbang driver either to get hardware - * working quickly, or testing for differences that aren't speed related. - */ - -struct spi_bitbang_cs { - unsigned nsecs; /* (clock cycle time)/2 */ - u32 (*txrx_word)(struct spi_device *spi, unsigned nsecs, - u32 word, u8 bits); - unsigned (*txrx_bufs)(struct spi_device *, - u32 (*txrx_word)( - struct spi_device *spi, - unsigned nsecs, - u32 word, u8 bits), - unsigned, struct spi_transfer *); -}; - -static unsigned bitbang_txrx_8( - struct spi_device *spi, - u32 (*txrx_word)(struct spi_device *spi, - unsigned nsecs, - u32 word, u8 bits), - unsigned ns, - struct spi_transfer *t -) { - unsigned bits = t->bits_per_word ? : spi->bits_per_word; - unsigned count = t->len; - const u8 *tx = t->tx_buf; - u8 *rx = t->rx_buf; - - while (likely(count > 0)) { - u8 word = 0; - - if (tx) - word = *tx++; - word = txrx_word(spi, ns, word, bits); - if (rx) - *rx++ = word; - count -= 1; - } - return t->len - count; -} - -static unsigned bitbang_txrx_16( - struct spi_device *spi, - u32 (*txrx_word)(struct spi_device *spi, - unsigned nsecs, - u32 word, u8 bits), - unsigned ns, - struct spi_transfer *t -) { - unsigned bits = t->bits_per_word ? : spi->bits_per_word; - unsigned count = t->len; - const u16 *tx = t->tx_buf; - u16 *rx = t->rx_buf; - - while (likely(count > 1)) { - u16 word = 0; - - if (tx) - word = *tx++; - word = txrx_word(spi, ns, word, bits); - if (rx) - *rx++ = word; - count -= 2; - } - return t->len - count; -} - -static unsigned bitbang_txrx_32( - struct spi_device *spi, - u32 (*txrx_word)(struct spi_device *spi, - unsigned nsecs, - u32 word, u8 bits), - unsigned ns, - struct spi_transfer *t -) { - unsigned bits = t->bits_per_word ? 
: spi->bits_per_word; - unsigned count = t->len; - const u32 *tx = t->tx_buf; - u32 *rx = t->rx_buf; - - while (likely(count > 3)) { - u32 word = 0; - - if (tx) - word = *tx++; - word = txrx_word(spi, ns, word, bits); - if (rx) - *rx++ = word; - count -= 4; - } - return t->len - count; -} - -int spi_bitbang_setup_transfer(struct spi_device *spi, struct spi_transfer *t) -{ - struct spi_bitbang_cs *cs = spi->controller_state; - u8 bits_per_word; - u32 hz; - - if (t) { - bits_per_word = t->bits_per_word; - hz = t->speed_hz; - } else { - bits_per_word = 0; - hz = 0; - } - - /* spi_transfer level calls that work per-word */ - if (!bits_per_word) - bits_per_word = spi->bits_per_word; - if (bits_per_word <= 8) - cs->txrx_bufs = bitbang_txrx_8; - else if (bits_per_word <= 16) - cs->txrx_bufs = bitbang_txrx_16; - else if (bits_per_word <= 32) - cs->txrx_bufs = bitbang_txrx_32; - else - return -EINVAL; - - /* nsecs = (clock period)/2 */ - if (!hz) - hz = spi->max_speed_hz; - if (hz) { - cs->nsecs = (1000000000/2) / hz; - if (cs->nsecs > (MAX_UDELAY_MS * 1000 * 1000)) - return -EINVAL; - } - - return 0; -} -EXPORT_SYMBOL_GPL(spi_bitbang_setup_transfer); - -/** - * spi_bitbang_setup - default setup for per-word I/O loops - */ -int spi_bitbang_setup(struct spi_device *spi) -{ - struct spi_bitbang_cs *cs = spi->controller_state; - struct spi_bitbang *bitbang; - int retval; - unsigned long flags; - - bitbang = spi_master_get_devdata(spi->master); - - if (!cs) { - cs = kzalloc(sizeof *cs, GFP_KERNEL); - if (!cs) - return -ENOMEM; - spi->controller_state = cs; - } - - /* per-word shift register access, in hardware or bitbanging */ - cs->txrx_word = bitbang->txrx_word[spi->mode & (SPI_CPOL|SPI_CPHA)]; - if (!cs->txrx_word) - return -EINVAL; - - retval = bitbang->setup_transfer(spi, NULL); - if (retval < 0) - return retval; - - dev_dbg(&spi->dev, "%s, %u nsec/bit\n", __func__, 2 * cs->nsecs); - - /* NOTE we _need_ to call chipselect() early, ideally with adapter - * setup, unless the hardware defaults cooperate to avoid confusion - * between normal (active low) and inverted chipselects. - */ - - /* deselect chip (low or high) */ - spin_lock_irqsave(&bitbang->lock, flags); - if (!bitbang->busy) { - bitbang->chipselect(spi, BITBANG_CS_INACTIVE); - ndelay(cs->nsecs); - } - spin_unlock_irqrestore(&bitbang->lock, flags); - - return 0; -} -EXPORT_SYMBOL_GPL(spi_bitbang_setup); - -/** - * spi_bitbang_cleanup - default cleanup for per-word I/O loops - */ -void spi_bitbang_cleanup(struct spi_device *spi) -{ - kfree(spi->controller_state); -} -EXPORT_SYMBOL_GPL(spi_bitbang_cleanup); - -static int spi_bitbang_bufs(struct spi_device *spi, struct spi_transfer *t) -{ - struct spi_bitbang_cs *cs = spi->controller_state; - unsigned nsecs = cs->nsecs; - - return cs->txrx_bufs(spi, cs->txrx_word, nsecs, t); -} - -/*----------------------------------------------------------------------*/ - -/* - * SECOND PART ... simple transfer queue runner. - * - * This costs a task context per controller, running the queue by - * performing each transfer in sequence. Smarter hardware can queue - * several DMA transfers at once, and process several controller queues - * in parallel; this driver doesn't match such hardware very well. - * - * Drivers can provide word-at-a-time i/o primitives, or provide - * transfer-at-a-time ones to leverage dma or fifo hardware. 
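
As a concrete trace of spi_bitbang_setup_transfer() above (all values invented for illustration): a transfer asking for 12-bit words at 2 MHz selects the 16-bit buffer walker and a half-period of 250 ns.

static u16 ex_tx[4], ex_rx[4];			/* two bytes carry one 12-bit word */

static struct spi_transfer ex_transfer = {
	.tx_buf		= ex_tx,
	.rx_buf		= ex_rx,
	.len		= sizeof(ex_tx),	/* byte count, walked two bytes at a time */
	.bits_per_word	= 12,			/* 8 < 12 <= 16, so txrx_bufs = bitbang_txrx_16 */
	.speed_hz	= 2 * 1000 * 1000,	/* cs->nsecs = (1000000000 / 2) / 2000000 = 250 */
};
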
- */ -static void bitbang_work(struct work_struct *work) -{ - struct spi_bitbang *bitbang = - container_of(work, struct spi_bitbang, work); - unsigned long flags; - - spin_lock_irqsave(&bitbang->lock, flags); - bitbang->busy = 1; - while (!list_empty(&bitbang->queue)) { - struct spi_message *m; - struct spi_device *spi; - unsigned nsecs; - struct spi_transfer *t = NULL; - unsigned tmp; - unsigned cs_change; - int status; - int do_setup = -1; - - m = container_of(bitbang->queue.next, struct spi_message, - queue); - list_del_init(&m->queue); - spin_unlock_irqrestore(&bitbang->lock, flags); - - /* FIXME this is made-up ... the correct value is known to - * word-at-a-time bitbang code, and presumably chipselect() - * should enforce these requirements too? - */ - nsecs = 100; - - spi = m->spi; - tmp = 0; - cs_change = 1; - status = 0; - - list_for_each_entry (t, &m->transfers, transfer_list) { - - /* override speed or wordsize? */ - if (t->speed_hz || t->bits_per_word) - do_setup = 1; - - /* init (-1) or override (1) transfer params */ - if (do_setup != 0) { - status = bitbang->setup_transfer(spi, t); - if (status < 0) - break; - if (do_setup == -1) - do_setup = 0; - } - - /* set up default clock polarity, and activate chip; - * this implicitly updates clock and spi modes as - * previously recorded for this device via setup(). - * (and also deselects any other chip that might be - * selected ...) - */ - if (cs_change) { - bitbang->chipselect(spi, BITBANG_CS_ACTIVE); - ndelay(nsecs); - } - cs_change = t->cs_change; - if (!t->tx_buf && !t->rx_buf && t->len) { - status = -EINVAL; - break; - } - - /* transfer data. the lower level code handles any - * new dma mappings it needs. our caller always gave - * us dma-safe buffers. - */ - if (t->len) { - /* REVISIT dma API still needs a designated - * DMA_ADDR_INVALID; ~0 might be better. - */ - if (!m->is_dma_mapped) - t->rx_dma = t->tx_dma = 0; - status = bitbang->txrx_bufs(spi, t); - } - if (status > 0) - m->actual_length += status; - if (status != t->len) { - /* always report some kind of error */ - if (status >= 0) - status = -EREMOTEIO; - break; - } - status = 0; - - /* protocol tweaks before next transfer */ - if (t->delay_usecs) - udelay(t->delay_usecs); - - if (!cs_change) - continue; - if (t->transfer_list.next == &m->transfers) - break; - - /* sometimes a short mid-message deselect of the chip - * may be needed to terminate a mode or command - */ - ndelay(nsecs); - bitbang->chipselect(spi, BITBANG_CS_INACTIVE); - ndelay(nsecs); - } - - m->status = status; - m->complete(m->context); - - /* normally deactivate chipselect ... unless no error and - * cs_change has hinted that the next message will probably - * be for this chip too. 
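
Seen from a protocol driver, the loop above is exercised by messages such as the following sketch; the command bytes, lengths and rates are made up, only the spi core calls and spi_transfer fields are the real interface.

#include <linux/spi/spi.h>

static int ex_cmd_then_read(struct spi_device *spi)
{
	static u8 cmd[2] = { 0x9f, 0x00 };	/* arbitrary command bytes */
	static u8 resp[4];
	struct spi_transfer t[2] = {
		{
			.tx_buf		= cmd,
			.len		= sizeof(cmd),
			.delay_usecs	= 10,	/* honored after this transfer */
			.cs_change	= 1,	/* short mid-message deselect follows */
		}, {
			.rx_buf		= resp,
			.len		= sizeof(resp),
			.speed_hz	= 500000, /* non-zero, so do_setup re-runs setup_transfer() */
		},
	};
	struct spi_message m;

	spi_message_init(&m);
	spi_message_add_tail(&t[0], &m);
	spi_message_add_tail(&t[1], &m);

	return spi_sync(spi, &m);
}
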
- */ - if (!(status == 0 && cs_change)) { - ndelay(nsecs); - bitbang->chipselect(spi, BITBANG_CS_INACTIVE); - ndelay(nsecs); - } - - spin_lock_irqsave(&bitbang->lock, flags); - } - bitbang->busy = 0; - spin_unlock_irqrestore(&bitbang->lock, flags); -} - -/** - * spi_bitbang_transfer - default submit to transfer queue - */ -int spi_bitbang_transfer(struct spi_device *spi, struct spi_message *m) -{ - struct spi_bitbang *bitbang; - unsigned long flags; - int status = 0; - - m->actual_length = 0; - m->status = -EINPROGRESS; - - bitbang = spi_master_get_devdata(spi->master); - - spin_lock_irqsave(&bitbang->lock, flags); - if (!spi->max_speed_hz) - status = -ENETDOWN; - else { - list_add_tail(&m->queue, &bitbang->queue); - queue_work(bitbang->workqueue, &bitbang->work); - } - spin_unlock_irqrestore(&bitbang->lock, flags); - - return status; -} -EXPORT_SYMBOL_GPL(spi_bitbang_transfer); - -/*----------------------------------------------------------------------*/ - -/** - * spi_bitbang_start - start up a polled/bitbanging SPI master driver - * @bitbang: driver handle - * - * Caller should have zero-initialized all parts of the structure, and then - * provided callbacks for chip selection and I/O loops. If the master has - * a transfer method, its final step should call spi_bitbang_transfer; or, - * that's the default if the transfer routine is not initialized. It should - * also set up the bus number and number of chipselects. - * - * For i/o loops, provide callbacks either per-word (for bitbanging, or for - * hardware that basically exposes a shift register) or per-spi_transfer - * (which takes better advantage of hardware like fifos or DMA engines). - * - * Drivers using per-word I/O loops should use (or call) spi_bitbang_setup, - * spi_bitbang_cleanup and spi_bitbang_setup_transfer to handle those spi - * master methods. Those methods are the defaults if the bitbang->txrx_bufs - * routine isn't initialized. - * - * This routine registers the spi_master, which will process requests in a - * dedicated task, keeping IRQs unblocked most of the time. To stop - * processing those requests, call spi_bitbang_stop(). 
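
A minimal per-word user of this API could be sketched as follows; every name, number and the word-shifting body are placeholders, only the spi_bitbang/spi_master calls are the real interface.

#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>

struct ex_bitbang {
	struct spi_bitbang	bitbang;
	/* controller-specific state goes here */
};

static void ex_chipselect(struct spi_device *spi, int is_on)
{
	/* drive the CS line for spi->chip_select; is_on is
	 * BITBANG_CS_ACTIVE or BITBANG_CS_INACTIVE
	 */
}

static u32 ex_txrx_word_mode0(struct spi_device *spi, unsigned nsecs,
			      u32 word, u8 bits)
{
	/* shift one word out/in, e.g. via bitbang_txrx_be_cpha0() */
	return word;
}

static int ex_register(struct device *dev)
{
	struct spi_master *master;
	struct ex_bitbang *hw;

	master = spi_alloc_master(dev, sizeof(*hw));
	if (!master)
		return -ENOMEM;

	master->bus_num = 0;		/* placeholder */
	master->num_chipselect = 1;

	hw = spi_master_get_devdata(master);
	hw->bitbang.master = spi_master_get(master);
	hw->bitbang.chipselect = ex_chipselect;
	hw->bitbang.txrx_word[SPI_MODE_0] = ex_txrx_word_mode0;

	/* no txrx_bufs and no master->setup, so spi_bitbang_start() installs
	 * the spi_bitbang_setup(), spi_bitbang_cleanup() and
	 * spi_bitbang_setup_transfer() defaults described above
	 */
	return spi_bitbang_start(&hw->bitbang);
}
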
- */ -int spi_bitbang_start(struct spi_bitbang *bitbang) -{ - int status; - - if (!bitbang->master || !bitbang->chipselect) - return -EINVAL; - - INIT_WORK(&bitbang->work, bitbang_work); - spin_lock_init(&bitbang->lock); - INIT_LIST_HEAD(&bitbang->queue); - - if (!bitbang->master->mode_bits) - bitbang->master->mode_bits = SPI_CPOL | SPI_CPHA | bitbang->flags; - - if (!bitbang->master->transfer) - bitbang->master->transfer = spi_bitbang_transfer; - if (!bitbang->txrx_bufs) { - bitbang->use_dma = 0; - bitbang->txrx_bufs = spi_bitbang_bufs; - if (!bitbang->master->setup) { - if (!bitbang->setup_transfer) - bitbang->setup_transfer = - spi_bitbang_setup_transfer; - bitbang->master->setup = spi_bitbang_setup; - bitbang->master->cleanup = spi_bitbang_cleanup; - } - } else if (!bitbang->master->setup) - return -EINVAL; - if (bitbang->master->transfer == spi_bitbang_transfer && - !bitbang->setup_transfer) - return -EINVAL; - - /* this task is the only thing to touch the SPI bits */ - bitbang->busy = 0; - bitbang->workqueue = create_singlethread_workqueue( - dev_name(bitbang->master->dev.parent)); - if (bitbang->workqueue == NULL) { - status = -EBUSY; - goto err1; - } - - /* driver may get busy before register() returns, especially - * if someone registered boardinfo for devices - */ - status = spi_register_master(bitbang->master); - if (status < 0) - goto err2; - - return status; - -err2: - destroy_workqueue(bitbang->workqueue); -err1: - return status; -} -EXPORT_SYMBOL_GPL(spi_bitbang_start); - -/** - * spi_bitbang_stop - stops the task providing spi communication - */ -int spi_bitbang_stop(struct spi_bitbang *bitbang) -{ - spi_unregister_master(bitbang->master); - - WARN_ON(!list_empty(&bitbang->queue)); - - destroy_workqueue(bitbang->workqueue); - - return 0; -} -EXPORT_SYMBOL_GPL(spi_bitbang_stop); - -MODULE_LICENSE("GPL"); - diff --git a/drivers/spi/spi_bitbang_txrx.h b/drivers/spi/spi_bitbang_txrx.h deleted file mode 100644 index c16bf85..0000000 --- a/drivers/spi/spi_bitbang_txrx.h +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Mix this utility code with some glue code to get one of several types of - * simple SPI master driver. Two do polled word-at-a-time I/O: - * - * - GPIO/parport bitbangers. Provide chipselect() and txrx_word[](), - * expanding the per-word routines from the inline templates below. - * - * - Drivers for controllers resembling bare shift registers. Provide - * chipselect() and txrx_word[](), with custom setup()/cleanup() methods - * that use your controller's clock and chipselect registers. - * - * Some hardware works well with requests at spi_transfer scope: - * - * - Drivers leveraging smarter hardware, with fifos or DMA; or for half - * duplex (MicroWire) controllers. Provide chipselect() and txrx_bufs(), - * and custom setup()/cleanup() methods. - */ - -/* - * The code that knows what GPIO pins do what should have declared four - * functions, ideally as inlines, before including this header: - * - * void setsck(struct spi_device *, int is_on); - * void setmosi(struct spi_device *, int is_on); - * int getmiso(struct spi_device *); - * void spidelay(unsigned); - * - * setsck()'s is_on parameter is a zero/nonzero boolean. - * - * setmosi()'s is_on parameter is a zero/nonzero boolean. - * - * getmiso() is required to return 0 or 1 only. Any other value is invalid - * and will result in improper operation. - * - * A non-inlined routine would call bitbang_txrx_*() routines. 
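
As an example of glue satisfying that contract, a GPIO-based user might declare something like the sketch below before including this header; the GPIO numbers are invented, gpio_set_value()/gpio_get_value()/ndelay() are the stock kernel helpers, and the resulting wrapper is what would be plugged into txrx_word[SPI_MODE_0].

#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/spi/spi.h>

#define EX_GPIO_SCK	10		/* placeholder GPIO numbers */
#define EX_GPIO_MOSI	11
#define EX_GPIO_MISO	12

static inline void setsck(struct spi_device *spi, int is_on)
{
	gpio_set_value(EX_GPIO_SCK, is_on);
}

static inline void setmosi(struct spi_device *spi, int is_on)
{
	gpio_set_value(EX_GPIO_MOSI, is_on);
}

static inline int getmiso(struct spi_device *spi)
{
	return !!gpio_get_value(EX_GPIO_MISO);	/* must return 0 or 1 only */
}

#define spidelay(nsecs)	ndelay(nsecs)

#include "spi_bitbang_txrx.h"

static u32 ex_gpio_txrx_word_mode0(struct spi_device *spi, unsigned nsecs,
				   u32 word, u8 bits)
{
	return bitbang_txrx_be_cpha0(spi, nsecs, 0, 0, word, bits);
}
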
The - * main loop could easily compile down to a handful of instructions, - * especially if the delay is a NOP (to run at peak speed). - * - * Since this is software, the timings may not be exactly what your board's - * chips need ... there may be several reasons you'd need to tweak timings - * in these routines, not just make to make it faster or slower to match a - * particular CPU clock rate. - */ - -static inline u32 -bitbang_txrx_be_cpha0(struct spi_device *spi, - unsigned nsecs, unsigned cpol, unsigned flags, - u32 word, u8 bits) -{ - /* if (cpol == 0) this is SPI_MODE_0; else this is SPI_MODE_2 */ - - /* clock starts at inactive polarity */ - for (word <<= (32 - bits); likely(bits); bits--) { - - /* setup MSB (to slave) on trailing edge */ - if ((flags & SPI_MASTER_NO_TX) == 0) - setmosi(spi, word & (1 << 31)); - spidelay(nsecs); /* T(setup) */ - - setsck(spi, !cpol); - spidelay(nsecs); - - /* sample MSB (from slave) on leading edge */ - word <<= 1; - if ((flags & SPI_MASTER_NO_RX) == 0) - word |= getmiso(spi); - setsck(spi, cpol); - } - return word; -} - -static inline u32 -bitbang_txrx_be_cpha1(struct spi_device *spi, - unsigned nsecs, unsigned cpol, unsigned flags, - u32 word, u8 bits) -{ - /* if (cpol == 0) this is SPI_MODE_1; else this is SPI_MODE_3 */ - - /* clock starts at inactive polarity */ - for (word <<= (32 - bits); likely(bits); bits--) { - - /* setup MSB (to slave) on leading edge */ - setsck(spi, !cpol); - if ((flags & SPI_MASTER_NO_TX) == 0) - setmosi(spi, word & (1 << 31)); - spidelay(nsecs); /* T(setup) */ - - setsck(spi, cpol); - spidelay(nsecs); - - /* sample MSB (from slave) on trailing edge */ - word <<= 1; - if ((flags & SPI_MASTER_NO_RX) == 0) - word |= getmiso(spi); - } - return word; -} diff --git a/drivers/spi/spi_butterfly.c b/drivers/spi/spi_butterfly.c deleted file mode 100644 index 0d4ceba..0000000 --- a/drivers/spi/spi_butterfly.c +++ /dev/null @@ -1,356 +0,0 @@ -/* - * spi_butterfly.c - parport-to-butterfly adapter - * - * Copyright (C) 2005 David Brownell - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - */ -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#include - - -/* - * This uses SPI to talk with an "AVR Butterfly", which is a $US20 card - * with a battery powered AVR microcontroller and lots of goodies. You - * can use GCC to develop firmware for this. - * - * See Documentation/spi/butterfly for information about how to build - * and use this custom parallel port cable. 
- */ - - -/* DATA output bits (pins 2..9 == D0..D7) */ -#define butterfly_nreset (1 << 1) /* pin 3 */ - -#define spi_sck_bit (1 << 0) /* pin 2 */ -#define spi_mosi_bit (1 << 7) /* pin 9 */ - -#define vcc_bits ((1 << 6) | (1 << 5)) /* pins 7, 8 */ - -/* STATUS input bits */ -#define spi_miso_bit PARPORT_STATUS_BUSY /* pin 11 */ - -/* CONTROL output bits */ -#define spi_cs_bit PARPORT_CONTROL_SELECT /* pin 17 */ - - - -static inline struct butterfly *spidev_to_pp(struct spi_device *spi) -{ - return spi->controller_data; -} - - -struct butterfly { - /* REVISIT ... for now, this must be first */ - struct spi_bitbang bitbang; - - struct parport *port; - struct pardevice *pd; - - u8 lastbyte; - - struct spi_device *dataflash; - struct spi_device *butterfly; - struct spi_board_info info[2]; - -}; - -/*----------------------------------------------------------------------*/ - -static inline void -setsck(struct spi_device *spi, int is_on) -{ - struct butterfly *pp = spidev_to_pp(spi); - u8 bit, byte = pp->lastbyte; - - bit = spi_sck_bit; - - if (is_on) - byte |= bit; - else - byte &= ~bit; - parport_write_data(pp->port, byte); - pp->lastbyte = byte; -} - -static inline void -setmosi(struct spi_device *spi, int is_on) -{ - struct butterfly *pp = spidev_to_pp(spi); - u8 bit, byte = pp->lastbyte; - - bit = spi_mosi_bit; - - if (is_on) - byte |= bit; - else - byte &= ~bit; - parport_write_data(pp->port, byte); - pp->lastbyte = byte; -} - -static inline int getmiso(struct spi_device *spi) -{ - struct butterfly *pp = spidev_to_pp(spi); - int value; - u8 bit; - - bit = spi_miso_bit; - - /* only STATUS_BUSY is NOT negated */ - value = !(parport_read_status(pp->port) & bit); - return (bit == PARPORT_STATUS_BUSY) ? value : !value; -} - -static void butterfly_chipselect(struct spi_device *spi, int value) -{ - struct butterfly *pp = spidev_to_pp(spi); - - /* set default clock polarity */ - if (value != BITBANG_CS_INACTIVE) - setsck(spi, spi->mode & SPI_CPOL); - - /* here, value == "activate or not"; - * most PARPORT_CONTROL_* bits are negated, so we must - * morph it to value == "bit value to write in control register" - */ - if (spi_cs_bit == PARPORT_CONTROL_INIT) - value = !value; - - parport_frob_control(pp->port, spi_cs_bit, value ? spi_cs_bit : 0); -} - - -/* we only needed to implement one mode here, and choose SPI_MODE_0 */ - -#define spidelay(X) do{}while(0) -//#define spidelay ndelay - -#include "spi_bitbang_txrx.h" - -static u32 -butterfly_txrx_word_mode0(struct spi_device *spi, - unsigned nsecs, - u32 word, u8 bits) -{ - return bitbang_txrx_be_cpha0(spi, nsecs, 0, 0, word, bits); -} - -/*----------------------------------------------------------------------*/ - -/* override default partitioning with cmdlinepart */ -static struct mtd_partition partitions[] = { { - /* JFFS2 wants partitions of 4*N blocks for this device, - * so sectors 0 and 1 can't be partitions by themselves. 
- */ - - /* sector 0 = 8 pages * 264 bytes/page (1 block) - * sector 1 = 248 pages * 264 bytes/page - */ - .name = "bookkeeping", // 66 KB - .offset = 0, - .size = (8 + 248) * 264, -// .mask_flags = MTD_WRITEABLE, -}, { - /* sector 2 = 256 pages * 264 bytes/page - * sectors 3-5 = 512 pages * 264 bytes/page - */ - .name = "filesystem", // 462 KB - .offset = MTDPART_OFS_APPEND, - .size = MTDPART_SIZ_FULL, -} }; - -static struct flash_platform_data flash = { - .name = "butterflash", - .parts = partitions, - .nr_parts = ARRAY_SIZE(partitions), -}; - - -/* REVISIT remove this ugly global and its "only one" limitation */ -static struct butterfly *butterfly; - -static void butterfly_attach(struct parport *p) -{ - struct pardevice *pd; - int status; - struct butterfly *pp; - struct spi_master *master; - struct device *dev = p->physport->dev; - - if (butterfly || !dev) - return; - - /* REVISIT: this just _assumes_ a butterfly is there ... no probe, - * and no way to be selective about what it binds to. - */ - - master = spi_alloc_master(dev, sizeof *pp); - if (!master) { - status = -ENOMEM; - goto done; - } - pp = spi_master_get_devdata(master); - - /* - * SPI and bitbang hookup - * - * use default setup(), cleanup(), and transfer() methods; and - * only bother implementing mode 0. Start it later. - */ - master->bus_num = 42; - master->num_chipselect = 2; - - pp->bitbang.master = spi_master_get(master); - pp->bitbang.chipselect = butterfly_chipselect; - pp->bitbang.txrx_word[SPI_MODE_0] = butterfly_txrx_word_mode0; - - /* - * parport hookup - */ - pp->port = p; - pd = parport_register_device(p, "spi_butterfly", - NULL, NULL, NULL, - 0 /* FLAGS */, pp); - if (!pd) { - status = -ENOMEM; - goto clean0; - } - pp->pd = pd; - - status = parport_claim(pd); - if (status < 0) - goto clean1; - - /* - * Butterfly reset, powerup, run firmware - */ - pr_debug("%s: powerup/reset Butterfly\n", p->name); - - /* nCS for dataflash (this bit is inverted on output) */ - parport_frob_control(pp->port, spi_cs_bit, 0); - - /* stabilize power with chip in reset (nRESET), and - * spi_sck_bit clear (CPOL=0) - */ - pp->lastbyte |= vcc_bits; - parport_write_data(pp->port, pp->lastbyte); - msleep(5); - - /* take it out of reset; assume long reset delay */ - pp->lastbyte |= butterfly_nreset; - parport_write_data(pp->port, pp->lastbyte); - msleep(100); - - - /* - * Start SPI ... for now, hide that we're two physical busses. - */ - status = spi_bitbang_start(&pp->bitbang); - if (status < 0) - goto clean2; - - /* Bus 1 lets us talk to at45db041b (firmware disables AVR SPI), AVR - * (firmware resets at45, acts as spi slave) or neither (we ignore - * both, AVR uses AT45). Here we expect firmware for the first option. - */ - - pp->info[0].max_speed_hz = 15 * 1000 * 1000; - strcpy(pp->info[0].modalias, "mtd_dataflash"); - pp->info[0].platform_data = &flash; - pp->info[0].chip_select = 1; - pp->info[0].controller_data = pp; - pp->dataflash = spi_new_device(pp->bitbang.master, &pp->info[0]); - if (pp->dataflash) - pr_debug("%s: dataflash at %s\n", p->name, - dev_name(&pp->dataflash->dev)); - - // dev_info(_what?_, ...) 
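
For boards whose bus number is fixed at boot, the same dataflash slave could instead be declared with a static table, sketched below with invented values apart from the fields the attach code above already fills in; the dynamic spi_new_device() path is used here because a parport adapter can come and go at run time.

static struct spi_board_info ex_board_info[] __initdata = {
	{
		.modalias	= "mtd_dataflash",
		.max_speed_hz	= 15 * 1000 * 1000,
		.bus_num	= 42,		/* matches master->bus_num above */
		.chip_select	= 1,
		.platform_data	= &flash,
	},
};

static int __init ex_board_init(void)
{
	return spi_register_board_info(ex_board_info, ARRAY_SIZE(ex_board_info));
}
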
- pr_info("%s: AVR Butterfly\n", p->name); - butterfly = pp; - return; - -clean2: - /* turn off VCC */ - parport_write_data(pp->port, 0); - - parport_release(pp->pd); -clean1: - parport_unregister_device(pd); -clean0: - (void) spi_master_put(pp->bitbang.master); -done: - pr_debug("%s: butterfly probe, fail %d\n", p->name, status); -} - -static void butterfly_detach(struct parport *p) -{ - struct butterfly *pp; - int status; - - /* FIXME this global is ugly ... but, how to quickly get from - * the parport to the "struct butterfly" associated with it? - * "old school" driver-internal device lists? - */ - if (!butterfly || butterfly->port != p) - return; - pp = butterfly; - butterfly = NULL; - - /* stop() unregisters child devices too */ - status = spi_bitbang_stop(&pp->bitbang); - - /* turn off VCC */ - parport_write_data(pp->port, 0); - msleep(10); - - parport_release(pp->pd); - parport_unregister_device(pp->pd); - - (void) spi_master_put(pp->bitbang.master); -} - -static struct parport_driver butterfly_driver = { - .name = "spi_butterfly", - .attach = butterfly_attach, - .detach = butterfly_detach, -}; - - -static int __init butterfly_init(void) -{ - return parport_register_driver(&butterfly_driver); -} -device_initcall(butterfly_init); - -static void __exit butterfly_exit(void) -{ - parport_unregister_driver(&butterfly_driver); -} -module_exit(butterfly_exit); - -MODULE_DESCRIPTION("Parport Adapter driver for AVR Butterfly"); -MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi_fsl_espi.c b/drivers/spi/spi_fsl_espi.c deleted file mode 100644 index 496f895..0000000 --- a/drivers/spi/spi_fsl_espi.c +++ /dev/null @@ -1,762 +0,0 @@ -/* - * Freescale eSPI controller driver. - * - * Copyright 2010 Freescale Semiconductor, Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. 
- */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "spi_fsl_lib.h" - -/* eSPI Controller registers */ -struct fsl_espi_reg { - __be32 mode; /* 0x000 - eSPI mode register */ - __be32 event; /* 0x004 - eSPI event register */ - __be32 mask; /* 0x008 - eSPI mask register */ - __be32 command; /* 0x00c - eSPI command register */ - __be32 transmit; /* 0x010 - eSPI transmit FIFO access register*/ - __be32 receive; /* 0x014 - eSPI receive FIFO access register*/ - u8 res[8]; /* 0x018 - 0x01c reserved */ - __be32 csmode[4]; /* 0x020 - 0x02c eSPI cs mode register */ -}; - -struct fsl_espi_transfer { - const void *tx_buf; - void *rx_buf; - unsigned len; - unsigned n_tx; - unsigned n_rx; - unsigned actual_length; - int status; -}; - -/* eSPI Controller mode register definitions */ -#define SPMODE_ENABLE (1 << 31) -#define SPMODE_LOOP (1 << 30) -#define SPMODE_TXTHR(x) ((x) << 8) -#define SPMODE_RXTHR(x) ((x) << 0) - -/* eSPI Controller CS mode register definitions */ -#define CSMODE_CI_INACTIVEHIGH (1 << 31) -#define CSMODE_CP_BEGIN_EDGECLK (1 << 30) -#define CSMODE_REV (1 << 29) -#define CSMODE_DIV16 (1 << 28) -#define CSMODE_PM(x) ((x) << 24) -#define CSMODE_POL_1 (1 << 20) -#define CSMODE_LEN(x) ((x) << 16) -#define CSMODE_BEF(x) ((x) << 12) -#define CSMODE_AFT(x) ((x) << 8) -#define CSMODE_CG(x) ((x) << 3) - -/* Default mode/csmode for eSPI controller */ -#define SPMODE_INIT_VAL (SPMODE_TXTHR(4) | SPMODE_RXTHR(3)) -#define CSMODE_INIT_VAL (CSMODE_POL_1 | CSMODE_BEF(0) \ - | CSMODE_AFT(0) | CSMODE_CG(1)) - -/* SPIE register values */ -#define SPIE_NE 0x00000200 /* Not empty */ -#define SPIE_NF 0x00000100 /* Not full */ - -/* SPIM register values */ -#define SPIM_NE 0x00000200 /* Not empty */ -#define SPIM_NF 0x00000100 /* Not full */ -#define SPIE_RXCNT(reg) ((reg >> 24) & 0x3F) -#define SPIE_TXCNT(reg) ((reg >> 16) & 0x3F) - -/* SPCOM register values */ -#define SPCOM_CS(x) ((x) << 30) -#define SPCOM_TRANLEN(x) ((x) << 0) -#define SPCOM_TRANLEN_MAX 0xFFFF /* Max transaction length */ - -static void fsl_espi_change_mode(struct spi_device *spi) -{ - struct mpc8xxx_spi *mspi = spi_master_get_devdata(spi->master); - struct spi_mpc8xxx_cs *cs = spi->controller_state; - struct fsl_espi_reg *reg_base = mspi->reg_base; - __be32 __iomem *mode = ®_base->csmode[spi->chip_select]; - __be32 __iomem *espi_mode = ®_base->mode; - u32 tmp; - unsigned long flags; - - /* Turn off IRQs locally to minimize time that SPI is disabled. 
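
To make the register layout above concrete, here is how the per-chip-select mode word ends up composed for an 8-bit, SPI mode 0, MSB-first slave, assuming a 128 MHz spibrg and a requested 1 MHz rate (both numbers invented); the divider arithmetic simply traces fsl_espi_setup_transfer() further down.

u32 csmode = CSMODE_INIT_VAL	/* CSMODE_POL_1 | CSMODE_BEF(0) | CSMODE_AFT(0) | CSMODE_CG(1) */
	   | CSMODE_REV		/* MSB first: SPI_LSB_FIRST not set */
	   | CSMODE_LEN(8 - 1)	/* word length is programmed as bits_per_word - 1 */
	   | CSMODE_DIV16	/* spibrg / hz = 128 > 64 */
	   | CSMODE_PM(1);	/* pm = (128000000 - 1) / (1000000 * 64) + 1 = 2, then pm-- */

/* SPI mode 0 leaves CSMODE_CP_BEGIN_EDGECLK and CSMODE_CI_INACTIVEHIGH clear. */
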
*/ - local_irq_save(flags); - - /* Turn off SPI unit prior changing mode */ - tmp = mpc8xxx_spi_read_reg(espi_mode); - mpc8xxx_spi_write_reg(espi_mode, tmp & ~SPMODE_ENABLE); - mpc8xxx_spi_write_reg(mode, cs->hw_mode); - mpc8xxx_spi_write_reg(espi_mode, tmp); - - local_irq_restore(flags); -} - -static u32 fsl_espi_tx_buf_lsb(struct mpc8xxx_spi *mpc8xxx_spi) -{ - u32 data; - u16 data_h; - u16 data_l; - const u32 *tx = mpc8xxx_spi->tx; - - if (!tx) - return 0; - - data = *tx++ << mpc8xxx_spi->tx_shift; - data_l = data & 0xffff; - data_h = (data >> 16) & 0xffff; - swab16s(&data_l); - swab16s(&data_h); - data = data_h | data_l; - - mpc8xxx_spi->tx = tx; - return data; -} - -static int fsl_espi_setup_transfer(struct spi_device *spi, - struct spi_transfer *t) -{ - struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master); - int bits_per_word = 0; - u8 pm; - u32 hz = 0; - struct spi_mpc8xxx_cs *cs = spi->controller_state; - - if (t) { - bits_per_word = t->bits_per_word; - hz = t->speed_hz; - } - - /* spi_transfer level calls that work per-word */ - if (!bits_per_word) - bits_per_word = spi->bits_per_word; - - /* Make sure its a bit width we support [4..16] */ - if ((bits_per_word < 4) || (bits_per_word > 16)) - return -EINVAL; - - if (!hz) - hz = spi->max_speed_hz; - - cs->rx_shift = 0; - cs->tx_shift = 0; - cs->get_rx = mpc8xxx_spi_rx_buf_u32; - cs->get_tx = mpc8xxx_spi_tx_buf_u32; - if (bits_per_word <= 8) { - cs->rx_shift = 8 - bits_per_word; - } else if (bits_per_word <= 16) { - cs->rx_shift = 16 - bits_per_word; - if (spi->mode & SPI_LSB_FIRST) - cs->get_tx = fsl_espi_tx_buf_lsb; - } else { - return -EINVAL; - } - - mpc8xxx_spi->rx_shift = cs->rx_shift; - mpc8xxx_spi->tx_shift = cs->tx_shift; - mpc8xxx_spi->get_rx = cs->get_rx; - mpc8xxx_spi->get_tx = cs->get_tx; - - bits_per_word = bits_per_word - 1; - - /* mask out bits we are going to set */ - cs->hw_mode &= ~(CSMODE_LEN(0xF) | CSMODE_DIV16 | CSMODE_PM(0xF)); - - cs->hw_mode |= CSMODE_LEN(bits_per_word); - - if ((mpc8xxx_spi->spibrg / hz) > 64) { - cs->hw_mode |= CSMODE_DIV16; - pm = (mpc8xxx_spi->spibrg - 1) / (hz * 64) + 1; - - WARN_ONCE(pm > 16, "%s: Requested speed is too low: %d Hz. 
" - "Will use %d Hz instead.\n", dev_name(&spi->dev), - hz, mpc8xxx_spi->spibrg / 1024); - if (pm > 16) - pm = 16; - } else { - pm = (mpc8xxx_spi->spibrg - 1) / (hz * 4) + 1; - } - if (pm) - pm--; - - cs->hw_mode |= CSMODE_PM(pm); - - fsl_espi_change_mode(spi); - return 0; -} - -static int fsl_espi_cpu_bufs(struct mpc8xxx_spi *mspi, struct spi_transfer *t, - unsigned int len) -{ - u32 word; - struct fsl_espi_reg *reg_base = mspi->reg_base; - - mspi->count = len; - - /* enable rx ints */ - mpc8xxx_spi_write_reg(®_base->mask, SPIM_NE); - - /* transmit word */ - word = mspi->get_tx(mspi); - mpc8xxx_spi_write_reg(®_base->transmit, word); - - return 0; -} - -static int fsl_espi_bufs(struct spi_device *spi, struct spi_transfer *t) -{ - struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master); - struct fsl_espi_reg *reg_base = mpc8xxx_spi->reg_base; - unsigned int len = t->len; - u8 bits_per_word; - int ret; - - bits_per_word = spi->bits_per_word; - if (t->bits_per_word) - bits_per_word = t->bits_per_word; - - mpc8xxx_spi->len = t->len; - len = roundup(len, 4) / 4; - - mpc8xxx_spi->tx = t->tx_buf; - mpc8xxx_spi->rx = t->rx_buf; - - INIT_COMPLETION(mpc8xxx_spi->done); - - /* Set SPCOM[CS] and SPCOM[TRANLEN] field */ - if ((t->len - 1) > SPCOM_TRANLEN_MAX) { - dev_err(mpc8xxx_spi->dev, "Transaction length (%d)" - " beyond the SPCOM[TRANLEN] field\n", t->len); - return -EINVAL; - } - mpc8xxx_spi_write_reg(®_base->command, - (SPCOM_CS(spi->chip_select) | SPCOM_TRANLEN(t->len - 1))); - - ret = fsl_espi_cpu_bufs(mpc8xxx_spi, t, len); - if (ret) - return ret; - - wait_for_completion(&mpc8xxx_spi->done); - - /* disable rx ints */ - mpc8xxx_spi_write_reg(®_base->mask, 0); - - return mpc8xxx_spi->count; -} - -static inline void fsl_espi_addr2cmd(unsigned int addr, u8 *cmd) -{ - if (cmd) { - cmd[1] = (u8)(addr >> 16); - cmd[2] = (u8)(addr >> 8); - cmd[3] = (u8)(addr >> 0); - } -} - -static inline unsigned int fsl_espi_cmd2addr(u8 *cmd) -{ - if (cmd) - return cmd[1] << 16 | cmd[2] << 8 | cmd[3] << 0; - - return 0; -} - -static void fsl_espi_do_trans(struct spi_message *m, - struct fsl_espi_transfer *tr) -{ - struct spi_device *spi = m->spi; - struct mpc8xxx_spi *mspi = spi_master_get_devdata(spi->master); - struct fsl_espi_transfer *espi_trans = tr; - struct spi_message message; - struct spi_transfer *t, *first, trans; - int status = 0; - - spi_message_init(&message); - memset(&trans, 0, sizeof(trans)); - - first = list_first_entry(&m->transfers, struct spi_transfer, - transfer_list); - list_for_each_entry(t, &m->transfers, transfer_list) { - if ((first->bits_per_word != t->bits_per_word) || - (first->speed_hz != t->speed_hz)) { - espi_trans->status = -EINVAL; - dev_err(mspi->dev, "bits_per_word/speed_hz should be" - " same for the same SPI transfer\n"); - return; - } - - trans.speed_hz = t->speed_hz; - trans.bits_per_word = t->bits_per_word; - trans.delay_usecs = max(first->delay_usecs, t->delay_usecs); - } - - trans.len = espi_trans->len; - trans.tx_buf = espi_trans->tx_buf; - trans.rx_buf = espi_trans->rx_buf; - spi_message_add_tail(&trans, &message); - - list_for_each_entry(t, &message.transfers, transfer_list) { - if (t->bits_per_word || t->speed_hz) { - status = -EINVAL; - - status = fsl_espi_setup_transfer(spi, t); - if (status < 0) - break; - } - - if (t->len) - status = fsl_espi_bufs(spi, t); - - if (status) { - status = -EMSGSIZE; - break; - } - - if (t->delay_usecs) - udelay(t->delay_usecs); - } - - espi_trans->status = status; - fsl_espi_setup_transfer(spi, NULL); -} - -static void 
fsl_espi_cmd_trans(struct spi_message *m, - struct fsl_espi_transfer *trans, u8 *rx_buff) -{ - struct spi_transfer *t; - u8 *local_buf; - int i = 0; - struct fsl_espi_transfer *espi_trans = trans; - - local_buf = kzalloc(SPCOM_TRANLEN_MAX, GFP_KERNEL); - if (!local_buf) { - espi_trans->status = -ENOMEM; - return; - } - - list_for_each_entry(t, &m->transfers, transfer_list) { - if (t->tx_buf) { - memcpy(local_buf + i, t->tx_buf, t->len); - i += t->len; - } - } - - espi_trans->tx_buf = local_buf; - espi_trans->rx_buf = local_buf + espi_trans->n_tx; - fsl_espi_do_trans(m, espi_trans); - - espi_trans->actual_length = espi_trans->len; - kfree(local_buf); -} - -static void fsl_espi_rw_trans(struct spi_message *m, - struct fsl_espi_transfer *trans, u8 *rx_buff) -{ - struct fsl_espi_transfer *espi_trans = trans; - unsigned int n_tx = espi_trans->n_tx; - unsigned int n_rx = espi_trans->n_rx; - struct spi_transfer *t; - u8 *local_buf; - u8 *rx_buf = rx_buff; - unsigned int trans_len; - unsigned int addr; - int i, pos, loop; - - local_buf = kzalloc(SPCOM_TRANLEN_MAX, GFP_KERNEL); - if (!local_buf) { - espi_trans->status = -ENOMEM; - return; - } - - for (pos = 0, loop = 0; pos < n_rx; pos += trans_len, loop++) { - trans_len = n_rx - pos; - if (trans_len > SPCOM_TRANLEN_MAX - n_tx) - trans_len = SPCOM_TRANLEN_MAX - n_tx; - - i = 0; - list_for_each_entry(t, &m->transfers, transfer_list) { - if (t->tx_buf) { - memcpy(local_buf + i, t->tx_buf, t->len); - i += t->len; - } - } - - if (pos > 0) { - addr = fsl_espi_cmd2addr(local_buf); - addr += pos; - fsl_espi_addr2cmd(addr, local_buf); - } - - espi_trans->n_tx = n_tx; - espi_trans->n_rx = trans_len; - espi_trans->len = trans_len + n_tx; - espi_trans->tx_buf = local_buf; - espi_trans->rx_buf = local_buf + n_tx; - fsl_espi_do_trans(m, espi_trans); - - memcpy(rx_buf + pos, espi_trans->rx_buf + n_tx, trans_len); - - if (loop > 0) - espi_trans->actual_length += espi_trans->len - n_tx; - else - espi_trans->actual_length += espi_trans->len; - } - - kfree(local_buf); -} - -static void fsl_espi_do_one_msg(struct spi_message *m) -{ - struct spi_transfer *t; - u8 *rx_buf = NULL; - unsigned int n_tx = 0; - unsigned int n_rx = 0; - struct fsl_espi_transfer espi_trans; - - list_for_each_entry(t, &m->transfers, transfer_list) { - if (t->tx_buf) - n_tx += t->len; - if (t->rx_buf) { - n_rx += t->len; - rx_buf = t->rx_buf; - } - } - - espi_trans.n_tx = n_tx; - espi_trans.n_rx = n_rx; - espi_trans.len = n_tx + n_rx; - espi_trans.actual_length = 0; - espi_trans.status = 0; - - if (!rx_buf) - fsl_espi_cmd_trans(m, &espi_trans, NULL); - else - fsl_espi_rw_trans(m, &espi_trans, rx_buf); - - m->actual_length = espi_trans.actual_length; - m->status = espi_trans.status; - m->complete(m->context); -} - -static int fsl_espi_setup(struct spi_device *spi) -{ - struct mpc8xxx_spi *mpc8xxx_spi; - struct fsl_espi_reg *reg_base; - int retval; - u32 hw_mode; - u32 loop_mode; - struct spi_mpc8xxx_cs *cs = spi->controller_state; - - if (!spi->max_speed_hz) - return -EINVAL; - - if (!cs) { - cs = kzalloc(sizeof *cs, GFP_KERNEL); - if (!cs) - return -ENOMEM; - spi->controller_state = cs; - } - - mpc8xxx_spi = spi_master_get_devdata(spi->master); - reg_base = mpc8xxx_spi->reg_base; - - hw_mode = cs->hw_mode; /* Save original settings */ - cs->hw_mode = mpc8xxx_spi_read_reg( - ®_base->csmode[spi->chip_select]); - /* mask out bits we are going to set */ - cs->hw_mode &= ~(CSMODE_CP_BEGIN_EDGECLK | CSMODE_CI_INACTIVEHIGH - | CSMODE_REV); - - if (spi->mode & SPI_CPHA) - cs->hw_mode |= 
CSMODE_CP_BEGIN_EDGECLK; - if (spi->mode & SPI_CPOL) - cs->hw_mode |= CSMODE_CI_INACTIVEHIGH; - if (!(spi->mode & SPI_LSB_FIRST)) - cs->hw_mode |= CSMODE_REV; - - /* Handle the loop mode */ - loop_mode = mpc8xxx_spi_read_reg(®_base->mode); - loop_mode &= ~SPMODE_LOOP; - if (spi->mode & SPI_LOOP) - loop_mode |= SPMODE_LOOP; - mpc8xxx_spi_write_reg(®_base->mode, loop_mode); - - retval = fsl_espi_setup_transfer(spi, NULL); - if (retval < 0) { - cs->hw_mode = hw_mode; /* Restore settings */ - return retval; - } - return 0; -} - -void fsl_espi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events) -{ - struct fsl_espi_reg *reg_base = mspi->reg_base; - - /* We need handle RX first */ - if (events & SPIE_NE) { - u32 rx_data, tmp; - u8 rx_data_8; - - /* Spin until RX is done */ - while (SPIE_RXCNT(events) < min(4, mspi->len)) { - cpu_relax(); - events = mpc8xxx_spi_read_reg(®_base->event); - } - - if (mspi->len >= 4) { - rx_data = mpc8xxx_spi_read_reg(®_base->receive); - } else { - tmp = mspi->len; - rx_data = 0; - while (tmp--) { - rx_data_8 = in_8((u8 *)®_base->receive); - rx_data |= (rx_data_8 << (tmp * 8)); - } - - rx_data <<= (4 - mspi->len) * 8; - } - - mspi->len -= 4; - - if (mspi->rx) - mspi->get_rx(rx_data, mspi); - } - - if (!(events & SPIE_NF)) { - int ret; - - /* spin until TX is done */ - ret = spin_event_timeout(((events = mpc8xxx_spi_read_reg( - ®_base->event)) & SPIE_NF) == 0, 1000, 0); - if (!ret) { - dev_err(mspi->dev, "tired waiting for SPIE_NF\n"); - return; - } - } - - /* Clear the events */ - mpc8xxx_spi_write_reg(®_base->event, events); - - mspi->count -= 1; - if (mspi->count) { - u32 word = mspi->get_tx(mspi); - - mpc8xxx_spi_write_reg(®_base->transmit, word); - } else { - complete(&mspi->done); - } -} - -static irqreturn_t fsl_espi_irq(s32 irq, void *context_data) -{ - struct mpc8xxx_spi *mspi = context_data; - struct fsl_espi_reg *reg_base = mspi->reg_base; - irqreturn_t ret = IRQ_NONE; - u32 events; - - /* Get interrupt events(tx/rx) */ - events = mpc8xxx_spi_read_reg(®_base->event); - if (events) - ret = IRQ_HANDLED; - - dev_vdbg(mspi->dev, "%s: events %x\n", __func__, events); - - fsl_espi_cpu_irq(mspi, events); - - return ret; -} - -static void fsl_espi_remove(struct mpc8xxx_spi *mspi) -{ - iounmap(mspi->reg_base); -} - -static struct spi_master * __devinit fsl_espi_probe(struct device *dev, - struct resource *mem, unsigned int irq) -{ - struct fsl_spi_platform_data *pdata = dev->platform_data; - struct spi_master *master; - struct mpc8xxx_spi *mpc8xxx_spi; - struct fsl_espi_reg *reg_base; - u32 regval; - int i, ret = 0; - - master = spi_alloc_master(dev, sizeof(struct mpc8xxx_spi)); - if (!master) { - ret = -ENOMEM; - goto err; - } - - dev_set_drvdata(dev, master); - - ret = mpc8xxx_spi_probe(dev, mem, irq); - if (ret) - goto err_probe; - - master->setup = fsl_espi_setup; - - mpc8xxx_spi = spi_master_get_devdata(master); - mpc8xxx_spi->spi_do_one_msg = fsl_espi_do_one_msg; - mpc8xxx_spi->spi_remove = fsl_espi_remove; - - mpc8xxx_spi->reg_base = ioremap(mem->start, resource_size(mem)); - if (!mpc8xxx_spi->reg_base) { - ret = -ENOMEM; - goto err_probe; - } - - reg_base = mpc8xxx_spi->reg_base; - - /* Register for SPI Interrupt */ - ret = request_irq(mpc8xxx_spi->irq, fsl_espi_irq, - 0, "fsl_espi", mpc8xxx_spi); - if (ret) - goto free_irq; - - if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) { - mpc8xxx_spi->rx_shift = 16; - mpc8xxx_spi->tx_shift = 24; - } - - /* SPI controller initializations */ - mpc8xxx_spi_write_reg(®_base->mode, 0); - mpc8xxx_spi_write_reg(®_base->mask, 0); - 
mpc8xxx_spi_write_reg(®_base->command, 0); - mpc8xxx_spi_write_reg(®_base->event, 0xffffffff); - - /* Init eSPI CS mode register */ - for (i = 0; i < pdata->max_chipselect; i++) - mpc8xxx_spi_write_reg(®_base->csmode[i], CSMODE_INIT_VAL); - - /* Enable SPI interface */ - regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE; - - mpc8xxx_spi_write_reg(®_base->mode, regval); - - ret = spi_register_master(master); - if (ret < 0) - goto unreg_master; - - dev_info(dev, "at 0x%p (irq = %d)\n", reg_base, mpc8xxx_spi->irq); - - return master; - -unreg_master: - free_irq(mpc8xxx_spi->irq, mpc8xxx_spi); -free_irq: - iounmap(mpc8xxx_spi->reg_base); -err_probe: - spi_master_put(master); -err: - return ERR_PTR(ret); -} - -static int of_fsl_espi_get_chipselects(struct device *dev) -{ - struct device_node *np = dev->of_node; - struct fsl_spi_platform_data *pdata = dev->platform_data; - const u32 *prop; - int len; - - prop = of_get_property(np, "fsl,espi-num-chipselects", &len); - if (!prop || len < sizeof(*prop)) { - dev_err(dev, "No 'fsl,espi-num-chipselects' property\n"); - return -EINVAL; - } - - pdata->max_chipselect = *prop; - pdata->cs_control = NULL; - - return 0; -} - -static int __devinit of_fsl_espi_probe(struct platform_device *ofdev) -{ - struct device *dev = &ofdev->dev; - struct device_node *np = ofdev->dev.of_node; - struct spi_master *master; - struct resource mem; - struct resource irq; - int ret = -ENOMEM; - - ret = of_mpc8xxx_spi_probe(ofdev); - if (ret) - return ret; - - ret = of_fsl_espi_get_chipselects(dev); - if (ret) - goto err; - - ret = of_address_to_resource(np, 0, &mem); - if (ret) - goto err; - - ret = of_irq_to_resource(np, 0, &irq); - if (!ret) { - ret = -EINVAL; - goto err; - } - - master = fsl_espi_probe(dev, &mem, irq.start); - if (IS_ERR(master)) { - ret = PTR_ERR(master); - goto err; - } - - return 0; - -err: - return ret; -} - -static int __devexit of_fsl_espi_remove(struct platform_device *dev) -{ - return mpc8xxx_spi_remove(&dev->dev); -} - -static const struct of_device_id of_fsl_espi_match[] = { - { .compatible = "fsl,mpc8536-espi" }, - {} -}; -MODULE_DEVICE_TABLE(of, of_fsl_espi_match); - -static struct platform_driver fsl_espi_driver = { - .driver = { - .name = "fsl_espi", - .owner = THIS_MODULE, - .of_match_table = of_fsl_espi_match, - }, - .probe = of_fsl_espi_probe, - .remove = __devexit_p(of_fsl_espi_remove), -}; - -static int __init fsl_espi_init(void) -{ - return platform_driver_register(&fsl_espi_driver); -} -module_init(fsl_espi_init); - -static void __exit fsl_espi_exit(void) -{ - platform_driver_unregister(&fsl_espi_driver); -} -module_exit(fsl_espi_exit); - -MODULE_AUTHOR("Mingkai Hu"); -MODULE_DESCRIPTION("Enhanced Freescale SPI Driver"); -MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi_fsl_lib.c b/drivers/spi/spi_fsl_lib.c deleted file mode 100644 index ff59f42..0000000 --- a/drivers/spi/spi_fsl_lib.c +++ /dev/null @@ -1,236 +0,0 @@ -/* - * Freescale SPI/eSPI controller driver library. - * - * Maintainer: Kumar Gala - * - * Copyright (C) 2006 Polycom, Inc. - * - * CPM SPI and QE buffer descriptors mode support: - * Copyright (c) 2009 MontaVista Software, Inc. - * Author: Anton Vorontsov - * - * Copyright 2010 Freescale Semiconductor, Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. 
- */ -#include -#include -#include -#include -#include -#include -#include -#include - -#include "spi_fsl_lib.h" - -#define MPC8XXX_SPI_RX_BUF(type) \ -void mpc8xxx_spi_rx_buf_##type(u32 data, struct mpc8xxx_spi *mpc8xxx_spi) \ -{ \ - type *rx = mpc8xxx_spi->rx; \ - *rx++ = (type)(data >> mpc8xxx_spi->rx_shift); \ - mpc8xxx_spi->rx = rx; \ -} - -#define MPC8XXX_SPI_TX_BUF(type) \ -u32 mpc8xxx_spi_tx_buf_##type(struct mpc8xxx_spi *mpc8xxx_spi) \ -{ \ - u32 data; \ - const type *tx = mpc8xxx_spi->tx; \ - if (!tx) \ - return 0; \ - data = *tx++ << mpc8xxx_spi->tx_shift; \ - mpc8xxx_spi->tx = tx; \ - return data; \ -} - -MPC8XXX_SPI_RX_BUF(u8) -MPC8XXX_SPI_RX_BUF(u16) -MPC8XXX_SPI_RX_BUF(u32) -MPC8XXX_SPI_TX_BUF(u8) -MPC8XXX_SPI_TX_BUF(u16) -MPC8XXX_SPI_TX_BUF(u32) - -struct mpc8xxx_spi_probe_info *to_of_pinfo(struct fsl_spi_platform_data *pdata) -{ - return container_of(pdata, struct mpc8xxx_spi_probe_info, pdata); -} - -void mpc8xxx_spi_work(struct work_struct *work) -{ - struct mpc8xxx_spi *mpc8xxx_spi = container_of(work, struct mpc8xxx_spi, - work); - - spin_lock_irq(&mpc8xxx_spi->lock); - while (!list_empty(&mpc8xxx_spi->queue)) { - struct spi_message *m = container_of(mpc8xxx_spi->queue.next, - struct spi_message, queue); - - list_del_init(&m->queue); - spin_unlock_irq(&mpc8xxx_spi->lock); - - if (mpc8xxx_spi->spi_do_one_msg) - mpc8xxx_spi->spi_do_one_msg(m); - - spin_lock_irq(&mpc8xxx_spi->lock); - } - spin_unlock_irq(&mpc8xxx_spi->lock); -} - -int mpc8xxx_spi_transfer(struct spi_device *spi, - struct spi_message *m) -{ - struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master); - unsigned long flags; - - m->actual_length = 0; - m->status = -EINPROGRESS; - - spin_lock_irqsave(&mpc8xxx_spi->lock, flags); - list_add_tail(&m->queue, &mpc8xxx_spi->queue); - queue_work(mpc8xxx_spi->workqueue, &mpc8xxx_spi->work); - spin_unlock_irqrestore(&mpc8xxx_spi->lock, flags); - - return 0; -} - -void mpc8xxx_spi_cleanup(struct spi_device *spi) -{ - kfree(spi->controller_state); -} - -const char *mpc8xxx_spi_strmode(unsigned int flags) -{ - if (flags & SPI_QE_CPU_MODE) { - return "QE CPU"; - } else if (flags & SPI_CPM_MODE) { - if (flags & SPI_QE) - return "QE"; - else if (flags & SPI_CPM2) - return "CPM2"; - else - return "CPM1"; - } - return "CPU"; -} - -int mpc8xxx_spi_probe(struct device *dev, struct resource *mem, - unsigned int irq) -{ - struct fsl_spi_platform_data *pdata = dev->platform_data; - struct spi_master *master; - struct mpc8xxx_spi *mpc8xxx_spi; - int ret = 0; - - master = dev_get_drvdata(dev); - - /* the spi->mode bits understood by this driver: */ - master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH - | SPI_LSB_FIRST | SPI_LOOP; - - master->transfer = mpc8xxx_spi_transfer; - master->cleanup = mpc8xxx_spi_cleanup; - master->dev.of_node = dev->of_node; - - mpc8xxx_spi = spi_master_get_devdata(master); - mpc8xxx_spi->dev = dev; - mpc8xxx_spi->get_rx = mpc8xxx_spi_rx_buf_u8; - mpc8xxx_spi->get_tx = mpc8xxx_spi_tx_buf_u8; - mpc8xxx_spi->flags = pdata->flags; - mpc8xxx_spi->spibrg = pdata->sysclk; - mpc8xxx_spi->irq = irq; - - mpc8xxx_spi->rx_shift = 0; - mpc8xxx_spi->tx_shift = 0; - - init_completion(&mpc8xxx_spi->done); - - master->bus_num = pdata->bus_num; - master->num_chipselect = pdata->max_chipselect; - - spin_lock_init(&mpc8xxx_spi->lock); - init_completion(&mpc8xxx_spi->done); - INIT_WORK(&mpc8xxx_spi->work, mpc8xxx_spi_work); - INIT_LIST_HEAD(&mpc8xxx_spi->queue); - - mpc8xxx_spi->workqueue = create_singlethread_workqueue( - dev_name(master->dev.parent)); - if 
(mpc8xxx_spi->workqueue == NULL) { - ret = -EBUSY; - goto err; - } - - return 0; - -err: - return ret; -} - -int __devexit mpc8xxx_spi_remove(struct device *dev) -{ - struct mpc8xxx_spi *mpc8xxx_spi; - struct spi_master *master; - - master = dev_get_drvdata(dev); - mpc8xxx_spi = spi_master_get_devdata(master); - - flush_workqueue(mpc8xxx_spi->workqueue); - destroy_workqueue(mpc8xxx_spi->workqueue); - spi_unregister_master(master); - - free_irq(mpc8xxx_spi->irq, mpc8xxx_spi); - - if (mpc8xxx_spi->spi_remove) - mpc8xxx_spi->spi_remove(mpc8xxx_spi); - - return 0; -} - -int __devinit of_mpc8xxx_spi_probe(struct platform_device *ofdev) -{ - struct device *dev = &ofdev->dev; - struct device_node *np = ofdev->dev.of_node; - struct mpc8xxx_spi_probe_info *pinfo; - struct fsl_spi_platform_data *pdata; - const void *prop; - int ret = -ENOMEM; - - pinfo = kzalloc(sizeof(*pinfo), GFP_KERNEL); - if (!pinfo) - return -ENOMEM; - - pdata = &pinfo->pdata; - dev->platform_data = pdata; - - /* Allocate bus num dynamically. */ - pdata->bus_num = -1; - - /* SPI controller is either clocked from QE or SoC clock. */ - pdata->sysclk = get_brgfreq(); - if (pdata->sysclk == -1) { - pdata->sysclk = fsl_get_sys_freq(); - if (pdata->sysclk == -1) { - ret = -ENODEV; - goto err; - } - } - - prop = of_get_property(np, "mode", NULL); - if (prop && !strcmp(prop, "cpu-qe")) - pdata->flags = SPI_QE_CPU_MODE; - else if (prop && !strcmp(prop, "qe")) - pdata->flags = SPI_CPM_MODE | SPI_QE; - else if (of_device_is_compatible(np, "fsl,cpm2-spi")) - pdata->flags = SPI_CPM_MODE | SPI_CPM2; - else if (of_device_is_compatible(np, "fsl,cpm1-spi")) - pdata->flags = SPI_CPM_MODE | SPI_CPM1; - - return 0; - -err: - kfree(pinfo); - return ret; -} diff --git a/drivers/spi/spi_fsl_lib.h b/drivers/spi/spi_fsl_lib.h deleted file mode 100644 index cbe881b..0000000 --- a/drivers/spi/spi_fsl_lib.h +++ /dev/null @@ -1,123 +0,0 @@ -/* - * Freescale SPI/eSPI controller driver library. - * - * Maintainer: Kumar Gala - * - * Copyright 2010 Freescale Semiconductor, Inc. - * Copyright (C) 2006 Polycom, Inc. - * - * CPM SPI and QE buffer descriptors mode support: - * Copyright (c) 2009 MontaVista Software, Inc. - * Author: Anton Vorontsov - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - */ -#ifndef __SPI_FSL_LIB_H__ -#define __SPI_FSL_LIB_H__ - -#include - -/* SPI/eSPI Controller driver's private data. 
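The MPC8XXX_SPI_RX_BUF()/MPC8XXX_SPI_TX_BUF() macros in the library code above stamp out one typed buffer accessor per word size through token pasting, so the IRQ path can service 8-, 16- and 32-bit transfers through a single function pointer rather than branching on width. For reference, MPC8XXX_SPI_TX_BUF(u16) expands to roughly the following (expanded by hand here):

u32 mpc8xxx_spi_tx_buf_u16(struct mpc8xxx_spi *mpc8xxx_spi)
{
	u32 data;
	const u16 *tx = mpc8xxx_spi->tx;

	if (!tx)
		return 0;
	data = *tx++ << mpc8xxx_spi->tx_shift;
	mpc8xxx_spi->tx = tx;	/* advance the cursor by one u16 */
	return data;
}

The matching RX accessor shifts the received word down by rx_shift and stores it through a typed pointer in the same way.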
*/ -struct mpc8xxx_spi { - struct device *dev; - void *reg_base; - - /* rx & tx bufs from the spi_transfer */ - const void *tx; - void *rx; -#ifdef CONFIG_SPI_FSL_ESPI - int len; -#endif - - int subblock; - struct spi_pram __iomem *pram; - struct cpm_buf_desc __iomem *tx_bd; - struct cpm_buf_desc __iomem *rx_bd; - - struct spi_transfer *xfer_in_progress; - - /* dma addresses for CPM transfers */ - dma_addr_t tx_dma; - dma_addr_t rx_dma; - bool map_tx_dma; - bool map_rx_dma; - - dma_addr_t dma_dummy_tx; - dma_addr_t dma_dummy_rx; - - /* functions to deal with different sized buffers */ - void (*get_rx) (u32 rx_data, struct mpc8xxx_spi *); - u32(*get_tx) (struct mpc8xxx_spi *); - - /* hooks for different controller driver */ - void (*spi_do_one_msg) (struct spi_message *m); - void (*spi_remove) (struct mpc8xxx_spi *mspi); - - unsigned int count; - unsigned int irq; - - unsigned nsecs; /* (clock cycle time)/2 */ - - u32 spibrg; /* SPIBRG input clock */ - u32 rx_shift; /* RX data reg shift when in qe mode */ - u32 tx_shift; /* TX data reg shift when in qe mode */ - - unsigned int flags; - - struct workqueue_struct *workqueue; - struct work_struct work; - - struct list_head queue; - spinlock_t lock; - - struct completion done; -}; - -struct spi_mpc8xxx_cs { - /* functions to deal with different sized buffers */ - void (*get_rx) (u32 rx_data, struct mpc8xxx_spi *); - u32 (*get_tx) (struct mpc8xxx_spi *); - u32 rx_shift; /* RX data reg shift when in qe mode */ - u32 tx_shift; /* TX data reg shift when in qe mode */ - u32 hw_mode; /* Holds HW mode register settings */ -}; - -static inline void mpc8xxx_spi_write_reg(__be32 __iomem *reg, u32 val) -{ - out_be32(reg, val); -} - -static inline u32 mpc8xxx_spi_read_reg(__be32 __iomem *reg) -{ - return in_be32(reg); -} - -struct mpc8xxx_spi_probe_info { - struct fsl_spi_platform_data pdata; - int *gpios; - bool *alow_flags; -}; - -extern u32 mpc8xxx_spi_tx_buf_u8(struct mpc8xxx_spi *mpc8xxx_spi); -extern u32 mpc8xxx_spi_tx_buf_u16(struct mpc8xxx_spi *mpc8xxx_spi); -extern u32 mpc8xxx_spi_tx_buf_u32(struct mpc8xxx_spi *mpc8xxx_spi); -extern void mpc8xxx_spi_rx_buf_u8(u32 data, struct mpc8xxx_spi *mpc8xxx_spi); -extern void mpc8xxx_spi_rx_buf_u16(u32 data, struct mpc8xxx_spi *mpc8xxx_spi); -extern void mpc8xxx_spi_rx_buf_u32(u32 data, struct mpc8xxx_spi *mpc8xxx_spi); - -extern struct mpc8xxx_spi_probe_info *to_of_pinfo( - struct fsl_spi_platform_data *pdata); -extern int mpc8xxx_spi_bufs(struct mpc8xxx_spi *mspi, - struct spi_transfer *t, unsigned int len); -extern int mpc8xxx_spi_transfer(struct spi_device *spi, struct spi_message *m); -extern void mpc8xxx_spi_cleanup(struct spi_device *spi); -extern const char *mpc8xxx_spi_strmode(unsigned int flags); -extern int mpc8xxx_spi_probe(struct device *dev, struct resource *mem, - unsigned int irq); -extern int mpc8xxx_spi_remove(struct device *dev); -extern int of_mpc8xxx_spi_probe(struct platform_device *ofdev); - -#endif /* __SPI_FSL_LIB_H__ */ diff --git a/drivers/spi/spi_fsl_spi.c b/drivers/spi/spi_fsl_spi.c deleted file mode 100644 index 7963c9b..0000000 --- a/drivers/spi/spi_fsl_spi.c +++ /dev/null @@ -1,1192 +0,0 @@ -/* - * Freescale SPI controller driver. - * - * Maintainer: Kumar Gala - * - * Copyright (C) 2006 Polycom, Inc. - * Copyright 2010 Freescale Semiconductor, Inc. - * - * CPM SPI and QE buffer descriptors mode support: - * Copyright (c) 2009 MontaVista Software, Inc. 
- * Author: Anton Vorontsov - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#include "spi_fsl_lib.h" - -/* CPM1 and CPM2 are mutually exclusive. */ -#ifdef CONFIG_CPM1 -#include -#define CPM_SPI_CMD mk_cr_cmd(CPM_CR_CH_SPI, 0) -#else -#include -#define CPM_SPI_CMD mk_cr_cmd(CPM_CR_SPI_PAGE, CPM_CR_SPI_SBLOCK, 0, 0) -#endif - -/* SPI Controller registers */ -struct fsl_spi_reg { - u8 res1[0x20]; - __be32 mode; - __be32 event; - __be32 mask; - __be32 command; - __be32 transmit; - __be32 receive; -}; - -/* SPI Controller mode register definitions */ -#define SPMODE_LOOP (1 << 30) -#define SPMODE_CI_INACTIVEHIGH (1 << 29) -#define SPMODE_CP_BEGIN_EDGECLK (1 << 28) -#define SPMODE_DIV16 (1 << 27) -#define SPMODE_REV (1 << 26) -#define SPMODE_MS (1 << 25) -#define SPMODE_ENABLE (1 << 24) -#define SPMODE_LEN(x) ((x) << 20) -#define SPMODE_PM(x) ((x) << 16) -#define SPMODE_OP (1 << 14) -#define SPMODE_CG(x) ((x) << 7) - -/* - * Default for SPI Mode: - * SPI MODE 0 (inactive low, phase middle, MSB, 8-bit length, slow clk - */ -#define SPMODE_INIT_VAL (SPMODE_CI_INACTIVEHIGH | SPMODE_DIV16 | SPMODE_REV | \ - SPMODE_MS | SPMODE_LEN(7) | SPMODE_PM(0xf)) - -/* SPIE register values */ -#define SPIE_NE 0x00000200 /* Not empty */ -#define SPIE_NF 0x00000100 /* Not full */ - -/* SPIM register values */ -#define SPIM_NE 0x00000200 /* Not empty */ -#define SPIM_NF 0x00000100 /* Not full */ - -#define SPIE_TXB 0x00000200 /* Last char is written to tx fifo */ -#define SPIE_RXB 0x00000100 /* Last char is written to rx buf */ - -/* SPCOM register values */ -#define SPCOM_STR (1 << 23) /* Start transmit */ - -#define SPI_PRAM_SIZE 0x100 -#define SPI_MRBLR ((unsigned int)PAGE_SIZE) - -static void *fsl_dummy_rx; -static DEFINE_MUTEX(fsl_dummy_rx_lock); -static int fsl_dummy_rx_refcnt; - -static void fsl_spi_change_mode(struct spi_device *spi) -{ - struct mpc8xxx_spi *mspi = spi_master_get_devdata(spi->master); - struct spi_mpc8xxx_cs *cs = spi->controller_state; - struct fsl_spi_reg *reg_base = mspi->reg_base; - __be32 __iomem *mode = ®_base->mode; - unsigned long flags; - - if (cs->hw_mode == mpc8xxx_spi_read_reg(mode)) - return; - - /* Turn off IRQs locally to minimize time that SPI is disabled. */ - local_irq_save(flags); - - /* Turn off SPI unit prior changing mode */ - mpc8xxx_spi_write_reg(mode, cs->hw_mode & ~SPMODE_ENABLE); - - /* When in CPM mode, we need to reinit tx and rx. 
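The SPMODE bit macros above are what fsl_spi_setup_transfer() combines into the per-chip-select hw_mode value: LEN carries the word length minus one (with 32-bit encoded as 0) and DIV16/PM select the clock prescaler. A rough standalone rendering of just that length/prescaler packing is given below; the 66 MHz sysclk and 1 MHz transfer rate are made-up example figures, and the CPOL/CPHA/REV/LOOP bits handled by setup() are left out.

#include <stdio.h>
#include <stdint.h>

#define SPMODE_DIV16	(1 << 27)
#define SPMODE_LEN(x)	((x) << 20)
#define SPMODE_PM(x)	((x) << 16)

/* Mirror the length and prescaler selection in fsl_spi_setup_transfer(). */
static uint32_t spmode_fields(uint32_t spibrg, uint32_t hz, unsigned bits_per_word)
{
	uint32_t mode = 0;
	uint32_t pm;

	/* LEN holds "bits minus one"; a 32-bit word is encoded as 0. */
	mode |= SPMODE_LEN(bits_per_word == 32 ? 0 : bits_per_word - 1);

	if (spibrg / hz > 64) {
		mode |= SPMODE_DIV16;
		pm = (spibrg - 1) / (hz * 64) + 1;
		if (pm > 16)
			pm = 16;	/* requested rate too low, clamp */
	} else {
		pm = (spibrg - 1) / (hz * 4) + 1;
	}
	if (pm)
		pm--;

	return mode | SPMODE_PM(pm);
}

int main(void)
{
	printf("SPMODE fields: 0x%08x\n",
	       (unsigned)spmode_fields(66000000, 1000000, 8));
	return 0;
}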
*/ - if (mspi->flags & SPI_CPM_MODE) { - if (mspi->flags & SPI_QE) { - qe_issue_cmd(QE_INIT_TX_RX, mspi->subblock, - QE_CR_PROTOCOL_UNSPECIFIED, 0); - } else { - cpm_command(CPM_SPI_CMD, CPM_CR_INIT_TRX); - if (mspi->flags & SPI_CPM1) { - out_be16(&mspi->pram->rbptr, - in_be16(&mspi->pram->rbase)); - out_be16(&mspi->pram->tbptr, - in_be16(&mspi->pram->tbase)); - } - } - } - mpc8xxx_spi_write_reg(mode, cs->hw_mode); - local_irq_restore(flags); -} - -static void fsl_spi_chipselect(struct spi_device *spi, int value) -{ - struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master); - struct fsl_spi_platform_data *pdata = spi->dev.parent->platform_data; - bool pol = spi->mode & SPI_CS_HIGH; - struct spi_mpc8xxx_cs *cs = spi->controller_state; - - if (value == BITBANG_CS_INACTIVE) { - if (pdata->cs_control) - pdata->cs_control(spi, !pol); - } - - if (value == BITBANG_CS_ACTIVE) { - mpc8xxx_spi->rx_shift = cs->rx_shift; - mpc8xxx_spi->tx_shift = cs->tx_shift; - mpc8xxx_spi->get_rx = cs->get_rx; - mpc8xxx_spi->get_tx = cs->get_tx; - - fsl_spi_change_mode(spi); - - if (pdata->cs_control) - pdata->cs_control(spi, pol); - } -} - -static int mspi_apply_cpu_mode_quirks(struct spi_mpc8xxx_cs *cs, - struct spi_device *spi, - struct mpc8xxx_spi *mpc8xxx_spi, - int bits_per_word) -{ - cs->rx_shift = 0; - cs->tx_shift = 0; - if (bits_per_word <= 8) { - cs->get_rx = mpc8xxx_spi_rx_buf_u8; - cs->get_tx = mpc8xxx_spi_tx_buf_u8; - if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) { - cs->rx_shift = 16; - cs->tx_shift = 24; - } - } else if (bits_per_word <= 16) { - cs->get_rx = mpc8xxx_spi_rx_buf_u16; - cs->get_tx = mpc8xxx_spi_tx_buf_u16; - if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) { - cs->rx_shift = 16; - cs->tx_shift = 16; - } - } else if (bits_per_word <= 32) { - cs->get_rx = mpc8xxx_spi_rx_buf_u32; - cs->get_tx = mpc8xxx_spi_tx_buf_u32; - } else - return -EINVAL; - - if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE && - spi->mode & SPI_LSB_FIRST) { - cs->tx_shift = 0; - if (bits_per_word <= 8) - cs->rx_shift = 8; - else - cs->rx_shift = 0; - } - mpc8xxx_spi->rx_shift = cs->rx_shift; - mpc8xxx_spi->tx_shift = cs->tx_shift; - mpc8xxx_spi->get_rx = cs->get_rx; - mpc8xxx_spi->get_tx = cs->get_tx; - - return bits_per_word; -} - -static int mspi_apply_qe_mode_quirks(struct spi_mpc8xxx_cs *cs, - struct spi_device *spi, - int bits_per_word) -{ - /* QE uses Little Endian for words > 8 - * so transform all words > 8 into 8 bits - * Unfortnatly that doesn't work for LSB so - * reject these for now */ - /* Note: 32 bits word, LSB works iff - * tfcr/rfcr is set to CPMFCR_GBL */ - if (spi->mode & SPI_LSB_FIRST && - bits_per_word > 8) - return -EINVAL; - if (bits_per_word > 8) - return 8; /* pretend its 8 bits */ - return bits_per_word; -} - -static int fsl_spi_setup_transfer(struct spi_device *spi, - struct spi_transfer *t) -{ - struct mpc8xxx_spi *mpc8xxx_spi; - int bits_per_word = 0; - u8 pm; - u32 hz = 0; - struct spi_mpc8xxx_cs *cs = spi->controller_state; - - mpc8xxx_spi = spi_master_get_devdata(spi->master); - - if (t) { - bits_per_word = t->bits_per_word; - hz = t->speed_hz; - } - - /* spi_transfer level calls that work per-word */ - if (!bits_per_word) - bits_per_word = spi->bits_per_word; - - /* Make sure its a bit width we support [4..16, 32] */ - if ((bits_per_word < 4) - || ((bits_per_word > 16) && (bits_per_word != 32))) - return -EINVAL; - - if (!hz) - hz = spi->max_speed_hz; - - if (!(mpc8xxx_spi->flags & SPI_CPM_MODE)) - bits_per_word = mspi_apply_cpu_mode_quirks(cs, spi, - mpc8xxx_spi, - bits_per_word); - else 
if (mpc8xxx_spi->flags & SPI_QE) - bits_per_word = mspi_apply_qe_mode_quirks(cs, spi, - bits_per_word); - - if (bits_per_word < 0) - return bits_per_word; - - if (bits_per_word == 32) - bits_per_word = 0; - else - bits_per_word = bits_per_word - 1; - - /* mask out bits we are going to set */ - cs->hw_mode &= ~(SPMODE_LEN(0xF) | SPMODE_DIV16 - | SPMODE_PM(0xF)); - - cs->hw_mode |= SPMODE_LEN(bits_per_word); - - if ((mpc8xxx_spi->spibrg / hz) > 64) { - cs->hw_mode |= SPMODE_DIV16; - pm = (mpc8xxx_spi->spibrg - 1) / (hz * 64) + 1; - - WARN_ONCE(pm > 16, "%s: Requested speed is too low: %d Hz. " - "Will use %d Hz instead.\n", dev_name(&spi->dev), - hz, mpc8xxx_spi->spibrg / 1024); - if (pm > 16) - pm = 16; - } else { - pm = (mpc8xxx_spi->spibrg - 1) / (hz * 4) + 1; - } - if (pm) - pm--; - - cs->hw_mode |= SPMODE_PM(pm); - - fsl_spi_change_mode(spi); - return 0; -} - -static void fsl_spi_cpm_bufs_start(struct mpc8xxx_spi *mspi) -{ - struct cpm_buf_desc __iomem *tx_bd = mspi->tx_bd; - struct cpm_buf_desc __iomem *rx_bd = mspi->rx_bd; - unsigned int xfer_len = min(mspi->count, SPI_MRBLR); - unsigned int xfer_ofs; - struct fsl_spi_reg *reg_base = mspi->reg_base; - - xfer_ofs = mspi->xfer_in_progress->len - mspi->count; - - if (mspi->rx_dma == mspi->dma_dummy_rx) - out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma); - else - out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma + xfer_ofs); - out_be16(&rx_bd->cbd_datlen, 0); - out_be16(&rx_bd->cbd_sc, BD_SC_EMPTY | BD_SC_INTRPT | BD_SC_WRAP); - - if (mspi->tx_dma == mspi->dma_dummy_tx) - out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma); - else - out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma + xfer_ofs); - out_be16(&tx_bd->cbd_datlen, xfer_len); - out_be16(&tx_bd->cbd_sc, BD_SC_READY | BD_SC_INTRPT | BD_SC_WRAP | - BD_SC_LAST); - - /* start transfer */ - mpc8xxx_spi_write_reg(®_base->command, SPCOM_STR); -} - -static int fsl_spi_cpm_bufs(struct mpc8xxx_spi *mspi, - struct spi_transfer *t, bool is_dma_mapped) -{ - struct device *dev = mspi->dev; - struct fsl_spi_reg *reg_base = mspi->reg_base; - - if (is_dma_mapped) { - mspi->map_tx_dma = 0; - mspi->map_rx_dma = 0; - } else { - mspi->map_tx_dma = 1; - mspi->map_rx_dma = 1; - } - - if (!t->tx_buf) { - mspi->tx_dma = mspi->dma_dummy_tx; - mspi->map_tx_dma = 0; - } - - if (!t->rx_buf) { - mspi->rx_dma = mspi->dma_dummy_rx; - mspi->map_rx_dma = 0; - } - - if (mspi->map_tx_dma) { - void *nonconst_tx = (void *)mspi->tx; /* shut up gcc */ - - mspi->tx_dma = dma_map_single(dev, nonconst_tx, t->len, - DMA_TO_DEVICE); - if (dma_mapping_error(dev, mspi->tx_dma)) { - dev_err(dev, "unable to map tx dma\n"); - return -ENOMEM; - } - } else if (t->tx_buf) { - mspi->tx_dma = t->tx_dma; - } - - if (mspi->map_rx_dma) { - mspi->rx_dma = dma_map_single(dev, mspi->rx, t->len, - DMA_FROM_DEVICE); - if (dma_mapping_error(dev, mspi->rx_dma)) { - dev_err(dev, "unable to map rx dma\n"); - goto err_rx_dma; - } - } else if (t->rx_buf) { - mspi->rx_dma = t->rx_dma; - } - - /* enable rx ints */ - mpc8xxx_spi_write_reg(®_base->mask, SPIE_RXB); - - mspi->xfer_in_progress = t; - mspi->count = t->len; - - /* start CPM transfers */ - fsl_spi_cpm_bufs_start(mspi); - - return 0; - -err_rx_dma: - if (mspi->map_tx_dma) - dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE); - return -ENOMEM; -} - -static void fsl_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi) -{ - struct device *dev = mspi->dev; - struct spi_transfer *t = mspi->xfer_in_progress; - - if (mspi->map_tx_dma) - dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE); - if (mspi->map_rx_dma) - 
dma_unmap_single(dev, mspi->rx_dma, t->len, DMA_FROM_DEVICE); - mspi->xfer_in_progress = NULL; -} - -static int fsl_spi_cpu_bufs(struct mpc8xxx_spi *mspi, - struct spi_transfer *t, unsigned int len) -{ - u32 word; - struct fsl_spi_reg *reg_base = mspi->reg_base; - - mspi->count = len; - - /* enable rx ints */ - mpc8xxx_spi_write_reg(®_base->mask, SPIM_NE); - - /* transmit word */ - word = mspi->get_tx(mspi); - mpc8xxx_spi_write_reg(®_base->transmit, word); - - return 0; -} - -static int fsl_spi_bufs(struct spi_device *spi, struct spi_transfer *t, - bool is_dma_mapped) -{ - struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master); - struct fsl_spi_reg *reg_base; - unsigned int len = t->len; - u8 bits_per_word; - int ret; - - reg_base = mpc8xxx_spi->reg_base; - bits_per_word = spi->bits_per_word; - if (t->bits_per_word) - bits_per_word = t->bits_per_word; - - if (bits_per_word > 8) { - /* invalid length? */ - if (len & 1) - return -EINVAL; - len /= 2; - } - if (bits_per_word > 16) { - /* invalid length? */ - if (len & 1) - return -EINVAL; - len /= 2; - } - - mpc8xxx_spi->tx = t->tx_buf; - mpc8xxx_spi->rx = t->rx_buf; - - INIT_COMPLETION(mpc8xxx_spi->done); - - if (mpc8xxx_spi->flags & SPI_CPM_MODE) - ret = fsl_spi_cpm_bufs(mpc8xxx_spi, t, is_dma_mapped); - else - ret = fsl_spi_cpu_bufs(mpc8xxx_spi, t, len); - if (ret) - return ret; - - wait_for_completion(&mpc8xxx_spi->done); - - /* disable rx ints */ - mpc8xxx_spi_write_reg(®_base->mask, 0); - - if (mpc8xxx_spi->flags & SPI_CPM_MODE) - fsl_spi_cpm_bufs_complete(mpc8xxx_spi); - - return mpc8xxx_spi->count; -} - -static void fsl_spi_do_one_msg(struct spi_message *m) -{ - struct spi_device *spi = m->spi; - struct spi_transfer *t; - unsigned int cs_change; - const int nsecs = 50; - int status; - - cs_change = 1; - status = 0; - list_for_each_entry(t, &m->transfers, transfer_list) { - if (t->bits_per_word || t->speed_hz) { - /* Don't allow changes if CS is active */ - status = -EINVAL; - - if (cs_change) - status = fsl_spi_setup_transfer(spi, t); - if (status < 0) - break; - } - - if (cs_change) { - fsl_spi_chipselect(spi, BITBANG_CS_ACTIVE); - ndelay(nsecs); - } - cs_change = t->cs_change; - if (t->len) - status = fsl_spi_bufs(spi, t, m->is_dma_mapped); - if (status) { - status = -EMSGSIZE; - break; - } - m->actual_length += t->len; - - if (t->delay_usecs) - udelay(t->delay_usecs); - - if (cs_change) { - ndelay(nsecs); - fsl_spi_chipselect(spi, BITBANG_CS_INACTIVE); - ndelay(nsecs); - } - } - - m->status = status; - m->complete(m->context); - - if (status || !cs_change) { - ndelay(nsecs); - fsl_spi_chipselect(spi, BITBANG_CS_INACTIVE); - } - - fsl_spi_setup_transfer(spi, NULL); -} - -static int fsl_spi_setup(struct spi_device *spi) -{ - struct mpc8xxx_spi *mpc8xxx_spi; - struct fsl_spi_reg *reg_base; - int retval; - u32 hw_mode; - struct spi_mpc8xxx_cs *cs = spi->controller_state; - - if (!spi->max_speed_hz) - return -EINVAL; - - if (!cs) { - cs = kzalloc(sizeof *cs, GFP_KERNEL); - if (!cs) - return -ENOMEM; - spi->controller_state = cs; - } - mpc8xxx_spi = spi_master_get_devdata(spi->master); - - reg_base = mpc8xxx_spi->reg_base; - - hw_mode = cs->hw_mode; /* Save original settings */ - cs->hw_mode = mpc8xxx_spi_read_reg(®_base->mode); - /* mask out bits we are going to set */ - cs->hw_mode &= ~(SPMODE_CP_BEGIN_EDGECLK | SPMODE_CI_INACTIVEHIGH - | SPMODE_REV | SPMODE_LOOP); - - if (spi->mode & SPI_CPHA) - cs->hw_mode |= SPMODE_CP_BEGIN_EDGECLK; - if (spi->mode & SPI_CPOL) - cs->hw_mode |= SPMODE_CI_INACTIVEHIGH; - if (!(spi->mode 
& SPI_LSB_FIRST)) - cs->hw_mode |= SPMODE_REV; - if (spi->mode & SPI_LOOP) - cs->hw_mode |= SPMODE_LOOP; - - retval = fsl_spi_setup_transfer(spi, NULL); - if (retval < 0) { - cs->hw_mode = hw_mode; /* Restore settings */ - return retval; - } - return 0; -} - -static void fsl_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events) -{ - u16 len; - struct fsl_spi_reg *reg_base = mspi->reg_base; - - dev_dbg(mspi->dev, "%s: bd datlen %d, count %d\n", __func__, - in_be16(&mspi->rx_bd->cbd_datlen), mspi->count); - - len = in_be16(&mspi->rx_bd->cbd_datlen); - if (len > mspi->count) { - WARN_ON(1); - len = mspi->count; - } - - /* Clear the events */ - mpc8xxx_spi_write_reg(®_base->event, events); - - mspi->count -= len; - if (mspi->count) - fsl_spi_cpm_bufs_start(mspi); - else - complete(&mspi->done); -} - -static void fsl_spi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events) -{ - struct fsl_spi_reg *reg_base = mspi->reg_base; - - /* We need handle RX first */ - if (events & SPIE_NE) { - u32 rx_data = mpc8xxx_spi_read_reg(®_base->receive); - - if (mspi->rx) - mspi->get_rx(rx_data, mspi); - } - - if ((events & SPIE_NF) == 0) - /* spin until TX is done */ - while (((events = - mpc8xxx_spi_read_reg(®_base->event)) & - SPIE_NF) == 0) - cpu_relax(); - - /* Clear the events */ - mpc8xxx_spi_write_reg(®_base->event, events); - - mspi->count -= 1; - if (mspi->count) { - u32 word = mspi->get_tx(mspi); - - mpc8xxx_spi_write_reg(®_base->transmit, word); - } else { - complete(&mspi->done); - } -} - -static irqreturn_t fsl_spi_irq(s32 irq, void *context_data) -{ - struct mpc8xxx_spi *mspi = context_data; - irqreturn_t ret = IRQ_NONE; - u32 events; - struct fsl_spi_reg *reg_base = mspi->reg_base; - - /* Get interrupt events(tx/rx) */ - events = mpc8xxx_spi_read_reg(®_base->event); - if (events) - ret = IRQ_HANDLED; - - dev_dbg(mspi->dev, "%s: events %x\n", __func__, events); - - if (mspi->flags & SPI_CPM_MODE) - fsl_spi_cpm_irq(mspi, events); - else - fsl_spi_cpu_irq(mspi, events); - - return ret; -} - -static void *fsl_spi_alloc_dummy_rx(void) -{ - mutex_lock(&fsl_dummy_rx_lock); - - if (!fsl_dummy_rx) - fsl_dummy_rx = kmalloc(SPI_MRBLR, GFP_KERNEL); - if (fsl_dummy_rx) - fsl_dummy_rx_refcnt++; - - mutex_unlock(&fsl_dummy_rx_lock); - - return fsl_dummy_rx; -} - -static void fsl_spi_free_dummy_rx(void) -{ - mutex_lock(&fsl_dummy_rx_lock); - - switch (fsl_dummy_rx_refcnt) { - case 0: - WARN_ON(1); - break; - case 1: - kfree(fsl_dummy_rx); - fsl_dummy_rx = NULL; - /* fall through */ - default: - fsl_dummy_rx_refcnt--; - break; - } - - mutex_unlock(&fsl_dummy_rx_lock); -} - -static unsigned long fsl_spi_cpm_get_pram(struct mpc8xxx_spi *mspi) -{ - struct device *dev = mspi->dev; - struct device_node *np = dev->of_node; - const u32 *iprop; - int size; - unsigned long spi_base_ofs; - unsigned long pram_ofs = -ENOMEM; - - /* Can't use of_address_to_resource(), QE muram isn't at 0. */ - iprop = of_get_property(np, "reg", &size); - - /* QE with a fixed pram location? */ - if (mspi->flags & SPI_QE && iprop && size == sizeof(*iprop) * 4) - return cpm_muram_alloc_fixed(iprop[2], SPI_PRAM_SIZE); - - /* QE but with a dynamic pram location? */ - if (mspi->flags & SPI_QE) { - pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64); - qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, mspi->subblock, - QE_CR_PROTOCOL_UNSPECIFIED, pram_ofs); - return pram_ofs; - } - - /* CPM1 and CPM2 pram must be at a fixed addr. 
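The message loop in fsl_spi_do_one_msg() above only honours per-transfer bits_per_word and speed_hz overrides while chip select is inactive, and keeps CS asserted across transfers unless cs_change says otherwise. From the protocol-driver side, a transfer list exercising those paths could be built roughly as follows; the helper name, command byte and payload are purely illustrative and not part of this patch, and a real driver would normally use DMA-safe buffers rather than stack variables.

#include <linux/spi/spi.h>
#include <linux/string.h>

/* Hypothetical helper: an 8-bit command byte followed by one 16-bit
 * payload word, with CS held asserted across both transfers. */
static int example_cmd_then_word(struct spi_device *spi, u8 cmd, u16 val)
{
	struct spi_transfer xfers[2];
	struct spi_message msg;

	memset(xfers, 0, sizeof(xfers));

	xfers[0].tx_buf = &cmd;
	xfers[0].len = 1;		/* uses spi->bits_per_word */

	xfers[1].tx_buf = &val;
	xfers[1].len = 2;		/* length is still in bytes */
	xfers[1].bits_per_word = 16;	/* per-transfer override */

	spi_message_init(&msg);
	spi_message_add_tail(&xfers[0], &msg);
	spi_message_add_tail(&xfers[1], &msg);

	return spi_sync(spi, &msg);
}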
*/ - if (!iprop || size != sizeof(*iprop) * 4) - return -ENOMEM; - - spi_base_ofs = cpm_muram_alloc_fixed(iprop[2], 2); - if (IS_ERR_VALUE(spi_base_ofs)) - return -ENOMEM; - - if (mspi->flags & SPI_CPM2) { - pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64); - if (!IS_ERR_VALUE(pram_ofs)) { - u16 __iomem *spi_base = cpm_muram_addr(spi_base_ofs); - - out_be16(spi_base, pram_ofs); - } - } else { - struct spi_pram __iomem *pram = cpm_muram_addr(spi_base_ofs); - u16 rpbase = in_be16(&pram->rpbase); - - /* Microcode relocation patch applied? */ - if (rpbase) - pram_ofs = rpbase; - else - return spi_base_ofs; - } - - cpm_muram_free(spi_base_ofs); - return pram_ofs; -} - -static int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi) -{ - struct device *dev = mspi->dev; - struct device_node *np = dev->of_node; - const u32 *iprop; - int size; - unsigned long pram_ofs; - unsigned long bds_ofs; - - if (!(mspi->flags & SPI_CPM_MODE)) - return 0; - - if (!fsl_spi_alloc_dummy_rx()) - return -ENOMEM; - - if (mspi->flags & SPI_QE) { - iprop = of_get_property(np, "cell-index", &size); - if (iprop && size == sizeof(*iprop)) - mspi->subblock = *iprop; - - switch (mspi->subblock) { - default: - dev_warn(dev, "cell-index unspecified, assuming SPI1"); - /* fall through */ - case 0: - mspi->subblock = QE_CR_SUBBLOCK_SPI1; - break; - case 1: - mspi->subblock = QE_CR_SUBBLOCK_SPI2; - break; - } - } - - pram_ofs = fsl_spi_cpm_get_pram(mspi); - if (IS_ERR_VALUE(pram_ofs)) { - dev_err(dev, "can't allocate spi parameter ram\n"); - goto err_pram; - } - - bds_ofs = cpm_muram_alloc(sizeof(*mspi->tx_bd) + - sizeof(*mspi->rx_bd), 8); - if (IS_ERR_VALUE(bds_ofs)) { - dev_err(dev, "can't allocate bds\n"); - goto err_bds; - } - - mspi->dma_dummy_tx = dma_map_single(dev, empty_zero_page, PAGE_SIZE, - DMA_TO_DEVICE); - if (dma_mapping_error(dev, mspi->dma_dummy_tx)) { - dev_err(dev, "unable to map dummy tx buffer\n"); - goto err_dummy_tx; - } - - mspi->dma_dummy_rx = dma_map_single(dev, fsl_dummy_rx, SPI_MRBLR, - DMA_FROM_DEVICE); - if (dma_mapping_error(dev, mspi->dma_dummy_rx)) { - dev_err(dev, "unable to map dummy rx buffer\n"); - goto err_dummy_rx; - } - - mspi->pram = cpm_muram_addr(pram_ofs); - - mspi->tx_bd = cpm_muram_addr(bds_ofs); - mspi->rx_bd = cpm_muram_addr(bds_ofs + sizeof(*mspi->tx_bd)); - - /* Initialize parameter ram. 
*/ - out_be16(&mspi->pram->tbase, cpm_muram_offset(mspi->tx_bd)); - out_be16(&mspi->pram->rbase, cpm_muram_offset(mspi->rx_bd)); - out_8(&mspi->pram->tfcr, CPMFCR_EB | CPMFCR_GBL); - out_8(&mspi->pram->rfcr, CPMFCR_EB | CPMFCR_GBL); - out_be16(&mspi->pram->mrblr, SPI_MRBLR); - out_be32(&mspi->pram->rstate, 0); - out_be32(&mspi->pram->rdp, 0); - out_be16(&mspi->pram->rbptr, 0); - out_be16(&mspi->pram->rbc, 0); - out_be32(&mspi->pram->rxtmp, 0); - out_be32(&mspi->pram->tstate, 0); - out_be32(&mspi->pram->tdp, 0); - out_be16(&mspi->pram->tbptr, 0); - out_be16(&mspi->pram->tbc, 0); - out_be32(&mspi->pram->txtmp, 0); - - return 0; - -err_dummy_rx: - dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE); -err_dummy_tx: - cpm_muram_free(bds_ofs); -err_bds: - cpm_muram_free(pram_ofs); -err_pram: - fsl_spi_free_dummy_rx(); - return -ENOMEM; -} - -static void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi) -{ - struct device *dev = mspi->dev; - - dma_unmap_single(dev, mspi->dma_dummy_rx, SPI_MRBLR, DMA_FROM_DEVICE); - dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE); - cpm_muram_free(cpm_muram_offset(mspi->tx_bd)); - cpm_muram_free(cpm_muram_offset(mspi->pram)); - fsl_spi_free_dummy_rx(); -} - -static void fsl_spi_remove(struct mpc8xxx_spi *mspi) -{ - iounmap(mspi->reg_base); - fsl_spi_cpm_free(mspi); -} - -static struct spi_master * __devinit fsl_spi_probe(struct device *dev, - struct resource *mem, unsigned int irq) -{ - struct fsl_spi_platform_data *pdata = dev->platform_data; - struct spi_master *master; - struct mpc8xxx_spi *mpc8xxx_spi; - struct fsl_spi_reg *reg_base; - u32 regval; - int ret = 0; - - master = spi_alloc_master(dev, sizeof(struct mpc8xxx_spi)); - if (master == NULL) { - ret = -ENOMEM; - goto err; - } - - dev_set_drvdata(dev, master); - - ret = mpc8xxx_spi_probe(dev, mem, irq); - if (ret) - goto err_probe; - - master->setup = fsl_spi_setup; - - mpc8xxx_spi = spi_master_get_devdata(master); - mpc8xxx_spi->spi_do_one_msg = fsl_spi_do_one_msg; - mpc8xxx_spi->spi_remove = fsl_spi_remove; - - - ret = fsl_spi_cpm_init(mpc8xxx_spi); - if (ret) - goto err_cpm_init; - - if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) { - mpc8xxx_spi->rx_shift = 16; - mpc8xxx_spi->tx_shift = 24; - } - - mpc8xxx_spi->reg_base = ioremap(mem->start, resource_size(mem)); - if (mpc8xxx_spi->reg_base == NULL) { - ret = -ENOMEM; - goto err_ioremap; - } - - /* Register for SPI Interrupt */ - ret = request_irq(mpc8xxx_spi->irq, fsl_spi_irq, - 0, "fsl_spi", mpc8xxx_spi); - - if (ret != 0) - goto free_irq; - - reg_base = mpc8xxx_spi->reg_base; - - /* SPI controller initializations */ - mpc8xxx_spi_write_reg(®_base->mode, 0); - mpc8xxx_spi_write_reg(®_base->mask, 0); - mpc8xxx_spi_write_reg(®_base->command, 0); - mpc8xxx_spi_write_reg(®_base->event, 0xffffffff); - - /* Enable SPI interface */ - regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE; - if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) - regval |= SPMODE_OP; - - mpc8xxx_spi_write_reg(®_base->mode, regval); - - ret = spi_register_master(master); - if (ret < 0) - goto unreg_master; - - dev_info(dev, "at 0x%p (irq = %d), %s mode\n", reg_base, - mpc8xxx_spi->irq, mpc8xxx_spi_strmode(mpc8xxx_spi->flags)); - - return master; - -unreg_master: - free_irq(mpc8xxx_spi->irq, mpc8xxx_spi); -free_irq: - iounmap(mpc8xxx_spi->reg_base); -err_ioremap: - fsl_spi_cpm_free(mpc8xxx_spi); -err_cpm_init: -err_probe: - spi_master_put(master); -err: - return ERR_PTR(ret); -} - -static void fsl_spi_cs_control(struct spi_device *spi, bool on) -{ - struct 
device *dev = spi->dev.parent; - struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(dev->platform_data); - u16 cs = spi->chip_select; - int gpio = pinfo->gpios[cs]; - bool alow = pinfo->alow_flags[cs]; - - gpio_set_value(gpio, on ^ alow); -} - -static int of_fsl_spi_get_chipselects(struct device *dev) -{ - struct device_node *np = dev->of_node; - struct fsl_spi_platform_data *pdata = dev->platform_data; - struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata); - unsigned int ngpios; - int i = 0; - int ret; - - ngpios = of_gpio_count(np); - if (!ngpios) { - /* - * SPI w/o chip-select line. One SPI device is still permitted - * though. - */ - pdata->max_chipselect = 1; - return 0; - } - - pinfo->gpios = kmalloc(ngpios * sizeof(*pinfo->gpios), GFP_KERNEL); - if (!pinfo->gpios) - return -ENOMEM; - memset(pinfo->gpios, -1, ngpios * sizeof(*pinfo->gpios)); - - pinfo->alow_flags = kzalloc(ngpios * sizeof(*pinfo->alow_flags), - GFP_KERNEL); - if (!pinfo->alow_flags) { - ret = -ENOMEM; - goto err_alloc_flags; - } - - for (; i < ngpios; i++) { - int gpio; - enum of_gpio_flags flags; - - gpio = of_get_gpio_flags(np, i, &flags); - if (!gpio_is_valid(gpio)) { - dev_err(dev, "invalid gpio #%d: %d\n", i, gpio); - ret = gpio; - goto err_loop; - } - - ret = gpio_request(gpio, dev_name(dev)); - if (ret) { - dev_err(dev, "can't request gpio #%d: %d\n", i, ret); - goto err_loop; - } - - pinfo->gpios[i] = gpio; - pinfo->alow_flags[i] = flags & OF_GPIO_ACTIVE_LOW; - - ret = gpio_direction_output(pinfo->gpios[i], - pinfo->alow_flags[i]); - if (ret) { - dev_err(dev, "can't set output direction for gpio " - "#%d: %d\n", i, ret); - goto err_loop; - } - } - - pdata->max_chipselect = ngpios; - pdata->cs_control = fsl_spi_cs_control; - - return 0; - -err_loop: - while (i >= 0) { - if (gpio_is_valid(pinfo->gpios[i])) - gpio_free(pinfo->gpios[i]); - i--; - } - - kfree(pinfo->alow_flags); - pinfo->alow_flags = NULL; -err_alloc_flags: - kfree(pinfo->gpios); - pinfo->gpios = NULL; - return ret; -} - -static int of_fsl_spi_free_chipselects(struct device *dev) -{ - struct fsl_spi_platform_data *pdata = dev->platform_data; - struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata); - int i; - - if (!pinfo->gpios) - return 0; - - for (i = 0; i < pdata->max_chipselect; i++) { - if (gpio_is_valid(pinfo->gpios[i])) - gpio_free(pinfo->gpios[i]); - } - - kfree(pinfo->gpios); - kfree(pinfo->alow_flags); - return 0; -} - -static int __devinit of_fsl_spi_probe(struct platform_device *ofdev) -{ - struct device *dev = &ofdev->dev; - struct device_node *np = ofdev->dev.of_node; - struct spi_master *master; - struct resource mem; - struct resource irq; - int ret = -ENOMEM; - - ret = of_mpc8xxx_spi_probe(ofdev); - if (ret) - return ret; - - ret = of_fsl_spi_get_chipselects(dev); - if (ret) - goto err; - - ret = of_address_to_resource(np, 0, &mem); - if (ret) - goto err; - - ret = of_irq_to_resource(np, 0, &irq); - if (!ret) { - ret = -EINVAL; - goto err; - } - - master = fsl_spi_probe(dev, &mem, irq.start); - if (IS_ERR(master)) { - ret = PTR_ERR(master); - goto err; - } - - return 0; - -err: - of_fsl_spi_free_chipselects(dev); - return ret; -} - -static int __devexit of_fsl_spi_remove(struct platform_device *ofdev) -{ - int ret; - - ret = mpc8xxx_spi_remove(&ofdev->dev); - if (ret) - return ret; - of_fsl_spi_free_chipselects(&ofdev->dev); - return 0; -} - -static const struct of_device_id of_fsl_spi_match[] = { - { .compatible = "fsl,spi" }, - {} -}; -MODULE_DEVICE_TABLE(of, of_fsl_spi_match); - -static struct platform_driver 
of_fsl_spi_driver = { - .driver = { - .name = "fsl_spi", - .owner = THIS_MODULE, - .of_match_table = of_fsl_spi_match, - }, - .probe = of_fsl_spi_probe, - .remove = __devexit_p(of_fsl_spi_remove), -}; - -#ifdef CONFIG_MPC832x_RDB -/* - * XXX XXX XXX - * This is "legacy" platform driver, was used by the MPC8323E-RDB boards - * only. The driver should go away soon, since newer MPC8323E-RDB's device - * tree can work with OpenFirmware driver. But for now we support old trees - * as well. - */ -static int __devinit plat_mpc8xxx_spi_probe(struct platform_device *pdev) -{ - struct resource *mem; - int irq; - struct spi_master *master; - - if (!pdev->dev.platform_data) - return -EINVAL; - - mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!mem) - return -EINVAL; - - irq = platform_get_irq(pdev, 0); - if (irq <= 0) - return -EINVAL; - - master = fsl_spi_probe(&pdev->dev, mem, irq); - if (IS_ERR(master)) - return PTR_ERR(master); - return 0; -} - -static int __devexit plat_mpc8xxx_spi_remove(struct platform_device *pdev) -{ - return mpc8xxx_spi_remove(&pdev->dev); -} - -MODULE_ALIAS("platform:mpc8xxx_spi"); -static struct platform_driver mpc8xxx_spi_driver = { - .probe = plat_mpc8xxx_spi_probe, - .remove = __devexit_p(plat_mpc8xxx_spi_remove), - .driver = { - .name = "mpc8xxx_spi", - .owner = THIS_MODULE, - }, -}; - -static bool legacy_driver_failed; - -static void __init legacy_driver_register(void) -{ - legacy_driver_failed = platform_driver_register(&mpc8xxx_spi_driver); -} - -static void __exit legacy_driver_unregister(void) -{ - if (legacy_driver_failed) - return; - platform_driver_unregister(&mpc8xxx_spi_driver); -} -#else -static void __init legacy_driver_register(void) {} -static void __exit legacy_driver_unregister(void) {} -#endif /* CONFIG_MPC832x_RDB */ - -static int __init fsl_spi_init(void) -{ - legacy_driver_register(); - return platform_driver_register(&of_fsl_spi_driver); -} -module_init(fsl_spi_init); - -static void __exit fsl_spi_exit(void) -{ - platform_driver_unregister(&of_fsl_spi_driver); - legacy_driver_unregister(); -} -module_exit(fsl_spi_exit); - -MODULE_AUTHOR("Kumar Gala"); -MODULE_DESCRIPTION("Simple Freescale SPI Driver"); -MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi_gpio.c b/drivers/spi/spi_gpio.c deleted file mode 100644 index 63e51b0..0000000 --- a/drivers/spi/spi_gpio.c +++ /dev/null @@ -1,429 +0,0 @@ -/* - * spi_gpio.c - SPI master driver using generic bitbanged GPIO - * - * Copyright (C) 2006,2008 David Brownell - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - */ -#include -#include -#include -#include - -#include -#include -#include - - -/* - * This bitbanging SPI master driver should help make systems usable - * when a native hardware SPI engine is not available, perhaps because - * its driver isn't yet working or because the I/O pins it requires - * are used for other purposes. 
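As the header comment below explains, this bitbanging master is configured entirely through platform data, with each device's chip-select GPIO carried in controller_data. For the generic (non-inlined) build, a board file might describe one bus and one attached device roughly as follows; the GPIO numbers, bus number and spidev device are made-up examples, registered from board init code with platform_device_register() and spi_register_board_info().

#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_gpio.h>

/* Made-up GPIO numbers for an imaginary board. */
static struct spi_gpio_platform_data example_spi_gpio_pdata = {
	.sck		= 121,
	.mosi		= 120,
	.miso		= 119,
	.num_chipselect	= 1,
};

static struct platform_device example_spi_gpio_device = {
	.name	= "spi_gpio",	/* DRIVER_NAME in the generic build */
	.id	= 2,		/* becomes master->bus_num */
	.dev	= {
		.platform_data	= &example_spi_gpio_pdata,
	},
};

/* The chip-select GPIO rides along in controller_data. */
static struct spi_board_info example_spi_devices[] __initdata = {
	{
		.modalias	 = "spidev",
		.max_speed_hz	 = 500000,
		.bus_num	 = 2,
		.chip_select	 = 0,
		.mode		 = SPI_MODE_0,
		.controller_data = (void *)122,	/* CS GPIO number */
	},
};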
- * - * platform_device->driver_data ... points to spi_gpio - * - * spi->controller_state ... reserved for bitbang framework code - * spi->controller_data ... holds chipselect GPIO - * - * spi->master->dev.driver_data ... points to spi_gpio->bitbang - */ - -struct spi_gpio { - struct spi_bitbang bitbang; - struct spi_gpio_platform_data pdata; - struct platform_device *pdev; -}; - -/*----------------------------------------------------------------------*/ - -/* - * Because the overhead of going through four GPIO procedure calls - * per transferred bit can make performance a problem, this code - * is set up so that you can use it in either of two ways: - * - * - The slow generic way: set up platform_data to hold the GPIO - * numbers used for MISO/MOSI/SCK, and issue procedure calls for - * each of them. This driver can handle several such busses. - * - * - The quicker inlined way: only helps with platform GPIO code - * that inlines operations for constant GPIOs. This can give - * you tight (fast!) inner loops, but each such bus needs a - * new driver. You'll define a new C file, with Makefile and - * Kconfig support; the C code can be a total of six lines: - * - * #define DRIVER_NAME "myboard_spi2" - * #define SPI_MISO_GPIO 119 - * #define SPI_MOSI_GPIO 120 - * #define SPI_SCK_GPIO 121 - * #define SPI_N_CHIPSEL 4 - * #include "spi_gpio.c" - */ - -#ifndef DRIVER_NAME -#define DRIVER_NAME "spi_gpio" - -#define GENERIC_BITBANG /* vs tight inlines */ - -/* all functions referencing these symbols must define pdata */ -#define SPI_MISO_GPIO ((pdata)->miso) -#define SPI_MOSI_GPIO ((pdata)->mosi) -#define SPI_SCK_GPIO ((pdata)->sck) - -#define SPI_N_CHIPSEL ((pdata)->num_chipselect) - -#endif - -/*----------------------------------------------------------------------*/ - -static inline const struct spi_gpio_platform_data * __pure -spi_to_pdata(const struct spi_device *spi) -{ - const struct spi_bitbang *bang; - const struct spi_gpio *spi_gpio; - - bang = spi_master_get_devdata(spi->master); - spi_gpio = container_of(bang, struct spi_gpio, bitbang); - return &spi_gpio->pdata; -} - -/* this is #defined to avoid unused-variable warnings when inlining */ -#define pdata spi_to_pdata(spi) - -static inline void setsck(const struct spi_device *spi, int is_on) -{ - gpio_set_value(SPI_SCK_GPIO, is_on); -} - -static inline void setmosi(const struct spi_device *spi, int is_on) -{ - gpio_set_value(SPI_MOSI_GPIO, is_on); -} - -static inline int getmiso(const struct spi_device *spi) -{ - return !!gpio_get_value(SPI_MISO_GPIO); -} - -#undef pdata - -/* - * NOTE: this clocks "as fast as we can". It "should" be a function of the - * requested device clock. Software overhead means we usually have trouble - * reaching even one Mbit/sec (except when we can inline bitops), so for now - * we'll just assume we never need additional per-bit slowdowns. - */ -#define spidelay(nsecs) do {} while (0) - -#include "spi_bitbang_txrx.h" - -/* - * These functions can leverage inline expansion of GPIO calls to shrink - * costs for a txrx bit, often by factors of around ten (by instruction - * count). That is particularly visible for larger word sizes, but helps - * even with default 8-bit words. - * - * REVISIT overheads calling these functions for each word also have - * significant performance costs. Having txrx_bufs() calls that inline - * the txrx_word() logic would help performance, e.g. on larger blocks - * used with flash storage or MMC/SD. 
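The four txrx_word callbacks defined below differ only in the clock phase routine they call and the idle clock polarity they pass, and the bitbang core picks one of them using the CPOL/CPHA bits of spi->mode. The tiny standalone check below spells out that mapping; the bit values are quoted from <linux/spi/spi.h> of this era and should be treated as an assumption rather than part of this patch.

#include <stdio.h>

#define SPI_CPHA	0x01	/* clock phase: sample on the second edge */
#define SPI_CPOL	0x02	/* clock polarity: clock idles high */

int main(void)
{
	unsigned mode;

	/* SPI_MODE_0..3 are just the four CPOL/CPHA combinations, which is
	 * why txrx_word[] can be indexed by these two mode bits. */
	for (mode = 0; mode < 4; mode++)
		printf("SPI_MODE_%u: CPOL=%u CPHA=%u\n", mode,
		       !!(mode & SPI_CPOL), !!(mode & SPI_CPHA));
	return 0;
}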
There should also be ways to make - * GCC be less stupid about reloading registers inside the I/O loops, - * even without inlined GPIO calls; __attribute__((hot)) on GCC 4.3? - */ - -static u32 spi_gpio_txrx_word_mode0(struct spi_device *spi, - unsigned nsecs, u32 word, u8 bits) -{ - return bitbang_txrx_be_cpha0(spi, nsecs, 0, 0, word, bits); -} - -static u32 spi_gpio_txrx_word_mode1(struct spi_device *spi, - unsigned nsecs, u32 word, u8 bits) -{ - return bitbang_txrx_be_cpha1(spi, nsecs, 0, 0, word, bits); -} - -static u32 spi_gpio_txrx_word_mode2(struct spi_device *spi, - unsigned nsecs, u32 word, u8 bits) -{ - return bitbang_txrx_be_cpha0(spi, nsecs, 1, 0, word, bits); -} - -static u32 spi_gpio_txrx_word_mode3(struct spi_device *spi, - unsigned nsecs, u32 word, u8 bits) -{ - return bitbang_txrx_be_cpha1(spi, nsecs, 1, 0, word, bits); -} - -/* - * These functions do not call setmosi or getmiso if respective flag - * (SPI_MASTER_NO_RX or SPI_MASTER_NO_TX) is set, so they are safe to - * call when such pin is not present or defined in the controller. - * A separate set of callbacks is defined to get highest possible - * speed in the generic case (when both MISO and MOSI lines are - * available), as optimiser will remove the checks when argument is - * constant. - */ - -static u32 spi_gpio_spec_txrx_word_mode0(struct spi_device *spi, - unsigned nsecs, u32 word, u8 bits) -{ - unsigned flags = spi->master->flags; - return bitbang_txrx_be_cpha0(spi, nsecs, 0, flags, word, bits); -} - -static u32 spi_gpio_spec_txrx_word_mode1(struct spi_device *spi, - unsigned nsecs, u32 word, u8 bits) -{ - unsigned flags = spi->master->flags; - return bitbang_txrx_be_cpha1(spi, nsecs, 0, flags, word, bits); -} - -static u32 spi_gpio_spec_txrx_word_mode2(struct spi_device *spi, - unsigned nsecs, u32 word, u8 bits) -{ - unsigned flags = spi->master->flags; - return bitbang_txrx_be_cpha0(spi, nsecs, 1, flags, word, bits); -} - -static u32 spi_gpio_spec_txrx_word_mode3(struct spi_device *spi, - unsigned nsecs, u32 word, u8 bits) -{ - unsigned flags = spi->master->flags; - return bitbang_txrx_be_cpha1(spi, nsecs, 1, flags, word, bits); -} - -/*----------------------------------------------------------------------*/ - -static void spi_gpio_chipselect(struct spi_device *spi, int is_active) -{ - unsigned long cs = (unsigned long) spi->controller_data; - - /* set initial clock polarity */ - if (is_active) - setsck(spi, spi->mode & SPI_CPOL); - - if (cs != SPI_GPIO_NO_CHIPSELECT) { - /* SPI is normally active-low */ - gpio_set_value(cs, (spi->mode & SPI_CS_HIGH) ? 
is_active : !is_active); - } -} - -static int spi_gpio_setup(struct spi_device *spi) -{ - unsigned long cs = (unsigned long) spi->controller_data; - int status = 0; - - if (spi->bits_per_word > 32) - return -EINVAL; - - if (!spi->controller_state) { - if (cs != SPI_GPIO_NO_CHIPSELECT) { - status = gpio_request(cs, dev_name(&spi->dev)); - if (status) - return status; - status = gpio_direction_output(cs, spi->mode & SPI_CS_HIGH); - } - } - if (!status) - status = spi_bitbang_setup(spi); - if (status) { - if (!spi->controller_state && cs != SPI_GPIO_NO_CHIPSELECT) - gpio_free(cs); - } - return status; -} - -static void spi_gpio_cleanup(struct spi_device *spi) -{ - unsigned long cs = (unsigned long) spi->controller_data; - - if (cs != SPI_GPIO_NO_CHIPSELECT) - gpio_free(cs); - spi_bitbang_cleanup(spi); -} - -static int __init spi_gpio_alloc(unsigned pin, const char *label, bool is_in) -{ - int value; - - value = gpio_request(pin, label); - if (value == 0) { - if (is_in) - value = gpio_direction_input(pin); - else - value = gpio_direction_output(pin, 0); - } - return value; -} - -static int __init -spi_gpio_request(struct spi_gpio_platform_data *pdata, const char *label, - u16 *res_flags) -{ - int value; - - /* NOTE: SPI_*_GPIO symbols may reference "pdata" */ - - if (SPI_MOSI_GPIO != SPI_GPIO_NO_MOSI) { - value = spi_gpio_alloc(SPI_MOSI_GPIO, label, false); - if (value) - goto done; - } else { - /* HW configuration without MOSI pin */ - *res_flags |= SPI_MASTER_NO_TX; - } - - if (SPI_MISO_GPIO != SPI_GPIO_NO_MISO) { - value = spi_gpio_alloc(SPI_MISO_GPIO, label, true); - if (value) - goto free_mosi; - } else { - /* HW configuration without MISO pin */ - *res_flags |= SPI_MASTER_NO_RX; - } - - value = spi_gpio_alloc(SPI_SCK_GPIO, label, false); - if (value) - goto free_miso; - - goto done; - -free_miso: - if (SPI_MISO_GPIO != SPI_GPIO_NO_MISO) - gpio_free(SPI_MISO_GPIO); -free_mosi: - if (SPI_MOSI_GPIO != SPI_GPIO_NO_MOSI) - gpio_free(SPI_MOSI_GPIO); -done: - return value; -} - -static int __init spi_gpio_probe(struct platform_device *pdev) -{ - int status; - struct spi_master *master; - struct spi_gpio *spi_gpio; - struct spi_gpio_platform_data *pdata; - u16 master_flags = 0; - - pdata = pdev->dev.platform_data; -#ifdef GENERIC_BITBANG - if (!pdata || !pdata->num_chipselect) - return -ENODEV; -#endif - - status = spi_gpio_request(pdata, dev_name(&pdev->dev), &master_flags); - if (status < 0) - return status; - - master = spi_alloc_master(&pdev->dev, sizeof *spi_gpio); - if (!master) { - status = -ENOMEM; - goto gpio_free; - } - spi_gpio = spi_master_get_devdata(master); - platform_set_drvdata(pdev, spi_gpio); - - spi_gpio->pdev = pdev; - if (pdata) - spi_gpio->pdata = *pdata; - - master->flags = master_flags; - master->bus_num = pdev->id; - master->num_chipselect = SPI_N_CHIPSEL; - master->setup = spi_gpio_setup; - master->cleanup = spi_gpio_cleanup; - - spi_gpio->bitbang.master = spi_master_get(master); - spi_gpio->bitbang.chipselect = spi_gpio_chipselect; - - if ((master_flags & (SPI_MASTER_NO_TX | SPI_MASTER_NO_RX)) == 0) { - spi_gpio->bitbang.txrx_word[SPI_MODE_0] = spi_gpio_txrx_word_mode0; - spi_gpio->bitbang.txrx_word[SPI_MODE_1] = spi_gpio_txrx_word_mode1; - spi_gpio->bitbang.txrx_word[SPI_MODE_2] = spi_gpio_txrx_word_mode2; - spi_gpio->bitbang.txrx_word[SPI_MODE_3] = spi_gpio_txrx_word_mode3; - } else { - spi_gpio->bitbang.txrx_word[SPI_MODE_0] = spi_gpio_spec_txrx_word_mode0; - spi_gpio->bitbang.txrx_word[SPI_MODE_1] = spi_gpio_spec_txrx_word_mode1; - 
spi_gpio->bitbang.txrx_word[SPI_MODE_2] = spi_gpio_spec_txrx_word_mode2; - spi_gpio->bitbang.txrx_word[SPI_MODE_3] = spi_gpio_spec_txrx_word_mode3; - } - spi_gpio->bitbang.setup_transfer = spi_bitbang_setup_transfer; - spi_gpio->bitbang.flags = SPI_CS_HIGH; - - status = spi_bitbang_start(&spi_gpio->bitbang); - if (status < 0) { - spi_master_put(spi_gpio->bitbang.master); -gpio_free: - if (SPI_MISO_GPIO != SPI_GPIO_NO_MISO) - gpio_free(SPI_MISO_GPIO); - if (SPI_MOSI_GPIO != SPI_GPIO_NO_MOSI) - gpio_free(SPI_MOSI_GPIO); - gpio_free(SPI_SCK_GPIO); - spi_master_put(master); - } - - return status; -} - -static int __exit spi_gpio_remove(struct platform_device *pdev) -{ - struct spi_gpio *spi_gpio; - struct spi_gpio_platform_data *pdata; - int status; - - spi_gpio = platform_get_drvdata(pdev); - pdata = pdev->dev.platform_data; - - /* stop() unregisters child devices too */ - status = spi_bitbang_stop(&spi_gpio->bitbang); - spi_master_put(spi_gpio->bitbang.master); - - platform_set_drvdata(pdev, NULL); - - if (SPI_MISO_GPIO != SPI_GPIO_NO_MISO) - gpio_free(SPI_MISO_GPIO); - if (SPI_MOSI_GPIO != SPI_GPIO_NO_MOSI) - gpio_free(SPI_MOSI_GPIO); - gpio_free(SPI_SCK_GPIO); - - return status; -} - -MODULE_ALIAS("platform:" DRIVER_NAME); - -static struct platform_driver spi_gpio_driver = { - .driver.name = DRIVER_NAME, - .driver.owner = THIS_MODULE, - .remove = __exit_p(spi_gpio_remove), -}; - -static int __init spi_gpio_init(void) -{ - return platform_driver_probe(&spi_gpio_driver, spi_gpio_probe); -} -module_init(spi_gpio_init); - -static void __exit spi_gpio_exit(void) -{ - platform_driver_unregister(&spi_gpio_driver); -} -module_exit(spi_gpio_exit); - - -MODULE_DESCRIPTION("SPI master driver using generic bitbanged GPIO "); -MODULE_AUTHOR("David Brownell"); -MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi_imx.c b/drivers/spi/spi_imx.c deleted file mode 100644 index 69d6dba..0000000 --- a/drivers/spi/spi_imx.c +++ /dev/null @@ -1,944 +0,0 @@ -/* - * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved. - * Copyright (C) 2008 Juergen Beisert - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the - * Free Software Foundation - * 51 Franklin Street, Fifth Floor - * Boston, MA 02110-1301, USA. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#define DRIVER_NAME "spi_imx" - -#define MXC_CSPIRXDATA 0x00 -#define MXC_CSPITXDATA 0x04 -#define MXC_CSPICTRL 0x08 -#define MXC_CSPIINT 0x0c -#define MXC_RESET 0x1c - -#define MX3_CSPISTAT 0x14 -#define MX3_CSPISTAT_RR (1 << 3) - -/* generic defines to abstract from the different register layouts */ -#define MXC_INT_RR (1 << 0) /* Receive data ready interrupt */ -#define MXC_INT_TE (1 << 1) /* Transmit FIFO empty interrupt */ - -struct spi_imx_config { - unsigned int speed_hz; - unsigned int bpw; - unsigned int mode; - u8 cs; -}; - -enum spi_imx_devtype { - SPI_IMX_VER_IMX1, - SPI_IMX_VER_0_0, - SPI_IMX_VER_0_4, - SPI_IMX_VER_0_5, - SPI_IMX_VER_0_7, - SPI_IMX_VER_2_3, -}; - -struct spi_imx_data; - -struct spi_imx_devtype_data { - void (*intctrl)(struct spi_imx_data *, int); - int (*config)(struct spi_imx_data *, struct spi_imx_config *); - void (*trigger)(struct spi_imx_data *); - int (*rx_available)(struct spi_imx_data *); - void (*reset)(struct spi_imx_data *); - unsigned int fifosize; -}; - -struct spi_imx_data { - struct spi_bitbang bitbang; - - struct completion xfer_done; - void *base; - int irq; - struct clk *clk; - unsigned long spi_clk; - int *chipselect; - - unsigned int count; - void (*tx)(struct spi_imx_data *); - void (*rx)(struct spi_imx_data *); - void *rx_buf; - const void *tx_buf; - unsigned int txfifo; /* number of words pushed in tx FIFO */ - - struct spi_imx_devtype_data devtype_data; -}; - -#define MXC_SPI_BUF_RX(type) \ -static void spi_imx_buf_rx_##type(struct spi_imx_data *spi_imx) \ -{ \ - unsigned int val = readl(spi_imx->base + MXC_CSPIRXDATA); \ - \ - if (spi_imx->rx_buf) { \ - *(type *)spi_imx->rx_buf = val; \ - spi_imx->rx_buf += sizeof(type); \ - } \ -} - -#define MXC_SPI_BUF_TX(type) \ -static void spi_imx_buf_tx_##type(struct spi_imx_data *spi_imx) \ -{ \ - type val = 0; \ - \ - if (spi_imx->tx_buf) { \ - val = *(type *)spi_imx->tx_buf; \ - spi_imx->tx_buf += sizeof(type); \ - } \ - \ - spi_imx->count -= sizeof(type); \ - \ - writel(val, spi_imx->base + MXC_CSPITXDATA); \ -} - -MXC_SPI_BUF_RX(u8) -MXC_SPI_BUF_TX(u8) -MXC_SPI_BUF_RX(u16) -MXC_SPI_BUF_TX(u16) -MXC_SPI_BUF_RX(u32) -MXC_SPI_BUF_TX(u32) - -/* First entry is reserved, second entry is valid only if SDHC_SPIEN is set - * (which is currently not the case in this driver) - */ -static int mxc_clkdivs[] = {0, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192, - 256, 384, 512, 768, 1024}; - -/* MX21, MX27 */ -static unsigned int spi_imx_clkdiv_1(unsigned int fin, - unsigned int fspi) -{ - int i, max; - - if (cpu_is_mx21()) - max = 18; - else - max = 16; - - for (i = 2; i < max; i++) - if (fspi * mxc_clkdivs[i] >= fin) - return i; - - return max; -} - -/* MX1, MX31, MX35, MX51 CSPI */ -static unsigned int spi_imx_clkdiv_2(unsigned int fin, - unsigned int fspi) -{ - int i, div = 4; - - for (i = 0; i < 7; i++) { - if (fspi * div >= fin) - return i; - div <<= 1; - } - - return 7; -} - -#define SPI_IMX2_3_CTRL 0x08 -#define SPI_IMX2_3_CTRL_ENABLE (1 << 0) -#define SPI_IMX2_3_CTRL_XCH (1 << 2) -#define SPI_IMX2_3_CTRL_MODE_MASK (0xf << 4) -#define SPI_IMX2_3_CTRL_POSTDIV_OFFSET 8 -#define SPI_IMX2_3_CTRL_PREDIV_OFFSET 12 -#define SPI_IMX2_3_CTRL_CS(cs) ((cs) << 18) -#define SPI_IMX2_3_CTRL_BL_OFFSET 20 - -#define SPI_IMX2_3_CONFIG 0x0c -#define SPI_IMX2_3_CONFIG_SCLKPHA(cs) (1 << ((cs) + 0)) -#define SPI_IMX2_3_CONFIG_SCLKPOL(cs) 
(1 << ((cs) + 4)) -#define SPI_IMX2_3_CONFIG_SBBCTRL(cs) (1 << ((cs) + 8)) -#define SPI_IMX2_3_CONFIG_SSBPOL(cs) (1 << ((cs) + 12)) - -#define SPI_IMX2_3_INT 0x10 -#define SPI_IMX2_3_INT_TEEN (1 << 0) -#define SPI_IMX2_3_INT_RREN (1 << 3) - -#define SPI_IMX2_3_STAT 0x18 -#define SPI_IMX2_3_STAT_RR (1 << 3) - -/* MX51 eCSPI */ -static unsigned int spi_imx2_3_clkdiv(unsigned int fin, unsigned int fspi) -{ - /* - * there are two 4-bit dividers, the pre-divider divides by - * $pre, the post-divider by 2^$post - */ - unsigned int pre, post; - - if (unlikely(fspi > fin)) - return 0; - - post = fls(fin) - fls(fspi); - if (fin > fspi << post) - post++; - - /* now we have: (fin <= fspi << post) with post being minimal */ - - post = max(4U, post) - 4; - if (unlikely(post > 0xf)) { - pr_err("%s: cannot set clock freq: %u (base freq: %u)\n", - __func__, fspi, fin); - return 0xff; - } - - pre = DIV_ROUND_UP(fin, fspi << post) - 1; - - pr_debug("%s: fin: %u, fspi: %u, post: %u, pre: %u\n", - __func__, fin, fspi, post, pre); - return (pre << SPI_IMX2_3_CTRL_PREDIV_OFFSET) | - (post << SPI_IMX2_3_CTRL_POSTDIV_OFFSET); -} - -static void __maybe_unused spi_imx2_3_intctrl(struct spi_imx_data *spi_imx, int enable) -{ - unsigned val = 0; - - if (enable & MXC_INT_TE) - val |= SPI_IMX2_3_INT_TEEN; - - if (enable & MXC_INT_RR) - val |= SPI_IMX2_3_INT_RREN; - - writel(val, spi_imx->base + SPI_IMX2_3_INT); -} - -static void __maybe_unused spi_imx2_3_trigger(struct spi_imx_data *spi_imx) -{ - u32 reg; - - reg = readl(spi_imx->base + SPI_IMX2_3_CTRL); - reg |= SPI_IMX2_3_CTRL_XCH; - writel(reg, spi_imx->base + SPI_IMX2_3_CTRL); -} - -static int __maybe_unused spi_imx2_3_config(struct spi_imx_data *spi_imx, - struct spi_imx_config *config) -{ - u32 ctrl = SPI_IMX2_3_CTRL_ENABLE, cfg = 0; - - /* - * The hardware seems to have a race condition when changing modes. The - * current assumption is that the selection of the channel arrives - * earlier in the hardware than the mode bits when they are written at - * the same time. - * So set master mode for all channels as we do not support slave mode. 
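The pre/post divider selection in spi_imx2_3_clkdiv() above picks the smallest power-of-two post-divider that brings the 4-bit pre-divider into range. The standalone re-run of that arithmetic below (with a local fls() and made-up input frequencies, and without the driver's fspi > fin and post > 0xf guards) shows that asking for 1 MHz from a 66 MHz reference ends up at 66 MHz / (9 * 8), roughly 917 kHz.

#include <stdio.h>

/* Minimal fls(): 1-based index of the highest set bit, 0 for 0. */
static unsigned fls_u32(unsigned x)
{
	unsigned r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

/* Re-run of the pre/post divider choice in spi_imx2_3_clkdiv(). */
static void pick_divider(unsigned fin, unsigned fspi)
{
	unsigned post, pre;

	post = fls_u32(fin) - fls_u32(fspi);
	if (fin > (fspi << post))
		post++;
	post = (post > 4 ? post : 4) - 4;	/* max(4U, post) - 4 */

	/* DIV_ROUND_UP(fin, fspi << post) - 1 */
	pre = (fin + (fspi << post) - 1) / (fspi << post) - 1;

	printf("fin=%u fspi=%u -> pre=%u post=%u, actual=%u Hz\n",
	       fin, fspi, pre, post, fin / ((pre + 1) << post));
}

int main(void)
{
	pick_divider(66000000, 1000000);	/* example figures only */
	return 0;
}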
- */ - ctrl |= SPI_IMX2_3_CTRL_MODE_MASK; - - /* set clock speed */ - ctrl |= spi_imx2_3_clkdiv(spi_imx->spi_clk, config->speed_hz); - - /* set chip select to use */ - ctrl |= SPI_IMX2_3_CTRL_CS(config->cs); - - ctrl |= (config->bpw - 1) << SPI_IMX2_3_CTRL_BL_OFFSET; - - cfg |= SPI_IMX2_3_CONFIG_SBBCTRL(config->cs); - - if (config->mode & SPI_CPHA) - cfg |= SPI_IMX2_3_CONFIG_SCLKPHA(config->cs); - - if (config->mode & SPI_CPOL) - cfg |= SPI_IMX2_3_CONFIG_SCLKPOL(config->cs); - - if (config->mode & SPI_CS_HIGH) - cfg |= SPI_IMX2_3_CONFIG_SSBPOL(config->cs); - - writel(ctrl, spi_imx->base + SPI_IMX2_3_CTRL); - writel(cfg, spi_imx->base + SPI_IMX2_3_CONFIG); - - return 0; -} - -static int __maybe_unused spi_imx2_3_rx_available(struct spi_imx_data *spi_imx) -{ - return readl(spi_imx->base + SPI_IMX2_3_STAT) & SPI_IMX2_3_STAT_RR; -} - -static void __maybe_unused spi_imx2_3_reset(struct spi_imx_data *spi_imx) -{ - /* drain receive buffer */ - while (spi_imx2_3_rx_available(spi_imx)) - readl(spi_imx->base + MXC_CSPIRXDATA); -} - -#define MX31_INTREG_TEEN (1 << 0) -#define MX31_INTREG_RREN (1 << 3) - -#define MX31_CSPICTRL_ENABLE (1 << 0) -#define MX31_CSPICTRL_MASTER (1 << 1) -#define MX31_CSPICTRL_XCH (1 << 2) -#define MX31_CSPICTRL_POL (1 << 4) -#define MX31_CSPICTRL_PHA (1 << 5) -#define MX31_CSPICTRL_SSCTL (1 << 6) -#define MX31_CSPICTRL_SSPOL (1 << 7) -#define MX31_CSPICTRL_BC_SHIFT 8 -#define MX35_CSPICTRL_BL_SHIFT 20 -#define MX31_CSPICTRL_CS_SHIFT 24 -#define MX35_CSPICTRL_CS_SHIFT 12 -#define MX31_CSPICTRL_DR_SHIFT 16 - -#define MX31_CSPISTATUS 0x14 -#define MX31_STATUS_RR (1 << 3) - -/* These functions also work for the i.MX35, but be aware that - * the i.MX35 has a slightly different register layout for bits - * we do not use here. - */ -static void __maybe_unused mx31_intctrl(struct spi_imx_data *spi_imx, int enable) -{ - unsigned int val = 0; - - if (enable & MXC_INT_TE) - val |= MX31_INTREG_TEEN; - if (enable & MXC_INT_RR) - val |= MX31_INTREG_RREN; - - writel(val, spi_imx->base + MXC_CSPIINT); -} - -static void __maybe_unused mx31_trigger(struct spi_imx_data *spi_imx) -{ - unsigned int reg; - - reg = readl(spi_imx->base + MXC_CSPICTRL); - reg |= MX31_CSPICTRL_XCH; - writel(reg, spi_imx->base + MXC_CSPICTRL); -} - -static int __maybe_unused spi_imx0_4_config(struct spi_imx_data *spi_imx, - struct spi_imx_config *config) -{ - unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_MASTER; - int cs = spi_imx->chipselect[config->cs]; - - reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, config->speed_hz) << - MX31_CSPICTRL_DR_SHIFT; - - reg |= (config->bpw - 1) << MX31_CSPICTRL_BC_SHIFT; - - if (config->mode & SPI_CPHA) - reg |= MX31_CSPICTRL_PHA; - if (config->mode & SPI_CPOL) - reg |= MX31_CSPICTRL_POL; - if (config->mode & SPI_CS_HIGH) - reg |= MX31_CSPICTRL_SSPOL; - if (cs < 0) - reg |= (cs + 32) << MX31_CSPICTRL_CS_SHIFT; - - writel(reg, spi_imx->base + MXC_CSPICTRL); - - return 0; -} - -static int __maybe_unused spi_imx0_7_config(struct spi_imx_data *spi_imx, - struct spi_imx_config *config) -{ - unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_MASTER; - int cs = spi_imx->chipselect[config->cs]; - - reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, config->speed_hz) << - MX31_CSPICTRL_DR_SHIFT; - - reg |= (config->bpw - 1) << MX35_CSPICTRL_BL_SHIFT; - reg |= MX31_CSPICTRL_SSCTL; - - if (config->mode & SPI_CPHA) - reg |= MX31_CSPICTRL_PHA; - if (config->mode & SPI_CPOL) - reg |= MX31_CSPICTRL_POL; - if (config->mode & SPI_CS_HIGH) - reg |= MX31_CSPICTRL_SSPOL; - if (cs < 0) - reg |= (cs + 
32) << MX35_CSPICTRL_CS_SHIFT; - - writel(reg, spi_imx->base + MXC_CSPICTRL); - - return 0; -} - -static int __maybe_unused mx31_rx_available(struct spi_imx_data *spi_imx) -{ - return readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR; -} - -static void __maybe_unused spi_imx0_4_reset(struct spi_imx_data *spi_imx) -{ - /* drain receive buffer */ - while (readl(spi_imx->base + MX3_CSPISTAT) & MX3_CSPISTAT_RR) - readl(spi_imx->base + MXC_CSPIRXDATA); -} - -#define MX27_INTREG_RR (1 << 4) -#define MX27_INTREG_TEEN (1 << 9) -#define MX27_INTREG_RREN (1 << 13) - -#define MX27_CSPICTRL_POL (1 << 5) -#define MX27_CSPICTRL_PHA (1 << 6) -#define MX27_CSPICTRL_SSPOL (1 << 8) -#define MX27_CSPICTRL_XCH (1 << 9) -#define MX27_CSPICTRL_ENABLE (1 << 10) -#define MX27_CSPICTRL_MASTER (1 << 11) -#define MX27_CSPICTRL_DR_SHIFT 14 -#define MX27_CSPICTRL_CS_SHIFT 19 - -static void __maybe_unused mx27_intctrl(struct spi_imx_data *spi_imx, int enable) -{ - unsigned int val = 0; - - if (enable & MXC_INT_TE) - val |= MX27_INTREG_TEEN; - if (enable & MXC_INT_RR) - val |= MX27_INTREG_RREN; - - writel(val, spi_imx->base + MXC_CSPIINT); -} - -static void __maybe_unused mx27_trigger(struct spi_imx_data *spi_imx) -{ - unsigned int reg; - - reg = readl(spi_imx->base + MXC_CSPICTRL); - reg |= MX27_CSPICTRL_XCH; - writel(reg, spi_imx->base + MXC_CSPICTRL); -} - -static int __maybe_unused mx27_config(struct spi_imx_data *spi_imx, - struct spi_imx_config *config) -{ - unsigned int reg = MX27_CSPICTRL_ENABLE | MX27_CSPICTRL_MASTER; - int cs = spi_imx->chipselect[config->cs]; - - reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, config->speed_hz) << - MX27_CSPICTRL_DR_SHIFT; - reg |= config->bpw - 1; - - if (config->mode & SPI_CPHA) - reg |= MX27_CSPICTRL_PHA; - if (config->mode & SPI_CPOL) - reg |= MX27_CSPICTRL_POL; - if (config->mode & SPI_CS_HIGH) - reg |= MX27_CSPICTRL_SSPOL; - if (cs < 0) - reg |= (cs + 32) << MX27_CSPICTRL_CS_SHIFT; - - writel(reg, spi_imx->base + MXC_CSPICTRL); - - return 0; -} - -static int __maybe_unused mx27_rx_available(struct spi_imx_data *spi_imx) -{ - return readl(spi_imx->base + MXC_CSPIINT) & MX27_INTREG_RR; -} - -static void __maybe_unused spi_imx0_0_reset(struct spi_imx_data *spi_imx) -{ - writel(1, spi_imx->base + MXC_RESET); -} - -#define MX1_INTREG_RR (1 << 3) -#define MX1_INTREG_TEEN (1 << 8) -#define MX1_INTREG_RREN (1 << 11) - -#define MX1_CSPICTRL_POL (1 << 4) -#define MX1_CSPICTRL_PHA (1 << 5) -#define MX1_CSPICTRL_XCH (1 << 8) -#define MX1_CSPICTRL_ENABLE (1 << 9) -#define MX1_CSPICTRL_MASTER (1 << 10) -#define MX1_CSPICTRL_DR_SHIFT 13 - -static void __maybe_unused mx1_intctrl(struct spi_imx_data *spi_imx, int enable) -{ - unsigned int val = 0; - - if (enable & MXC_INT_TE) - val |= MX1_INTREG_TEEN; - if (enable & MXC_INT_RR) - val |= MX1_INTREG_RREN; - - writel(val, spi_imx->base + MXC_CSPIINT); -} - -static void __maybe_unused mx1_trigger(struct spi_imx_data *spi_imx) -{ - unsigned int reg; - - reg = readl(spi_imx->base + MXC_CSPICTRL); - reg |= MX1_CSPICTRL_XCH; - writel(reg, spi_imx->base + MXC_CSPICTRL); -} - -static int __maybe_unused mx1_config(struct spi_imx_data *spi_imx, - struct spi_imx_config *config) -{ - unsigned int reg = MX1_CSPICTRL_ENABLE | MX1_CSPICTRL_MASTER; - - reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, config->speed_hz) << - MX1_CSPICTRL_DR_SHIFT; - reg |= config->bpw - 1; - - if (config->mode & SPI_CPHA) - reg |= MX1_CSPICTRL_PHA; - if (config->mode & SPI_CPOL) - reg |= MX1_CSPICTRL_POL; - - writel(reg, spi_imx->base + MXC_CSPICTRL); - - return 0; -} - -static 
int __maybe_unused mx1_rx_available(struct spi_imx_data *spi_imx) -{ - return readl(spi_imx->base + MXC_CSPIINT) & MX1_INTREG_RR; -} - -static void __maybe_unused mx1_reset(struct spi_imx_data *spi_imx) -{ - writel(1, spi_imx->base + MXC_RESET); -} - -/* - * These version numbers are taken from the Freescale driver. Unfortunately it - * doesn't support i.MX1, so this entry doesn't match the scheme. :-( - */ -static struct spi_imx_devtype_data spi_imx_devtype_data[] __devinitdata = { -#ifdef CONFIG_SPI_IMX_VER_IMX1 - [SPI_IMX_VER_IMX1] = { - .intctrl = mx1_intctrl, - .config = mx1_config, - .trigger = mx1_trigger, - .rx_available = mx1_rx_available, - .reset = mx1_reset, - .fifosize = 8, - }, -#endif -#ifdef CONFIG_SPI_IMX_VER_0_0 - [SPI_IMX_VER_0_0] = { - .intctrl = mx27_intctrl, - .config = mx27_config, - .trigger = mx27_trigger, - .rx_available = mx27_rx_available, - .reset = spi_imx0_0_reset, - .fifosize = 8, - }, -#endif -#ifdef CONFIG_SPI_IMX_VER_0_4 - [SPI_IMX_VER_0_4] = { - .intctrl = mx31_intctrl, - .config = spi_imx0_4_config, - .trigger = mx31_trigger, - .rx_available = mx31_rx_available, - .reset = spi_imx0_4_reset, - .fifosize = 8, - }, -#endif -#ifdef CONFIG_SPI_IMX_VER_0_7 - [SPI_IMX_VER_0_7] = { - .intctrl = mx31_intctrl, - .config = spi_imx0_7_config, - .trigger = mx31_trigger, - .rx_available = mx31_rx_available, - .reset = spi_imx0_4_reset, - .fifosize = 8, - }, -#endif -#ifdef CONFIG_SPI_IMX_VER_2_3 - [SPI_IMX_VER_2_3] = { - .intctrl = spi_imx2_3_intctrl, - .config = spi_imx2_3_config, - .trigger = spi_imx2_3_trigger, - .rx_available = spi_imx2_3_rx_available, - .reset = spi_imx2_3_reset, - .fifosize = 64, - }, -#endif -}; - -static void spi_imx_chipselect(struct spi_device *spi, int is_active) -{ - struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master); - int gpio = spi_imx->chipselect[spi->chip_select]; - int active = is_active != BITBANG_CS_INACTIVE; - int dev_is_lowactive = !(spi->mode & SPI_CS_HIGH); - - if (gpio < 0) - return; - - gpio_set_value(gpio, dev_is_lowactive ^ active); -} - -static void spi_imx_push(struct spi_imx_data *spi_imx) -{ - while (spi_imx->txfifo < spi_imx->devtype_data.fifosize) { - if (!spi_imx->count) - break; - spi_imx->tx(spi_imx); - spi_imx->txfifo++; - } - - spi_imx->devtype_data.trigger(spi_imx); -} - -static irqreturn_t spi_imx_isr(int irq, void *dev_id) -{ - struct spi_imx_data *spi_imx = dev_id; - - while (spi_imx->devtype_data.rx_available(spi_imx)) { - spi_imx->rx(spi_imx); - spi_imx->txfifo--; - } - - if (spi_imx->count) { - spi_imx_push(spi_imx); - return IRQ_HANDLED; - } - - if (spi_imx->txfifo) { - /* No data left to push, but still waiting for rx data, - * enable receive data available interrupt. - */ - spi_imx->devtype_data.intctrl( - spi_imx, MXC_INT_RR); - return IRQ_HANDLED; - } - - spi_imx->devtype_data.intctrl(spi_imx, 0); - complete(&spi_imx->xfer_done); - - return IRQ_HANDLED; -} - -static int spi_imx_setupxfer(struct spi_device *spi, - struct spi_transfer *t) -{ - struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master); - struct spi_imx_config config; - - config.bpw = t ? t->bits_per_word : spi->bits_per_word; - config.speed_hz = t ? 
t->speed_hz : spi->max_speed_hz; - config.mode = spi->mode; - config.cs = spi->chip_select; - - if (!config.speed_hz) - config.speed_hz = spi->max_speed_hz; - if (!config.bpw) - config.bpw = spi->bits_per_word; - if (!config.speed_hz) - config.speed_hz = spi->max_speed_hz; - - /* Initialize the functions for transfer */ - if (config.bpw <= 8) { - spi_imx->rx = spi_imx_buf_rx_u8; - spi_imx->tx = spi_imx_buf_tx_u8; - } else if (config.bpw <= 16) { - spi_imx->rx = spi_imx_buf_rx_u16; - spi_imx->tx = spi_imx_buf_tx_u16; - } else if (config.bpw <= 32) { - spi_imx->rx = spi_imx_buf_rx_u32; - spi_imx->tx = spi_imx_buf_tx_u32; - } else - BUG(); - - spi_imx->devtype_data.config(spi_imx, &config); - - return 0; -} - -static int spi_imx_transfer(struct spi_device *spi, - struct spi_transfer *transfer) -{ - struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master); - - spi_imx->tx_buf = transfer->tx_buf; - spi_imx->rx_buf = transfer->rx_buf; - spi_imx->count = transfer->len; - spi_imx->txfifo = 0; - - init_completion(&spi_imx->xfer_done); - - spi_imx_push(spi_imx); - - spi_imx->devtype_data.intctrl(spi_imx, MXC_INT_TE); - - wait_for_completion(&spi_imx->xfer_done); - - return transfer->len; -} - -static int spi_imx_setup(struct spi_device *spi) -{ - struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master); - int gpio = spi_imx->chipselect[spi->chip_select]; - - dev_dbg(&spi->dev, "%s: mode %d, %u bpw, %d hz\n", __func__, - spi->mode, spi->bits_per_word, spi->max_speed_hz); - - if (gpio >= 0) - gpio_direction_output(gpio, spi->mode & SPI_CS_HIGH ? 0 : 1); - - spi_imx_chipselect(spi, BITBANG_CS_INACTIVE); - - return 0; -} - -static void spi_imx_cleanup(struct spi_device *spi) -{ -} - -static struct platform_device_id spi_imx_devtype[] = { - { - .name = "imx1-cspi", - .driver_data = SPI_IMX_VER_IMX1, - }, { - .name = "imx21-cspi", - .driver_data = SPI_IMX_VER_0_0, - }, { - .name = "imx25-cspi", - .driver_data = SPI_IMX_VER_0_7, - }, { - .name = "imx27-cspi", - .driver_data = SPI_IMX_VER_0_0, - }, { - .name = "imx31-cspi", - .driver_data = SPI_IMX_VER_0_4, - }, { - .name = "imx35-cspi", - .driver_data = SPI_IMX_VER_0_7, - }, { - .name = "imx51-cspi", - .driver_data = SPI_IMX_VER_0_7, - }, { - .name = "imx51-ecspi", - .driver_data = SPI_IMX_VER_2_3, - }, { - .name = "imx53-cspi", - .driver_data = SPI_IMX_VER_0_7, - }, { - .name = "imx53-ecspi", - .driver_data = SPI_IMX_VER_2_3, - }, { - /* sentinel */ - } -}; - -static int __devinit spi_imx_probe(struct platform_device *pdev) -{ - struct spi_imx_master *mxc_platform_info; - struct spi_master *master; - struct spi_imx_data *spi_imx; - struct resource *res; - int i, ret; - - mxc_platform_info = dev_get_platdata(&pdev->dev); - if (!mxc_platform_info) { - dev_err(&pdev->dev, "can't get the platform data\n"); - return -EINVAL; - } - - master = spi_alloc_master(&pdev->dev, sizeof(struct spi_imx_data)); - if (!master) - return -ENOMEM; - - platform_set_drvdata(pdev, master); - - master->bus_num = pdev->id; - master->num_chipselect = mxc_platform_info->num_chipselect; - - spi_imx = spi_master_get_devdata(master); - spi_imx->bitbang.master = spi_master_get(master); - spi_imx->chipselect = mxc_platform_info->chipselect; - - for (i = 0; i < master->num_chipselect; i++) { - if (spi_imx->chipselect[i] < 0) - continue; - ret = gpio_request(spi_imx->chipselect[i], DRIVER_NAME); - if (ret) { - while (i > 0) { - i--; - if (spi_imx->chipselect[i] >= 0) - gpio_free(spi_imx->chipselect[i]); - } - dev_err(&pdev->dev, "can't get cs gpios\n"); - goto 
out_master_put; - } - } - - spi_imx->bitbang.chipselect = spi_imx_chipselect; - spi_imx->bitbang.setup_transfer = spi_imx_setupxfer; - spi_imx->bitbang.txrx_bufs = spi_imx_transfer; - spi_imx->bitbang.master->setup = spi_imx_setup; - spi_imx->bitbang.master->cleanup = spi_imx_cleanup; - spi_imx->bitbang.master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; - - init_completion(&spi_imx->xfer_done); - - spi_imx->devtype_data = - spi_imx_devtype_data[pdev->id_entry->driver_data]; - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(&pdev->dev, "can't get platform resource\n"); - ret = -ENOMEM; - goto out_gpio_free; - } - - if (!request_mem_region(res->start, resource_size(res), pdev->name)) { - dev_err(&pdev->dev, "request_mem_region failed\n"); - ret = -EBUSY; - goto out_gpio_free; - } - - spi_imx->base = ioremap(res->start, resource_size(res)); - if (!spi_imx->base) { - ret = -EINVAL; - goto out_release_mem; - } - - spi_imx->irq = platform_get_irq(pdev, 0); - if (spi_imx->irq < 0) { - ret = -EINVAL; - goto out_iounmap; - } - - ret = request_irq(spi_imx->irq, spi_imx_isr, 0, DRIVER_NAME, spi_imx); - if (ret) { - dev_err(&pdev->dev, "can't get irq%d: %d\n", spi_imx->irq, ret); - goto out_iounmap; - } - - spi_imx->clk = clk_get(&pdev->dev, NULL); - if (IS_ERR(spi_imx->clk)) { - dev_err(&pdev->dev, "unable to get clock\n"); - ret = PTR_ERR(spi_imx->clk); - goto out_free_irq; - } - - clk_enable(spi_imx->clk); - spi_imx->spi_clk = clk_get_rate(spi_imx->clk); - - spi_imx->devtype_data.reset(spi_imx); - - spi_imx->devtype_data.intctrl(spi_imx, 0); - - ret = spi_bitbang_start(&spi_imx->bitbang); - if (ret) { - dev_err(&pdev->dev, "bitbang start failed with %d\n", ret); - goto out_clk_put; - } - - dev_info(&pdev->dev, "probed\n"); - - return ret; - -out_clk_put: - clk_disable(spi_imx->clk); - clk_put(spi_imx->clk); -out_free_irq: - free_irq(spi_imx->irq, spi_imx); -out_iounmap: - iounmap(spi_imx->base); -out_release_mem: - release_mem_region(res->start, resource_size(res)); -out_gpio_free: - for (i = 0; i < master->num_chipselect; i++) - if (spi_imx->chipselect[i] >= 0) - gpio_free(spi_imx->chipselect[i]); -out_master_put: - spi_master_put(master); - kfree(master); - platform_set_drvdata(pdev, NULL); - return ret; -} - -static int __devexit spi_imx_remove(struct platform_device *pdev) -{ - struct spi_master *master = platform_get_drvdata(pdev); - struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - struct spi_imx_data *spi_imx = spi_master_get_devdata(master); - int i; - - spi_bitbang_stop(&spi_imx->bitbang); - - writel(0, spi_imx->base + MXC_CSPICTRL); - clk_disable(spi_imx->clk); - clk_put(spi_imx->clk); - free_irq(spi_imx->irq, spi_imx); - iounmap(spi_imx->base); - - for (i = 0; i < master->num_chipselect; i++) - if (spi_imx->chipselect[i] >= 0) - gpio_free(spi_imx->chipselect[i]); - - spi_master_put(master); - - release_mem_region(res->start, resource_size(res)); - - platform_set_drvdata(pdev, NULL); - - return 0; -} - -static struct platform_driver spi_imx_driver = { - .driver = { - .name = DRIVER_NAME, - .owner = THIS_MODULE, - }, - .id_table = spi_imx_devtype, - .probe = spi_imx_probe, - .remove = __devexit_p(spi_imx_remove), -}; - -static int __init spi_imx_init(void) -{ - return platform_driver_register(&spi_imx_driver); -} - -static void __exit spi_imx_exit(void) -{ - platform_driver_unregister(&spi_imx_driver); -} - -module_init(spi_imx_init); -module_exit(spi_imx_exit); - -MODULE_DESCRIPTION("SPI Master Controller driver"); 
-MODULE_AUTHOR("Sascha Hauer, Pengutronix"); -MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi_lm70llp.c b/drivers/spi/spi_lm70llp.c deleted file mode 100644 index 7746a41..0000000 --- a/drivers/spi/spi_lm70llp.c +++ /dev/null @@ -1,351 +0,0 @@ -/* - * spi_lm70llp.c - driver for LM70EVAL-LLP board for the LM70 sensor - * - * Copyright (C) 2006 Kaiwan N Billimoria - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - */ - -#include -#include -#include -#include -#include -#include -#include -#include - - -#include -#include - - -/* - * The LM70 communicates with a host processor using a 3-wire variant of - * the SPI/Microwire bus interface. This driver specifically supports an - * NS LM70 LLP Evaluation Board, interfacing to a PC using its parallel - * port to bitbang an SPI-parport bridge. Accordingly, this is an SPI - * master controller driver. The hwmon/lm70 driver is a "SPI protocol - * driver", layered on top of this one and usable without the lm70llp. - * - * Datasheet and Schematic: - * The LM70 is a temperature sensor chip from National Semiconductor; its - * datasheet is available at http://www.national.com/pf/LM/LM70.html - * The schematic for this particular board (the LM70EVAL-LLP) is - * available (on page 4) here: - * http://www.national.com/appinfo/tempsensors/files/LM70LLPEVALmanual.pdf - * - * Also see Documentation/spi/spi-lm70llp. The SPI<->parport code here is - * (heavily) based on spi-butterfly by David Brownell. - * - * The LM70 LLP connects to the PC parallel port in the following manner: - * - * Parallel LM70 LLP - * Port Direction JP2 Header - * ----------- --------- ------------ - * D0 2 - - - * D1 3 --> V+ 5 - * D2 4 --> V+ 5 - * D3 5 --> V+ 5 - * D4 6 --> V+ 5 - * D5 7 --> nCS 8 - * D6 8 --> SCLK 3 - * D7 9 --> SI/O 5 - * GND 25 - GND 7 - * Select 13 <-- SI/O 1 - * - * Note that parport pin 13 actually gets inverted by the transistor - * arrangement which lets either the parport or the LM70 drive the - * SI/SO signal (see the schematic for details). - */ - -#define DRVNAME "spi-lm70llp" - -#define lm70_INIT 0xBE -#define SIO 0x10 -#define nCS 0x20 -#define SCLK 0x40 - -/*-------------------------------------------------------------------------*/ - -struct spi_lm70llp { - struct spi_bitbang bitbang; - struct parport *port; - struct pardevice *pd; - struct spi_device *spidev_lm70; - struct spi_board_info info; - //struct device *dev; -}; - -/* REVISIT : ugly global ; provides "exclusive open" facility */ -static struct spi_lm70llp *lm70llp; - - -/*-------------------------------------------------------------------*/ - -static inline struct spi_lm70llp *spidev_to_pp(struct spi_device *spi) -{ - return spi->controller_data; -} - -/*---------------------- LM70 LLP eval board-specific inlines follow */ - -/* NOTE: we don't actually need to reread the output values, since they'll - * still be what we wrote before. 
Plus, going through parport builds in - * a ~1ms/operation delay; these SPI transfers could easily be faster. - */ - -static inline void deassertCS(struct spi_lm70llp *pp) -{ - u8 data = parport_read_data(pp->port); - - data &= ~0x80; /* pull D7/SI-out low while de-asserted */ - parport_write_data(pp->port, data | nCS); -} - -static inline void assertCS(struct spi_lm70llp *pp) -{ - u8 data = parport_read_data(pp->port); - - data |= 0x80; /* pull D7/SI-out high so lm70 drives SO-in */ - parport_write_data(pp->port, data & ~nCS); -} - -static inline void clkHigh(struct spi_lm70llp *pp) -{ - u8 data = parport_read_data(pp->port); - parport_write_data(pp->port, data | SCLK); -} - -static inline void clkLow(struct spi_lm70llp *pp) -{ - u8 data = parport_read_data(pp->port); - parport_write_data(pp->port, data & ~SCLK); -} - -/*------------------------- SPI-LM70-specific inlines ----------------------*/ - -static inline void spidelay(unsigned d) -{ - udelay(d); -} - -static inline void setsck(struct spi_device *s, int is_on) -{ - struct spi_lm70llp *pp = spidev_to_pp(s); - - if (is_on) - clkHigh(pp); - else - clkLow(pp); -} - -static inline void setmosi(struct spi_device *s, int is_on) -{ - /* FIXME update D7 ... this way we can put the chip - * into shutdown mode and read the manufacturer ID, - * but we can't put it back into operational mode. - */ -} - -/* - * getmiso: - * Why do we return 0 when the SIO line is high and vice-versa? - * The fact is, the lm70 eval board from NS (which this driver drives), - * is wired in just such a way : when the lm70's SIO goes high, a transistor - * switches it to low reflecting this on the parport (pin 13), and vice-versa. - */ -static inline int getmiso(struct spi_device *s) -{ - struct spi_lm70llp *pp = spidev_to_pp(s); - return ((SIO == (parport_read_status(pp->port) & SIO)) ? 0 : 1 ); -} -/*--------------------------------------------------------------------*/ - -#include "spi_bitbang_txrx.h" - -static void lm70_chipselect(struct spi_device *spi, int value) -{ - struct spi_lm70llp *pp = spidev_to_pp(spi); - - if (value) - assertCS(pp); - else - deassertCS(pp); -} - -/* - * Our actual bitbanger routine. - */ -static u32 lm70_txrx(struct spi_device *spi, unsigned nsecs, u32 word, u8 bits) -{ - return bitbang_txrx_be_cpha0(spi, nsecs, 0, 0, word, bits); -} - -static void spi_lm70llp_attach(struct parport *p) -{ - struct pardevice *pd; - struct spi_lm70llp *pp; - struct spi_master *master; - int status; - - if (lm70llp) { - printk(KERN_WARNING - "%s: spi_lm70llp instance already loaded. Aborting.\n", - DRVNAME); - return; - } - - /* TODO: this just _assumes_ a lm70 is there ... no probe; - * the lm70 driver could verify it, reading the manf ID. - */ - - master = spi_alloc_master(p->physport->dev, sizeof *pp); - if (!master) { - status = -ENOMEM; - goto out_fail; - } - pp = spi_master_get_devdata(master); - - master->bus_num = -1; /* dynamic alloc of a bus number */ - master->num_chipselect = 1; - - /* - * SPI and bitbang hookup. - */ - pp->bitbang.master = spi_master_get(master); - pp->bitbang.chipselect = lm70_chipselect; - pp->bitbang.txrx_word[SPI_MODE_0] = lm70_txrx; - pp->bitbang.flags = SPI_3WIRE; - - /* - * Parport hookup - */ - pp->port = p; - pd = parport_register_device(p, DRVNAME, - NULL, NULL, NULL, - PARPORT_FLAG_EXCL, pp); - if (!pd) { - status = -ENOMEM; - goto out_free_master; - } - pp->pd = pd; - - status = parport_claim(pd); - if (status < 0) - goto out_parport_unreg; - - /* - * Start SPI ... 
- */ - status = spi_bitbang_start(&pp->bitbang); - if (status < 0) { - printk(KERN_WARNING - "%s: spi_bitbang_start failed with status %d\n", - DRVNAME, status); - goto out_off_and_release; - } - - /* - * The modalias name MUST match the device_driver name - * for the bus glue code to match and subsequently bind them. - * We are binding to the generic drivers/hwmon/lm70.c device - * driver. - */ - strcpy(pp->info.modalias, "lm70"); - pp->info.max_speed_hz = 6 * 1000 * 1000; - pp->info.chip_select = 0; - pp->info.mode = SPI_3WIRE | SPI_MODE_0; - - /* power up the chip, and let the LM70 control SI/SO */ - parport_write_data(pp->port, lm70_INIT); - - /* Enable access to our primary data structure via - * the board info's (void *)controller_data. - */ - pp->info.controller_data = pp; - pp->spidev_lm70 = spi_new_device(pp->bitbang.master, &pp->info); - if (pp->spidev_lm70) - dev_dbg(&pp->spidev_lm70->dev, "spidev_lm70 at %s\n", - dev_name(&pp->spidev_lm70->dev)); - else { - printk(KERN_WARNING "%s: spi_new_device failed\n", DRVNAME); - status = -ENODEV; - goto out_bitbang_stop; - } - pp->spidev_lm70->bits_per_word = 8; - - lm70llp = pp; - return; - -out_bitbang_stop: - spi_bitbang_stop(&pp->bitbang); -out_off_and_release: - /* power down */ - parport_write_data(pp->port, 0); - mdelay(10); - parport_release(pp->pd); -out_parport_unreg: - parport_unregister_device(pd); -out_free_master: - (void) spi_master_put(master); -out_fail: - pr_info("%s: spi_lm70llp probe fail, status %d\n", DRVNAME, status); -} - -static void spi_lm70llp_detach(struct parport *p) -{ - struct spi_lm70llp *pp; - - if (!lm70llp || lm70llp->port != p) - return; - - pp = lm70llp; - spi_bitbang_stop(&pp->bitbang); - - /* power down */ - parport_write_data(pp->port, 0); - - parport_release(pp->pd); - parport_unregister_device(pp->pd); - - (void) spi_master_put(pp->bitbang.master); - - lm70llp = NULL; -} - - -static struct parport_driver spi_lm70llp_drv = { - .name = DRVNAME, - .attach = spi_lm70llp_attach, - .detach = spi_lm70llp_detach, -}; - -static int __init init_spi_lm70llp(void) -{ - return parport_register_driver(&spi_lm70llp_drv); -} -module_init(init_spi_lm70llp); - -static void __exit cleanup_spi_lm70llp(void) -{ - parport_unregister_driver(&spi_lm70llp_drv); -} -module_exit(cleanup_spi_lm70llp); - -MODULE_AUTHOR("Kaiwan N Billimoria "); -MODULE_DESCRIPTION( - "Parport adapter for the National Semiconductor LM70 LLP eval board"); -MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi_nuc900.c b/drivers/spi/spi_nuc900.c deleted file mode 100644 index 3cd15f6..0000000 --- a/drivers/spi/spi_nuc900.c +++ /dev/null @@ -1,505 +0,0 @@ -/* linux/drivers/spi/spi_nuc900.c - * - * Copyright (c) 2009 Nuvoton technology. - * Wan ZongShun - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- * -*/ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include - -/* usi registers offset */ -#define USI_CNT 0x00 -#define USI_DIV 0x04 -#define USI_SSR 0x08 -#define USI_RX0 0x10 -#define USI_TX0 0x10 - -/* usi register bit */ -#define ENINT (0x01 << 17) -#define ENFLG (0x01 << 16) -#define TXNUM (0x03 << 8) -#define TXNEG (0x01 << 2) -#define RXNEG (0x01 << 1) -#define LSB (0x01 << 10) -#define SELECTLEV (0x01 << 2) -#define SELECTPOL (0x01 << 31) -#define SELECTSLAVE 0x01 -#define GOBUSY 0x01 - -struct nuc900_spi { - struct spi_bitbang bitbang; - struct completion done; - void __iomem *regs; - int irq; - int len; - int count; - const unsigned char *tx; - unsigned char *rx; - struct clk *clk; - struct resource *ioarea; - struct spi_master *master; - struct spi_device *curdev; - struct device *dev; - struct nuc900_spi_info *pdata; - spinlock_t lock; - struct resource *res; -}; - -static inline struct nuc900_spi *to_hw(struct spi_device *sdev) -{ - return spi_master_get_devdata(sdev->master); -} - -static void nuc900_slave_select(struct spi_device *spi, unsigned int ssr) -{ - struct nuc900_spi *hw = to_hw(spi); - unsigned int val; - unsigned int cs = spi->mode & SPI_CS_HIGH ? 1 : 0; - unsigned int cpol = spi->mode & SPI_CPOL ? 1 : 0; - unsigned long flags; - - spin_lock_irqsave(&hw->lock, flags); - - val = __raw_readl(hw->regs + USI_SSR); - - if (!cs) - val &= ~SELECTLEV; - else - val |= SELECTLEV; - - if (!ssr) - val &= ~SELECTSLAVE; - else - val |= SELECTSLAVE; - - __raw_writel(val, hw->regs + USI_SSR); - - val = __raw_readl(hw->regs + USI_CNT); - - if (!cpol) - val &= ~SELECTPOL; - else - val |= SELECTPOL; - - __raw_writel(val, hw->regs + USI_CNT); - - spin_unlock_irqrestore(&hw->lock, flags); -} - -static void nuc900_spi_chipsel(struct spi_device *spi, int value) -{ - switch (value) { - case BITBANG_CS_INACTIVE: - nuc900_slave_select(spi, 0); - break; - - case BITBANG_CS_ACTIVE: - nuc900_slave_select(spi, 1); - break; - } -} - -static void nuc900_spi_setup_txnum(struct nuc900_spi *hw, - unsigned int txnum) -{ - unsigned int val; - unsigned long flags; - - spin_lock_irqsave(&hw->lock, flags); - - val = __raw_readl(hw->regs + USI_CNT); - - if (!txnum) - val &= ~TXNUM; - else - val |= txnum << 0x08; - - __raw_writel(val, hw->regs + USI_CNT); - - spin_unlock_irqrestore(&hw->lock, flags); - -} - -static void nuc900_spi_setup_txbitlen(struct nuc900_spi *hw, - unsigned int txbitlen) -{ - unsigned int val; - unsigned long flags; - - spin_lock_irqsave(&hw->lock, flags); - - val = __raw_readl(hw->regs + USI_CNT); - - val |= (txbitlen << 0x03); - - __raw_writel(val, hw->regs + USI_CNT); - - spin_unlock_irqrestore(&hw->lock, flags); -} - -static void nuc900_spi_gobusy(struct nuc900_spi *hw) -{ - unsigned int val; - unsigned long flags; - - spin_lock_irqsave(&hw->lock, flags); - - val = __raw_readl(hw->regs + USI_CNT); - - val |= GOBUSY; - - __raw_writel(val, hw->regs + USI_CNT); - - spin_unlock_irqrestore(&hw->lock, flags); -} - -static int nuc900_spi_setupxfer(struct spi_device *spi, - struct spi_transfer *t) -{ - return 0; -} - -static int nuc900_spi_setup(struct spi_device *spi) -{ - return 0; -} - -static inline unsigned int hw_txbyte(struct nuc900_spi *hw, int count) -{ - return hw->tx ? 
hw->tx[count] : 0; -} - -static int nuc900_spi_txrx(struct spi_device *spi, struct spi_transfer *t) -{ - struct nuc900_spi *hw = to_hw(spi); - - hw->tx = t->tx_buf; - hw->rx = t->rx_buf; - hw->len = t->len; - hw->count = 0; - - __raw_writel(hw_txbyte(hw, 0x0), hw->regs + USI_TX0); - - nuc900_spi_gobusy(hw); - - wait_for_completion(&hw->done); - - return hw->count; -} - -static irqreturn_t nuc900_spi_irq(int irq, void *dev) -{ - struct nuc900_spi *hw = dev; - unsigned int status; - unsigned int count = hw->count; - - status = __raw_readl(hw->regs + USI_CNT); - __raw_writel(status, hw->regs + USI_CNT); - - if (status & ENFLG) { - hw->count++; - - if (hw->rx) - hw->rx[count] = __raw_readl(hw->regs + USI_RX0); - count++; - - if (count < hw->len) { - __raw_writel(hw_txbyte(hw, count), hw->regs + USI_TX0); - nuc900_spi_gobusy(hw); - } else { - complete(&hw->done); - } - - return IRQ_HANDLED; - } - - complete(&hw->done); - return IRQ_HANDLED; -} - -static void nuc900_tx_edge(struct nuc900_spi *hw, unsigned int edge) -{ - unsigned int val; - unsigned long flags; - - spin_lock_irqsave(&hw->lock, flags); - - val = __raw_readl(hw->regs + USI_CNT); - - if (edge) - val |= TXNEG; - else - val &= ~TXNEG; - __raw_writel(val, hw->regs + USI_CNT); - - spin_unlock_irqrestore(&hw->lock, flags); -} - -static void nuc900_rx_edge(struct nuc900_spi *hw, unsigned int edge) -{ - unsigned int val; - unsigned long flags; - - spin_lock_irqsave(&hw->lock, flags); - - val = __raw_readl(hw->regs + USI_CNT); - - if (edge) - val |= RXNEG; - else - val &= ~RXNEG; - __raw_writel(val, hw->regs + USI_CNT); - - spin_unlock_irqrestore(&hw->lock, flags); -} - -static void nuc900_send_first(struct nuc900_spi *hw, unsigned int lsb) -{ - unsigned int val; - unsigned long flags; - - spin_lock_irqsave(&hw->lock, flags); - - val = __raw_readl(hw->regs + USI_CNT); - - if (lsb) - val |= LSB; - else - val &= ~LSB; - __raw_writel(val, hw->regs + USI_CNT); - - spin_unlock_irqrestore(&hw->lock, flags); -} - -static void nuc900_set_sleep(struct nuc900_spi *hw, unsigned int sleep) -{ - unsigned int val; - unsigned long flags; - - spin_lock_irqsave(&hw->lock, flags); - - val = __raw_readl(hw->regs + USI_CNT); - - if (sleep) - val |= (sleep << 12); - else - val &= ~(0x0f << 12); - __raw_writel(val, hw->regs + USI_CNT); - - spin_unlock_irqrestore(&hw->lock, flags); -} - -static void nuc900_enable_int(struct nuc900_spi *hw) -{ - unsigned int val; - unsigned long flags; - - spin_lock_irqsave(&hw->lock, flags); - - val = __raw_readl(hw->regs + USI_CNT); - - val |= ENINT; - - __raw_writel(val, hw->regs + USI_CNT); - - spin_unlock_irqrestore(&hw->lock, flags); -} - -static void nuc900_set_divider(struct nuc900_spi *hw) -{ - __raw_writel(hw->pdata->divider, hw->regs + USI_DIV); -} - -static void nuc900_init_spi(struct nuc900_spi *hw) -{ - clk_enable(hw->clk); - spin_lock_init(&hw->lock); - - nuc900_tx_edge(hw, hw->pdata->txneg); - nuc900_rx_edge(hw, hw->pdata->rxneg); - nuc900_send_first(hw, hw->pdata->lsb); - nuc900_set_sleep(hw, hw->pdata->sleep); - nuc900_spi_setup_txbitlen(hw, hw->pdata->txbitlen); - nuc900_spi_setup_txnum(hw, hw->pdata->txnum); - nuc900_set_divider(hw); - nuc900_enable_int(hw); -} - -static int __devinit nuc900_spi_probe(struct platform_device *pdev) -{ - struct nuc900_spi *hw; - struct spi_master *master; - int err = 0; - - master = spi_alloc_master(&pdev->dev, sizeof(struct nuc900_spi)); - if (master == NULL) { - dev_err(&pdev->dev, "No memory for spi_master\n"); - err = -ENOMEM; - goto err_nomem; - } - - hw = 
spi_master_get_devdata(master); - memset(hw, 0, sizeof(struct nuc900_spi)); - - hw->master = spi_master_get(master); - hw->pdata = pdev->dev.platform_data; - hw->dev = &pdev->dev; - - if (hw->pdata == NULL) { - dev_err(&pdev->dev, "No platform data supplied\n"); - err = -ENOENT; - goto err_pdata; - } - - platform_set_drvdata(pdev, hw); - init_completion(&hw->done); - - master->mode_bits = SPI_MODE_0; - master->num_chipselect = hw->pdata->num_cs; - master->bus_num = hw->pdata->bus_num; - hw->bitbang.master = hw->master; - hw->bitbang.setup_transfer = nuc900_spi_setupxfer; - hw->bitbang.chipselect = nuc900_spi_chipsel; - hw->bitbang.txrx_bufs = nuc900_spi_txrx; - hw->bitbang.master->setup = nuc900_spi_setup; - - hw->res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (hw->res == NULL) { - dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n"); - err = -ENOENT; - goto err_pdata; - } - - hw->ioarea = request_mem_region(hw->res->start, - resource_size(hw->res), pdev->name); - - if (hw->ioarea == NULL) { - dev_err(&pdev->dev, "Cannot reserve region\n"); - err = -ENXIO; - goto err_pdata; - } - - hw->regs = ioremap(hw->res->start, resource_size(hw->res)); - if (hw->regs == NULL) { - dev_err(&pdev->dev, "Cannot map IO\n"); - err = -ENXIO; - goto err_iomap; - } - - hw->irq = platform_get_irq(pdev, 0); - if (hw->irq < 0) { - dev_err(&pdev->dev, "No IRQ specified\n"); - err = -ENOENT; - goto err_irq; - } - - err = request_irq(hw->irq, nuc900_spi_irq, 0, pdev->name, hw); - if (err) { - dev_err(&pdev->dev, "Cannot claim IRQ\n"); - goto err_irq; - } - - hw->clk = clk_get(&pdev->dev, "spi"); - if (IS_ERR(hw->clk)) { - dev_err(&pdev->dev, "No clock for device\n"); - err = PTR_ERR(hw->clk); - goto err_clk; - } - - mfp_set_groupg(&pdev->dev); - nuc900_init_spi(hw); - - err = spi_bitbang_start(&hw->bitbang); - if (err) { - dev_err(&pdev->dev, "Failed to register SPI master\n"); - goto err_register; - } - - return 0; - -err_register: - clk_disable(hw->clk); - clk_put(hw->clk); -err_clk: - free_irq(hw->irq, hw); -err_irq: - iounmap(hw->regs); -err_iomap: - release_mem_region(hw->res->start, resource_size(hw->res)); - kfree(hw->ioarea); -err_pdata: - spi_master_put(hw->master); - -err_nomem: - return err; -} - -static int __devexit nuc900_spi_remove(struct platform_device *dev) -{ - struct nuc900_spi *hw = platform_get_drvdata(dev); - - free_irq(hw->irq, hw); - - platform_set_drvdata(dev, NULL); - - spi_bitbang_stop(&hw->bitbang); - - clk_disable(hw->clk); - clk_put(hw->clk); - - iounmap(hw->regs); - - release_mem_region(hw->res->start, resource_size(hw->res)); - kfree(hw->ioarea); - - spi_master_put(hw->master); - return 0; -} - -static struct platform_driver nuc900_spi_driver = { - .probe = nuc900_spi_probe, - .remove = __devexit_p(nuc900_spi_remove), - .driver = { - .name = "nuc900-spi", - .owner = THIS_MODULE, - }, -}; - -static int __init nuc900_spi_init(void) -{ - return platform_driver_register(&nuc900_spi_driver); -} - -static void __exit nuc900_spi_exit(void) -{ - platform_driver_unregister(&nuc900_spi_driver); -} - -module_init(nuc900_spi_init); -module_exit(nuc900_spi_exit); - -MODULE_AUTHOR("Wan ZongShun "); -MODULE_DESCRIPTION("nuc900 spi driver!"); -MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:nuc900-spi"); diff --git a/drivers/spi/spi_oc_tiny.c b/drivers/spi/spi_oc_tiny.c deleted file mode 100644 index f1bde66..0000000 --- a/drivers/spi/spi_oc_tiny.c +++ /dev/null @@ -1,425 +0,0 @@ -/* - * OpenCores tiny SPI master driver - * - * http://opencores.org/project,tiny_spi - * - * Copyright (C) 
2011 Thomas Chou - * - * Based on spi_s3c24xx.c, which is: - * Copyright (c) 2006 Ben Dooks - * Copyright (c) 2006 Simtec Electronics - * Ben Dooks - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define DRV_NAME "spi_oc_tiny" - -#define TINY_SPI_RXDATA 0 -#define TINY_SPI_TXDATA 4 -#define TINY_SPI_STATUS 8 -#define TINY_SPI_CONTROL 12 -#define TINY_SPI_BAUD 16 - -#define TINY_SPI_STATUS_TXE 0x1 -#define TINY_SPI_STATUS_TXR 0x2 - -struct tiny_spi { - /* bitbang has to be first */ - struct spi_bitbang bitbang; - struct completion done; - - void __iomem *base; - int irq; - unsigned int freq; - unsigned int baudwidth; - unsigned int baud; - unsigned int speed_hz; - unsigned int mode; - unsigned int len; - unsigned int txc, rxc; - const u8 *txp; - u8 *rxp; - unsigned int gpio_cs_count; - int *gpio_cs; -}; - -static inline struct tiny_spi *tiny_spi_to_hw(struct spi_device *sdev) -{ - return spi_master_get_devdata(sdev->master); -} - -static unsigned int tiny_spi_baud(struct spi_device *spi, unsigned int hz) -{ - struct tiny_spi *hw = tiny_spi_to_hw(spi); - - return min(DIV_ROUND_UP(hw->freq, hz * 2), (1U << hw->baudwidth)) - 1; -} - -static void tiny_spi_chipselect(struct spi_device *spi, int is_active) -{ - struct tiny_spi *hw = tiny_spi_to_hw(spi); - - if (hw->gpio_cs_count) { - gpio_set_value(hw->gpio_cs[spi->chip_select], - (spi->mode & SPI_CS_HIGH) ? is_active : !is_active); - } -} - -static int tiny_spi_setup_transfer(struct spi_device *spi, - struct spi_transfer *t) -{ - struct tiny_spi *hw = tiny_spi_to_hw(spi); - unsigned int baud = hw->baud; - - if (t) { - if (t->speed_hz && t->speed_hz != hw->speed_hz) - baud = tiny_spi_baud(spi, t->speed_hz); - } - writel(baud, hw->base + TINY_SPI_BAUD); - writel(hw->mode, hw->base + TINY_SPI_CONTROL); - return 0; -} - -static int tiny_spi_setup(struct spi_device *spi) -{ - struct tiny_spi *hw = tiny_spi_to_hw(spi); - - if (spi->max_speed_hz != hw->speed_hz) { - hw->speed_hz = spi->max_speed_hz; - hw->baud = tiny_spi_baud(spi, hw->speed_hz); - } - hw->mode = spi->mode & (SPI_CPOL | SPI_CPHA); - return 0; -} - -static inline void tiny_spi_wait_txr(struct tiny_spi *hw) -{ - while (!(readb(hw->base + TINY_SPI_STATUS) & - TINY_SPI_STATUS_TXR)) - cpu_relax(); -} - -static inline void tiny_spi_wait_txe(struct tiny_spi *hw) -{ - while (!(readb(hw->base + TINY_SPI_STATUS) & - TINY_SPI_STATUS_TXE)) - cpu_relax(); -} - -static int tiny_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t) -{ - struct tiny_spi *hw = tiny_spi_to_hw(spi); - const u8 *txp = t->tx_buf; - u8 *rxp = t->rx_buf; - unsigned int i; - - if (hw->irq >= 0) { - /* use intrrupt driven data transfer */ - hw->len = t->len; - hw->txp = t->tx_buf; - hw->rxp = t->rx_buf; - hw->txc = 0; - hw->rxc = 0; - - /* send the first byte */ - if (t->len > 1) { - writeb(hw->txp ? *hw->txp++ : 0, - hw->base + TINY_SPI_TXDATA); - hw->txc++; - writeb(hw->txp ? *hw->txp++ : 0, - hw->base + TINY_SPI_TXDATA); - hw->txc++; - writeb(TINY_SPI_STATUS_TXR, hw->base + TINY_SPI_STATUS); - } else { - writeb(hw->txp ? 
*hw->txp++ : 0, - hw->base + TINY_SPI_TXDATA); - hw->txc++; - writeb(TINY_SPI_STATUS_TXE, hw->base + TINY_SPI_STATUS); - } - - wait_for_completion(&hw->done); - } else if (txp && rxp) { - /* we need to tighten the transfer loop */ - writeb(*txp++, hw->base + TINY_SPI_TXDATA); - if (t->len > 1) { - writeb(*txp++, hw->base + TINY_SPI_TXDATA); - for (i = 2; i < t->len; i++) { - u8 rx, tx = *txp++; - tiny_spi_wait_txr(hw); - rx = readb(hw->base + TINY_SPI_TXDATA); - writeb(tx, hw->base + TINY_SPI_TXDATA); - *rxp++ = rx; - } - tiny_spi_wait_txr(hw); - *rxp++ = readb(hw->base + TINY_SPI_TXDATA); - } - tiny_spi_wait_txe(hw); - *rxp++ = readb(hw->base + TINY_SPI_RXDATA); - } else if (rxp) { - writeb(0, hw->base + TINY_SPI_TXDATA); - if (t->len > 1) { - writeb(0, - hw->base + TINY_SPI_TXDATA); - for (i = 2; i < t->len; i++) { - u8 rx; - tiny_spi_wait_txr(hw); - rx = readb(hw->base + TINY_SPI_TXDATA); - writeb(0, hw->base + TINY_SPI_TXDATA); - *rxp++ = rx; - } - tiny_spi_wait_txr(hw); - *rxp++ = readb(hw->base + TINY_SPI_TXDATA); - } - tiny_spi_wait_txe(hw); - *rxp++ = readb(hw->base + TINY_SPI_RXDATA); - } else if (txp) { - writeb(*txp++, hw->base + TINY_SPI_TXDATA); - if (t->len > 1) { - writeb(*txp++, hw->base + TINY_SPI_TXDATA); - for (i = 2; i < t->len; i++) { - u8 tx = *txp++; - tiny_spi_wait_txr(hw); - writeb(tx, hw->base + TINY_SPI_TXDATA); - } - } - tiny_spi_wait_txe(hw); - } else { - writeb(0, hw->base + TINY_SPI_TXDATA); - if (t->len > 1) { - writeb(0, hw->base + TINY_SPI_TXDATA); - for (i = 2; i < t->len; i++) { - tiny_spi_wait_txr(hw); - writeb(0, hw->base + TINY_SPI_TXDATA); - } - } - tiny_spi_wait_txe(hw); - } - return t->len; -} - -static irqreturn_t tiny_spi_irq(int irq, void *dev) -{ - struct tiny_spi *hw = dev; - - writeb(0, hw->base + TINY_SPI_STATUS); - if (hw->rxc + 1 == hw->len) { - if (hw->rxp) - *hw->rxp++ = readb(hw->base + TINY_SPI_RXDATA); - hw->rxc++; - complete(&hw->done); - } else { - if (hw->rxp) - *hw->rxp++ = readb(hw->base + TINY_SPI_TXDATA); - hw->rxc++; - if (hw->txc < hw->len) { - writeb(hw->txp ? 
*hw->txp++ : 0, - hw->base + TINY_SPI_TXDATA); - hw->txc++; - writeb(TINY_SPI_STATUS_TXR, - hw->base + TINY_SPI_STATUS); - } else { - writeb(TINY_SPI_STATUS_TXE, - hw->base + TINY_SPI_STATUS); - } - } - return IRQ_HANDLED; -} - -#ifdef CONFIG_OF -#include - -static int __devinit tiny_spi_of_probe(struct platform_device *pdev) -{ - struct tiny_spi *hw = platform_get_drvdata(pdev); - struct device_node *np = pdev->dev.of_node; - unsigned int i; - const __be32 *val; - int len; - - if (!np) - return 0; - hw->gpio_cs_count = of_gpio_count(np); - if (hw->gpio_cs_count) { - hw->gpio_cs = devm_kzalloc(&pdev->dev, - hw->gpio_cs_count * sizeof(unsigned int), - GFP_KERNEL); - if (!hw->gpio_cs) - return -ENOMEM; - } - for (i = 0; i < hw->gpio_cs_count; i++) { - hw->gpio_cs[i] = of_get_gpio_flags(np, i, NULL); - if (hw->gpio_cs[i] < 0) - return -ENODEV; - } - hw->bitbang.master->dev.of_node = pdev->dev.of_node; - val = of_get_property(pdev->dev.of_node, - "clock-frequency", &len); - if (val && len >= sizeof(__be32)) - hw->freq = be32_to_cpup(val); - val = of_get_property(pdev->dev.of_node, "baud-width", &len); - if (val && len >= sizeof(__be32)) - hw->baudwidth = be32_to_cpup(val); - return 0; -} -#else /* !CONFIG_OF */ -static int __devinit tiny_spi_of_probe(struct platform_device *pdev) -{ - return 0; -} -#endif /* CONFIG_OF */ - -static int __devinit tiny_spi_probe(struct platform_device *pdev) -{ - struct tiny_spi_platform_data *platp = pdev->dev.platform_data; - struct tiny_spi *hw; - struct spi_master *master; - struct resource *res; - unsigned int i; - int err = -ENODEV; - - master = spi_alloc_master(&pdev->dev, sizeof(struct tiny_spi)); - if (!master) - return err; - - /* setup the master state. */ - master->bus_num = pdev->id; - master->num_chipselect = 255; - master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; - master->setup = tiny_spi_setup; - - hw = spi_master_get_devdata(master); - platform_set_drvdata(pdev, hw); - - /* setup the state for the bitbang driver */ - hw->bitbang.master = spi_master_get(master); - if (!hw->bitbang.master) - return err; - hw->bitbang.setup_transfer = tiny_spi_setup_transfer; - hw->bitbang.chipselect = tiny_spi_chipselect; - hw->bitbang.txrx_bufs = tiny_spi_txrx_bufs; - - /* find and map our resources */ - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) - goto exit_busy; - if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res), - pdev->name)) - goto exit_busy; - hw->base = devm_ioremap_nocache(&pdev->dev, res->start, - resource_size(res)); - if (!hw->base) - goto exit_busy; - /* irq is optional */ - hw->irq = platform_get_irq(pdev, 0); - if (hw->irq >= 0) { - init_completion(&hw->done); - err = devm_request_irq(&pdev->dev, hw->irq, tiny_spi_irq, 0, - pdev->name, hw); - if (err) - goto exit; - } - /* find platform data */ - if (platp) { - hw->gpio_cs_count = platp->gpio_cs_count; - hw->gpio_cs = platp->gpio_cs; - if (platp->gpio_cs_count && !platp->gpio_cs) - goto exit_busy; - hw->freq = platp->freq; - hw->baudwidth = platp->baudwidth; - } else { - err = tiny_spi_of_probe(pdev); - if (err) - goto exit; - } - for (i = 0; i < hw->gpio_cs_count; i++) { - err = gpio_request(hw->gpio_cs[i], dev_name(&pdev->dev)); - if (err) - goto exit_gpio; - gpio_direction_output(hw->gpio_cs[i], 1); - } - hw->bitbang.master->num_chipselect = max(1U, hw->gpio_cs_count); - - /* register our spi controller */ - err = spi_bitbang_start(&hw->bitbang); - if (err) - goto exit; - dev_info(&pdev->dev, "base %p, irq %d\n", hw->base, hw->irq); - - return 
0; - -exit_gpio: - while (i-- > 0) - gpio_free(hw->gpio_cs[i]); -exit_busy: - err = -EBUSY; -exit: - platform_set_drvdata(pdev, NULL); - spi_master_put(master); - return err; -} - -static int __devexit tiny_spi_remove(struct platform_device *pdev) -{ - struct tiny_spi *hw = platform_get_drvdata(pdev); - struct spi_master *master = hw->bitbang.master; - unsigned int i; - - spi_bitbang_stop(&hw->bitbang); - for (i = 0; i < hw->gpio_cs_count; i++) - gpio_free(hw->gpio_cs[i]); - platform_set_drvdata(pdev, NULL); - spi_master_put(master); - return 0; -} - -#ifdef CONFIG_OF -static const struct of_device_id tiny_spi_match[] = { - { .compatible = "opencores,tiny-spi-rtlsvn2", }, - {}, -}; -MODULE_DEVICE_TABLE(of, tiny_spi_match); -#else /* CONFIG_OF */ -#define tiny_spi_match NULL -#endif /* CONFIG_OF */ - -static struct platform_driver tiny_spi_driver = { - .probe = tiny_spi_probe, - .remove = __devexit_p(tiny_spi_remove), - .driver = { - .name = DRV_NAME, - .owner = THIS_MODULE, - .pm = NULL, - .of_match_table = tiny_spi_match, - }, -}; - -static int __init tiny_spi_init(void) -{ - return platform_driver_register(&tiny_spi_driver); -} -module_init(tiny_spi_init); - -static void __exit tiny_spi_exit(void) -{ - platform_driver_unregister(&tiny_spi_driver); -} -module_exit(tiny_spi_exit); - -MODULE_DESCRIPTION("OpenCores tiny SPI driver"); -MODULE_AUTHOR("Thomas Chou "); -MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:" DRV_NAME); diff --git a/drivers/spi/spi_ppc4xx.c b/drivers/spi/spi_ppc4xx.c deleted file mode 100644 index 2a298c0..0000000 --- a/drivers/spi/spi_ppc4xx.c +++ /dev/null @@ -1,612 +0,0 @@ -/* - * SPI_PPC4XX SPI controller driver. - * - * Copyright (C) 2007 Gary Jennejohn - * Copyright 2008 Stefan Roese , DENX Software Engineering - * Copyright 2009 Harris Corporation, Steven A. Falco - * - * Based in part on drivers/spi/spi_s3c24xx.c - * - * Copyright (c) 2006 Ben Dooks - * Copyright (c) 2006 Simtec Electronics - * Ben Dooks - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - */ - -/* - * The PPC4xx SPI controller has no FIFO so each sent/received byte will - * generate an interrupt to the CPU. This can cause high CPU utilization. - * This driver allows platforms to reduce the interrupt load on the CPU - * during SPI transfers by setting max_speed_hz via the device tree. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#include -#include -#include - -/* bits in mode register - bit 0 is MSb */ - -/* - * SPI_PPC4XX_MODE_SCP = 0 means "data latched on trailing edge of clock" - * SPI_PPC4XX_MODE_SCP = 1 means "data latched on leading edge of clock" - * Note: This is the inverse of CPHA. - */ -#define SPI_PPC4XX_MODE_SCP (0x80 >> 3) - -/* SPI_PPC4XX_MODE_SPE = 1 means "port enabled" */ -#define SPI_PPC4XX_MODE_SPE (0x80 >> 4) - -/* - * SPI_PPC4XX_MODE_RD = 0 means "MSB first" - this is the normal mode - * SPI_PPC4XX_MODE_RD = 1 means "LSB first" - this is bit-reversed mode - * Note: This is identical to SPI_LSB_FIRST. - */ -#define SPI_PPC4XX_MODE_RD (0x80 >> 5) - -/* - * SPI_PPC4XX_MODE_CI = 0 means "clock idles low" - * SPI_PPC4XX_MODE_CI = 1 means "clock idles high" - * Note: This is identical to CPOL. 
- */ -#define SPI_PPC4XX_MODE_CI (0x80 >> 6) - -/* - * SPI_PPC4XX_MODE_IL = 0 means "loopback disable" - * SPI_PPC4XX_MODE_IL = 1 means "loopback enable" - */ -#define SPI_PPC4XX_MODE_IL (0x80 >> 7) - -/* bits in control register */ -/* starts a transfer when set */ -#define SPI_PPC4XX_CR_STR (0x80 >> 7) - -/* bits in status register */ -/* port is busy with a transfer */ -#define SPI_PPC4XX_SR_BSY (0x80 >> 6) -/* RxD ready */ -#define SPI_PPC4XX_SR_RBR (0x80 >> 7) - -/* clock settings (SCP and CI) for various SPI modes */ -#define SPI_CLK_MODE0 (SPI_PPC4XX_MODE_SCP | 0) -#define SPI_CLK_MODE1 (0 | 0) -#define SPI_CLK_MODE2 (SPI_PPC4XX_MODE_SCP | SPI_PPC4XX_MODE_CI) -#define SPI_CLK_MODE3 (0 | SPI_PPC4XX_MODE_CI) - -#define DRIVER_NAME "spi_ppc4xx_of" - -struct spi_ppc4xx_regs { - u8 mode; - u8 rxd; - u8 txd; - u8 cr; - u8 sr; - u8 dummy; - /* - * Clock divisor modulus register - * This uses the follwing formula: - * SCPClkOut = OPBCLK/(4(CDM + 1)) - * or - * CDM = (OPBCLK/4*SCPClkOut) - 1 - * bit 0 is the MSb! - */ - u8 cdm; -}; - -/* SPI Controller driver's private data. */ -struct ppc4xx_spi { - /* bitbang has to be first */ - struct spi_bitbang bitbang; - struct completion done; - - u64 mapbase; - u64 mapsize; - int irqnum; - /* need this to set the SPI clock */ - unsigned int opb_freq; - - /* for transfers */ - int len; - int count; - /* data buffers */ - const unsigned char *tx; - unsigned char *rx; - - int *gpios; - - struct spi_ppc4xx_regs __iomem *regs; /* pointer to the registers */ - struct spi_master *master; - struct device *dev; -}; - -/* need this so we can set the clock in the chipselect routine */ -struct spi_ppc4xx_cs { - u8 mode; -}; - -static int spi_ppc4xx_txrx(struct spi_device *spi, struct spi_transfer *t) -{ - struct ppc4xx_spi *hw; - u8 data; - - dev_dbg(&spi->dev, "txrx: tx %p, rx %p, len %d\n", - t->tx_buf, t->rx_buf, t->len); - - hw = spi_master_get_devdata(spi->master); - - hw->tx = t->tx_buf; - hw->rx = t->rx_buf; - hw->len = t->len; - hw->count = 0; - - /* send the first byte */ - data = hw->tx ? hw->tx[0] : 0; - out_8(&hw->regs->txd, data); - out_8(&hw->regs->cr, SPI_PPC4XX_CR_STR); - wait_for_completion(&hw->done); - - return hw->count; -} - -static int spi_ppc4xx_setupxfer(struct spi_device *spi, struct spi_transfer *t) -{ - struct ppc4xx_spi *hw = spi_master_get_devdata(spi->master); - struct spi_ppc4xx_cs *cs = spi->controller_state; - int scr; - u8 cdm = 0; - u32 speed; - u8 bits_per_word; - - /* Start with the generic configuration for this device. */ - bits_per_word = spi->bits_per_word; - speed = spi->max_speed_hz; - - /* - * Modify the configuration if the transfer overrides it. Do not allow - * the transfer to overwrite the generic configuration with zeros. 
- */ - if (t) { - if (t->bits_per_word) - bits_per_word = t->bits_per_word; - - if (t->speed_hz) - speed = min(t->speed_hz, spi->max_speed_hz); - } - - if (bits_per_word != 8) { - dev_err(&spi->dev, "invalid bits-per-word (%d)\n", - bits_per_word); - return -EINVAL; - } - - if (!speed || (speed > spi->max_speed_hz)) { - dev_err(&spi->dev, "invalid speed_hz (%d)\n", speed); - return -EINVAL; - } - - /* Write new configration */ - out_8(&hw->regs->mode, cs->mode); - - /* Set the clock */ - /* opb_freq was already divided by 4 */ - scr = (hw->opb_freq / speed) - 1; - if (scr > 0) - cdm = min(scr, 0xff); - - dev_dbg(&spi->dev, "setting pre-scaler to %d (hz %d)\n", cdm, speed); - - if (in_8(&hw->regs->cdm) != cdm) - out_8(&hw->regs->cdm, cdm); - - spin_lock(&hw->bitbang.lock); - if (!hw->bitbang.busy) { - hw->bitbang.chipselect(spi, BITBANG_CS_INACTIVE); - /* Need to ndelay here? */ - } - spin_unlock(&hw->bitbang.lock); - - return 0; -} - -static int spi_ppc4xx_setup(struct spi_device *spi) -{ - struct spi_ppc4xx_cs *cs = spi->controller_state; - - if (spi->bits_per_word != 8) { - dev_err(&spi->dev, "invalid bits-per-word (%d)\n", - spi->bits_per_word); - return -EINVAL; - } - - if (!spi->max_speed_hz) { - dev_err(&spi->dev, "invalid max_speed_hz (must be non-zero)\n"); - return -EINVAL; - } - - if (cs == NULL) { - cs = kzalloc(sizeof *cs, GFP_KERNEL); - if (!cs) - return -ENOMEM; - spi->controller_state = cs; - } - - /* - * We set all bits of the SPI0_MODE register, so, - * no need to read-modify-write - */ - cs->mode = SPI_PPC4XX_MODE_SPE; - - switch (spi->mode & (SPI_CPHA | SPI_CPOL)) { - case SPI_MODE_0: - cs->mode |= SPI_CLK_MODE0; - break; - case SPI_MODE_1: - cs->mode |= SPI_CLK_MODE1; - break; - case SPI_MODE_2: - cs->mode |= SPI_CLK_MODE2; - break; - case SPI_MODE_3: - cs->mode |= SPI_CLK_MODE3; - break; - } - - if (spi->mode & SPI_LSB_FIRST) - cs->mode |= SPI_PPC4XX_MODE_RD; - - return 0; -} - -static void spi_ppc4xx_chipsel(struct spi_device *spi, int value) -{ - struct ppc4xx_spi *hw = spi_master_get_devdata(spi->master); - unsigned int cs = spi->chip_select; - unsigned int cspol; - - /* - * If there are no chip selects at all, or if this is the special - * case of a non-existent (dummy) chip select, do nothing. - */ - - if (!hw->master->num_chipselect || hw->gpios[cs] == -EEXIST) - return; - - cspol = spi->mode & SPI_CS_HIGH ? 1 : 0; - if (value == BITBANG_CS_INACTIVE) - cspol = !cspol; - - gpio_set_value(hw->gpios[cs], cspol); -} - -static irqreturn_t spi_ppc4xx_int(int irq, void *dev_id) -{ - struct ppc4xx_spi *hw; - u8 status; - u8 data; - unsigned int count; - - hw = (struct ppc4xx_spi *)dev_id; - - status = in_8(&hw->regs->sr); - if (!status) - return IRQ_NONE; - - /* - * BSY de-asserts one cycle after the transfer is complete. The - * interrupt is asserted after the transfer is complete. The exact - * relationship is not documented, hence this code. - */ - - if (unlikely(status & SPI_PPC4XX_SR_BSY)) { - u8 lstatus; - int cnt = 0; - - dev_dbg(hw->dev, "got interrupt but spi still busy?\n"); - do { - ndelay(10); - lstatus = in_8(&hw->regs->sr); - } while (++cnt < 100 && lstatus & SPI_PPC4XX_SR_BSY); - - if (cnt >= 100) { - dev_err(hw->dev, "busywait: too many loops!\n"); - complete(&hw->done); - return IRQ_HANDLED; - } else { - /* status is always 1 (RBR) here */ - status = in_8(&hw->regs->sr); - dev_dbg(hw->dev, "loops %d status %x\n", cnt, status); - } - } - - count = hw->count; - hw->count++; - - /* RBR triggered this interrupt. Therefore, data must be ready. 
*/ - data = in_8(&hw->regs->rxd); - if (hw->rx) - hw->rx[count] = data; - - count++; - - if (count < hw->len) { - data = hw->tx ? hw->tx[count] : 0; - out_8(&hw->regs->txd, data); - out_8(&hw->regs->cr, SPI_PPC4XX_CR_STR); - } else { - complete(&hw->done); - } - - return IRQ_HANDLED; -} - -static void spi_ppc4xx_cleanup(struct spi_device *spi) -{ - kfree(spi->controller_state); -} - -static void spi_ppc4xx_enable(struct ppc4xx_spi *hw) -{ - /* - * On all 4xx PPC's the SPI bus is shared/multiplexed with - * the 2nd I2C bus. We need to enable the the SPI bus before - * using it. - */ - - /* need to clear bit 14 to enable SPC */ - dcri_clrset(SDR0, SDR0_PFC1, 0x80000000 >> 14, 0); -} - -static void free_gpios(struct ppc4xx_spi *hw) -{ - if (hw->master->num_chipselect) { - int i; - for (i = 0; i < hw->master->num_chipselect; i++) - if (gpio_is_valid(hw->gpios[i])) - gpio_free(hw->gpios[i]); - - kfree(hw->gpios); - hw->gpios = NULL; - } -} - -/* - * platform_device layer stuff... - */ -static int __init spi_ppc4xx_of_probe(struct platform_device *op) -{ - struct ppc4xx_spi *hw; - struct spi_master *master; - struct spi_bitbang *bbp; - struct resource resource; - struct device_node *np = op->dev.of_node; - struct device *dev = &op->dev; - struct device_node *opbnp; - int ret; - int num_gpios; - const unsigned int *clk; - - master = spi_alloc_master(dev, sizeof *hw); - if (master == NULL) - return -ENOMEM; - master->dev.of_node = np; - dev_set_drvdata(dev, master); - hw = spi_master_get_devdata(master); - hw->master = spi_master_get(master); - hw->dev = dev; - - init_completion(&hw->done); - - /* - * A count of zero implies a single SPI device without any chip-select. - * Note that of_gpio_count counts all gpios assigned to this spi master. - * This includes both "null" gpio's and real ones. - */ - num_gpios = of_gpio_count(np); - if (num_gpios) { - int i; - - hw->gpios = kzalloc(sizeof(int) * num_gpios, GFP_KERNEL); - if (!hw->gpios) { - ret = -ENOMEM; - goto free_master; - } - - for (i = 0; i < num_gpios; i++) { - int gpio; - enum of_gpio_flags flags; - - gpio = of_get_gpio_flags(np, i, &flags); - hw->gpios[i] = gpio; - - if (gpio_is_valid(gpio)) { - /* Real CS - set the initial state. */ - ret = gpio_request(gpio, np->name); - if (ret < 0) { - dev_err(dev, "can't request gpio " - "#%d: %d\n", i, ret); - goto free_gpios; - } - - gpio_direction_output(gpio, - !!(flags & OF_GPIO_ACTIVE_LOW)); - } else if (gpio == -EEXIST) { - ; /* No CS, but that's OK. */ - } else { - dev_err(dev, "invalid gpio #%d: %d\n", i, gpio); - ret = -EINVAL; - goto free_gpios; - } - } - } - - /* Setup the state for the bitbang driver */ - bbp = &hw->bitbang; - bbp->master = hw->master; - bbp->setup_transfer = spi_ppc4xx_setupxfer; - bbp->chipselect = spi_ppc4xx_chipsel; - bbp->txrx_bufs = spi_ppc4xx_txrx; - bbp->use_dma = 0; - bbp->master->setup = spi_ppc4xx_setup; - bbp->master->cleanup = spi_ppc4xx_cleanup; - - /* Allocate bus num dynamically. 
*/ - bbp->master->bus_num = -1; - - /* the spi->mode bits understood by this driver: */ - bbp->master->mode_bits = - SPI_CPHA | SPI_CPOL | SPI_CS_HIGH | SPI_LSB_FIRST; - - /* this many pins in all GPIO controllers */ - bbp->master->num_chipselect = num_gpios; - - /* Get the clock for the OPB */ - opbnp = of_find_compatible_node(NULL, NULL, "ibm,opb"); - if (opbnp == NULL) { - dev_err(dev, "OPB: cannot find node\n"); - ret = -ENODEV; - goto free_gpios; - } - /* Get the clock (Hz) for the OPB */ - clk = of_get_property(opbnp, "clock-frequency", NULL); - if (clk == NULL) { - dev_err(dev, "OPB: no clock-frequency property set\n"); - of_node_put(opbnp); - ret = -ENODEV; - goto free_gpios; - } - hw->opb_freq = *clk; - hw->opb_freq >>= 2; - of_node_put(opbnp); - - ret = of_address_to_resource(np, 0, &resource); - if (ret) { - dev_err(dev, "error while parsing device node resource\n"); - goto free_gpios; - } - hw->mapbase = resource.start; - hw->mapsize = resource.end - resource.start + 1; - - /* Sanity check */ - if (hw->mapsize < sizeof(struct spi_ppc4xx_regs)) { - dev_err(dev, "too small to map registers\n"); - ret = -EINVAL; - goto free_gpios; - } - - /* Request IRQ */ - hw->irqnum = irq_of_parse_and_map(np, 0); - ret = request_irq(hw->irqnum, spi_ppc4xx_int, - IRQF_DISABLED, "spi_ppc4xx_of", (void *)hw); - if (ret) { - dev_err(dev, "unable to allocate interrupt\n"); - goto free_gpios; - } - - if (!request_mem_region(hw->mapbase, hw->mapsize, DRIVER_NAME)) { - dev_err(dev, "resource unavailable\n"); - ret = -EBUSY; - goto request_mem_error; - } - - hw->regs = ioremap(hw->mapbase, sizeof(struct spi_ppc4xx_regs)); - - if (!hw->regs) { - dev_err(dev, "unable to memory map registers\n"); - ret = -ENXIO; - goto map_io_error; - } - - spi_ppc4xx_enable(hw); - - /* Finally register our spi controller */ - dev->dma_mask = 0; - ret = spi_bitbang_start(bbp); - if (ret) { - dev_err(dev, "failed to register SPI master\n"); - goto unmap_regs; - } - - dev_info(dev, "driver initialized\n"); - - return 0; - -unmap_regs: - iounmap(hw->regs); -map_io_error: - release_mem_region(hw->mapbase, hw->mapsize); -request_mem_error: - free_irq(hw->irqnum, hw); -free_gpios: - free_gpios(hw); -free_master: - dev_set_drvdata(dev, NULL); - spi_master_put(master); - - dev_err(dev, "initialization failed\n"); - return ret; -} - -static int __exit spi_ppc4xx_of_remove(struct platform_device *op) -{ - struct spi_master *master = dev_get_drvdata(&op->dev); - struct ppc4xx_spi *hw = spi_master_get_devdata(master); - - spi_bitbang_stop(&hw->bitbang); - dev_set_drvdata(&op->dev, NULL); - release_mem_region(hw->mapbase, hw->mapsize); - free_irq(hw->irqnum, hw); - iounmap(hw->regs); - free_gpios(hw); - return 0; -} - -static const struct of_device_id spi_ppc4xx_of_match[] = { - { .compatible = "ibm,ppc4xx-spi", }, - {}, -}; - -MODULE_DEVICE_TABLE(of, spi_ppc4xx_of_match); - -static struct platform_driver spi_ppc4xx_of_driver = { - .probe = spi_ppc4xx_of_probe, - .remove = __exit_p(spi_ppc4xx_of_remove), - .driver = { - .name = DRIVER_NAME, - .owner = THIS_MODULE, - .of_match_table = spi_ppc4xx_of_match, - }, -}; - -static int __init spi_ppc4xx_init(void) -{ - return platform_driver_register(&spi_ppc4xx_of_driver); -} -module_init(spi_ppc4xx_init); - -static void __exit spi_ppc4xx_exit(void) -{ - platform_driver_unregister(&spi_ppc4xx_of_driver); -} -module_exit(spi_ppc4xx_exit); - -MODULE_AUTHOR("Gary Jennejohn & Stefan Roese"); -MODULE_DESCRIPTION("Simple PPC4xx SPI Driver"); -MODULE_LICENSE("GPL"); diff --git 
a/drivers/spi/spi_s3c24xx.c b/drivers/spi/spi_s3c24xx.c deleted file mode 100644 index 1a5fcab..0000000 --- a/drivers/spi/spi_s3c24xx.c +++ /dev/null @@ -1,746 +0,0 @@ -/* linux/drivers/spi/spi_s3c24xx.c - * - * Copyright (c) 2006 Ben Dooks - * Copyright 2006-2009 Simtec Electronics - * Ben Dooks - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * -*/ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include -#include - -#include -#include - -#include "spi_s3c24xx_fiq.h" - -/** - * s3c24xx_spi_devstate - per device data - * @hz: Last frequency calculated for @sppre field. - * @mode: Last mode setting for the @spcon field. - * @spcon: Value to write to the SPCON register. - * @sppre: Value to write to the SPPRE register. - */ -struct s3c24xx_spi_devstate { - unsigned int hz; - unsigned int mode; - u8 spcon; - u8 sppre; -}; - -enum spi_fiq_mode { - FIQ_MODE_NONE = 0, - FIQ_MODE_TX = 1, - FIQ_MODE_RX = 2, - FIQ_MODE_TXRX = 3, -}; - -struct s3c24xx_spi { - /* bitbang has to be first */ - struct spi_bitbang bitbang; - struct completion done; - - void __iomem *regs; - int irq; - int len; - int count; - - struct fiq_handler fiq_handler; - enum spi_fiq_mode fiq_mode; - unsigned char fiq_inuse; - unsigned char fiq_claimed; - - void (*set_cs)(struct s3c2410_spi_info *spi, - int cs, int pol); - - /* data buffers */ - const unsigned char *tx; - unsigned char *rx; - - struct clk *clk; - struct resource *ioarea; - struct spi_master *master; - struct spi_device *curdev; - struct device *dev; - struct s3c2410_spi_info *pdata; -}; - - -#define SPCON_DEFAULT (S3C2410_SPCON_MSTR | S3C2410_SPCON_SMOD_INT) -#define SPPIN_DEFAULT (S3C2410_SPPIN_KEEP) - -static inline struct s3c24xx_spi *to_hw(struct spi_device *sdev) -{ - return spi_master_get_devdata(sdev->master); -} - -static void s3c24xx_spi_gpiocs(struct s3c2410_spi_info *spi, int cs, int pol) -{ - gpio_set_value(spi->pin_cs, pol); -} - -static void s3c24xx_spi_chipsel(struct spi_device *spi, int value) -{ - struct s3c24xx_spi_devstate *cs = spi->controller_state; - struct s3c24xx_spi *hw = to_hw(spi); - unsigned int cspol = spi->mode & SPI_CS_HIGH ? 1 : 0; - - /* change the chipselect state and the state of the spi engine clock */ - - switch (value) { - case BITBANG_CS_INACTIVE: - hw->set_cs(hw->pdata, spi->chip_select, cspol^1); - writeb(cs->spcon, hw->regs + S3C2410_SPCON); - break; - - case BITBANG_CS_ACTIVE: - writeb(cs->spcon | S3C2410_SPCON_ENSCK, - hw->regs + S3C2410_SPCON); - hw->set_cs(hw->pdata, spi->chip_select, cspol); - break; - } -} - -static int s3c24xx_spi_update_state(struct spi_device *spi, - struct spi_transfer *t) -{ - struct s3c24xx_spi *hw = to_hw(spi); - struct s3c24xx_spi_devstate *cs = spi->controller_state; - unsigned int bpw; - unsigned int hz; - unsigned int div; - unsigned long clk; - - bpw = t ? t->bits_per_word : spi->bits_per_word; - hz = t ? 
t->speed_hz : spi->max_speed_hz; - - if (!bpw) - bpw = 8; - - if (!hz) - hz = spi->max_speed_hz; - - if (bpw != 8) { - dev_err(&spi->dev, "invalid bits-per-word (%d)\n", bpw); - return -EINVAL; - } - - if (spi->mode != cs->mode) { - u8 spcon = SPCON_DEFAULT | S3C2410_SPCON_ENSCK; - - if (spi->mode & SPI_CPHA) - spcon |= S3C2410_SPCON_CPHA_FMTB; - - if (spi->mode & SPI_CPOL) - spcon |= S3C2410_SPCON_CPOL_HIGH; - - cs->mode = spi->mode; - cs->spcon = spcon; - } - - if (cs->hz != hz) { - clk = clk_get_rate(hw->clk); - div = DIV_ROUND_UP(clk, hz * 2) - 1; - - if (div > 255) - div = 255; - - dev_dbg(&spi->dev, "pre-scaler=%d (wanted %d, got %ld)\n", - div, hz, clk / (2 * (div + 1))); - - cs->hz = hz; - cs->sppre = div; - } - - return 0; -} - -static int s3c24xx_spi_setupxfer(struct spi_device *spi, - struct spi_transfer *t) -{ - struct s3c24xx_spi_devstate *cs = spi->controller_state; - struct s3c24xx_spi *hw = to_hw(spi); - int ret; - - ret = s3c24xx_spi_update_state(spi, t); - if (!ret) - writeb(cs->sppre, hw->regs + S3C2410_SPPRE); - - return ret; -} - -static int s3c24xx_spi_setup(struct spi_device *spi) -{ - struct s3c24xx_spi_devstate *cs = spi->controller_state; - struct s3c24xx_spi *hw = to_hw(spi); - int ret; - - /* allocate settings on the first call */ - if (!cs) { - cs = kzalloc(sizeof(struct s3c24xx_spi_devstate), GFP_KERNEL); - if (!cs) { - dev_err(&spi->dev, "no memory for controller state\n"); - return -ENOMEM; - } - - cs->spcon = SPCON_DEFAULT; - cs->hz = -1; - spi->controller_state = cs; - } - - /* initialise the state from the device */ - ret = s3c24xx_spi_update_state(spi, NULL); - if (ret) - return ret; - - spin_lock(&hw->bitbang.lock); - if (!hw->bitbang.busy) { - hw->bitbang.chipselect(spi, BITBANG_CS_INACTIVE); - /* need to ndelay for 0.5 clocktick ? */ - } - spin_unlock(&hw->bitbang.lock); - - return 0; -} - -static void s3c24xx_spi_cleanup(struct spi_device *spi) -{ - kfree(spi->controller_state); -} - -static inline unsigned int hw_txbyte(struct s3c24xx_spi *hw, int count) -{ - return hw->tx ? hw->tx[count] : 0; -} - -#ifdef CONFIG_SPI_S3C24XX_FIQ -/* Support for FIQ based pseudo-DMA to improve the transfer speed. - * - * This code uses the assembly helper in spi_s3c24xx_spi.S which is - * used by the FIQ core to move data between main memory and the peripheral - * block. Since this is code running on the processor, there is no problem - * with cache coherency of the buffers, so we can use any buffer we like. - */ - -/** - * struct spi_fiq_code - FIQ code and header - * @length: The length of the code fragment, excluding this header. - * @ack_offset: The offset from @data to the word to place the IRQ ACK bit at. - * @data: The code itself to install as a FIQ handler. - */ -struct spi_fiq_code { - u32 length; - u32 ack_offset; - u8 data[0]; -}; - -extern struct spi_fiq_code s3c24xx_spi_fiq_txrx; -extern struct spi_fiq_code s3c24xx_spi_fiq_tx; -extern struct spi_fiq_code s3c24xx_spi_fiq_rx; - -/** - * ack_bit - turn IRQ into IRQ acknowledgement bit - * @irq: The interrupt number - * - * Returns the bit to write to the interrupt acknowledge register. - */ -static inline u32 ack_bit(unsigned int irq) -{ - return 1 << (irq - IRQ_EINT0); -} - -/** - * s3c24xx_spi_tryfiq - attempt to claim and setup FIQ for transfer - * @hw: The hardware state. - * - * Claim the FIQ handler (only one can be active at any one time) and - * then setup the correct transfer code for this transfer. 
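The SPPRE prescaler arithmetic in s3c24xx_spi_update_state() above follows SPI clock = pclk / (2 * (div + 1)), with div clamped to the 8-bit register range. A user-space restatement of that calculation; the 66 MHz PCLK and 1 MHz request are made-up example figures:

#include <stdio.h>

/* div = DIV_ROUND_UP(pclk, 2 * hz) - 1, clamped to 0..255,
 * as in s3c24xx_spi_update_state(). */
static unsigned int sppre_div(unsigned long pclk, unsigned int hz)
{
    unsigned long div = (pclk + 2UL * hz - 1) / (2UL * hz); /* DIV_ROUND_UP */

    div = div ? div - 1 : 0;
    if (div > 255)
        div = 255;
    return (unsigned int)div;
}

int main(void)
{
    unsigned long pclk = 66000000;   /* assumed 66 MHz PCLK */
    unsigned int hz = 1000000;       /* requested 1 MHz */
    unsigned int div = sppre_div(pclk, hz);

    /* resulting rate = pclk / (2 * (div + 1)) */
    printf("div=%u, actual=%lu Hz\n", div, pclk / (2UL * (div + 1)));
    return 0;
}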
- * - * This call updates all the necessary state information if successful, - * so the caller does not need to do anything more than start the transfer - * as normal, since the IRQ will have been re-routed to the FIQ handler. -*/ -void s3c24xx_spi_tryfiq(struct s3c24xx_spi *hw) -{ - struct pt_regs regs; - enum spi_fiq_mode mode; - struct spi_fiq_code *code; - int ret; - - if (!hw->fiq_claimed) { - /* try and claim fiq if we haven't got it, and if not - * then return and simply use another transfer method */ - - ret = claim_fiq(&hw->fiq_handler); - if (ret) - return; - } - - if (hw->tx && !hw->rx) - mode = FIQ_MODE_TX; - else if (hw->rx && !hw->tx) - mode = FIQ_MODE_RX; - else - mode = FIQ_MODE_TXRX; - - regs.uregs[fiq_rspi] = (long)hw->regs; - regs.uregs[fiq_rrx] = (long)hw->rx; - regs.uregs[fiq_rtx] = (long)hw->tx + 1; - regs.uregs[fiq_rcount] = hw->len - 1; - regs.uregs[fiq_rirq] = (long)S3C24XX_VA_IRQ; - - set_fiq_regs(®s); - - if (hw->fiq_mode != mode) { - u32 *ack_ptr; - - hw->fiq_mode = mode; - - switch (mode) { - case FIQ_MODE_TX: - code = &s3c24xx_spi_fiq_tx; - break; - case FIQ_MODE_RX: - code = &s3c24xx_spi_fiq_rx; - break; - case FIQ_MODE_TXRX: - code = &s3c24xx_spi_fiq_txrx; - break; - default: - code = NULL; - } - - BUG_ON(!code); - - ack_ptr = (u32 *)&code->data[code->ack_offset]; - *ack_ptr = ack_bit(hw->irq); - - set_fiq_handler(&code->data, code->length); - } - - s3c24xx_set_fiq(hw->irq, true); - - hw->fiq_mode = mode; - hw->fiq_inuse = 1; -} - -/** - * s3c24xx_spi_fiqop - FIQ core code callback - * @pw: Data registered with the handler - * @release: Whether this is a release or a return. - * - * Called by the FIQ code when another module wants to use the FIQ, so - * return whether we are currently using this or not and then update our - * internal state. - */ -static int s3c24xx_spi_fiqop(void *pw, int release) -{ - struct s3c24xx_spi *hw = pw; - int ret = 0; - - if (release) { - if (hw->fiq_inuse) - ret = -EBUSY; - - /* note, we do not need to unroute the FIQ, as the FIQ - * vector code de-routes it to signal the end of transfer */ - - hw->fiq_mode = FIQ_MODE_NONE; - hw->fiq_claimed = 0; - } else { - hw->fiq_claimed = 1; - } - - return ret; -} - -/** - * s3c24xx_spi_initfiq - setup the information for the FIQ core - * @hw: The hardware state. - * - * Setup the fiq_handler block to pass to the FIQ core. - */ -static inline void s3c24xx_spi_initfiq(struct s3c24xx_spi *hw) -{ - hw->fiq_handler.dev_id = hw; - hw->fiq_handler.name = dev_name(hw->dev); - hw->fiq_handler.fiq_op = s3c24xx_spi_fiqop; -} - -/** - * s3c24xx_spi_usefiq - return if we should be using FIQ. - * @hw: The hardware state. - * - * Return true if the platform data specifies whether this channel is - * allowed to use the FIQ. - */ -static inline bool s3c24xx_spi_usefiq(struct s3c24xx_spi *hw) -{ - return hw->pdata->use_fiq; -} - -/** - * s3c24xx_spi_usingfiq - return if channel is using FIQ - * @spi: The hardware state. - * - * Return whether the channel is currently using the FIQ (separate from - * whether the FIQ is claimed). 
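s3c24xx_spi_tryfiq() above chooses which FIQ code fragment to install purely from which buffers the transfer carries; anything that is neither pure Tx nor pure Rx falls back to the duplex fragment. A small stand-alone mirror of that decision; the enum and function names are local to this example:

#include <stdio.h>

enum xfer_mode { MODE_NONE, MODE_TX, MODE_RX, MODE_TXRX };

/* Mirrors the mode selection in s3c24xx_spi_tryfiq(): Tx-only and Rx-only
 * transfers get dedicated fragments, everything else uses the duplex one. */
static enum xfer_mode pick_mode(const void *tx, const void *rx)
{
    if (tx && !rx)
        return MODE_TX;
    if (rx && !tx)
        return MODE_RX;
    return MODE_TXRX;
}

int main(void)
{
    char buf[4];

    printf("%d\n", pick_mode(buf, NULL)); /* MODE_TX */
    printf("%d\n", pick_mode(buf, buf));  /* MODE_TXRX */
    return 0;
}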
- */ -static inline bool s3c24xx_spi_usingfiq(struct s3c24xx_spi *spi) -{ - return spi->fiq_inuse; -} -#else - -static inline void s3c24xx_spi_initfiq(struct s3c24xx_spi *s) { } -static inline void s3c24xx_spi_tryfiq(struct s3c24xx_spi *s) { } -static inline bool s3c24xx_spi_usefiq(struct s3c24xx_spi *s) { return false; } -static inline bool s3c24xx_spi_usingfiq(struct s3c24xx_spi *s) { return false; } - -#endif /* CONFIG_SPI_S3C24XX_FIQ */ - -static int s3c24xx_spi_txrx(struct spi_device *spi, struct spi_transfer *t) -{ - struct s3c24xx_spi *hw = to_hw(spi); - - hw->tx = t->tx_buf; - hw->rx = t->rx_buf; - hw->len = t->len; - hw->count = 0; - - init_completion(&hw->done); - - hw->fiq_inuse = 0; - if (s3c24xx_spi_usefiq(hw) && t->len >= 3) - s3c24xx_spi_tryfiq(hw); - - /* send the first byte */ - writeb(hw_txbyte(hw, 0), hw->regs + S3C2410_SPTDAT); - - wait_for_completion(&hw->done); - return hw->count; -} - -static irqreturn_t s3c24xx_spi_irq(int irq, void *dev) -{ - struct s3c24xx_spi *hw = dev; - unsigned int spsta = readb(hw->regs + S3C2410_SPSTA); - unsigned int count = hw->count; - - if (spsta & S3C2410_SPSTA_DCOL) { - dev_dbg(hw->dev, "data-collision\n"); - complete(&hw->done); - goto irq_done; - } - - if (!(spsta & S3C2410_SPSTA_READY)) { - dev_dbg(hw->dev, "spi not ready for tx?\n"); - complete(&hw->done); - goto irq_done; - } - - if (!s3c24xx_spi_usingfiq(hw)) { - hw->count++; - - if (hw->rx) - hw->rx[count] = readb(hw->regs + S3C2410_SPRDAT); - - count++; - - if (count < hw->len) - writeb(hw_txbyte(hw, count), hw->regs + S3C2410_SPTDAT); - else - complete(&hw->done); - } else { - hw->count = hw->len; - hw->fiq_inuse = 0; - - if (hw->rx) - hw->rx[hw->len-1] = readb(hw->regs + S3C2410_SPRDAT); - - complete(&hw->done); - } - - irq_done: - return IRQ_HANDLED; -} - -static void s3c24xx_spi_initialsetup(struct s3c24xx_spi *hw) -{ - /* for the moment, permanently enable the clock */ - - clk_enable(hw->clk); - - /* program defaults into the registers */ - - writeb(0xff, hw->regs + S3C2410_SPPRE); - writeb(SPPIN_DEFAULT, hw->regs + S3C2410_SPPIN); - writeb(SPCON_DEFAULT, hw->regs + S3C2410_SPCON); - - if (hw->pdata) { - if (hw->set_cs == s3c24xx_spi_gpiocs) - gpio_direction_output(hw->pdata->pin_cs, 1); - - if (hw->pdata->gpio_setup) - hw->pdata->gpio_setup(hw->pdata, 1); - } -} - -static int __init s3c24xx_spi_probe(struct platform_device *pdev) -{ - struct s3c2410_spi_info *pdata; - struct s3c24xx_spi *hw; - struct spi_master *master; - struct resource *res; - int err = 0; - - master = spi_alloc_master(&pdev->dev, sizeof(struct s3c24xx_spi)); - if (master == NULL) { - dev_err(&pdev->dev, "No memory for spi_master\n"); - err = -ENOMEM; - goto err_nomem; - } - - hw = spi_master_get_devdata(master); - memset(hw, 0, sizeof(struct s3c24xx_spi)); - - hw->master = spi_master_get(master); - hw->pdata = pdata = pdev->dev.platform_data; - hw->dev = &pdev->dev; - - if (pdata == NULL) { - dev_err(&pdev->dev, "No platform data supplied\n"); - err = -ENOENT; - goto err_no_pdata; - } - - platform_set_drvdata(pdev, hw); - init_completion(&hw->done); - - /* initialise fiq handler */ - - s3c24xx_spi_initfiq(hw); - - /* setup the master state. 
*/ - - /* the spi->mode bits understood by this driver: */ - master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; - - master->num_chipselect = hw->pdata->num_cs; - master->bus_num = pdata->bus_num; - - /* setup the state for the bitbang driver */ - - hw->bitbang.master = hw->master; - hw->bitbang.setup_transfer = s3c24xx_spi_setupxfer; - hw->bitbang.chipselect = s3c24xx_spi_chipsel; - hw->bitbang.txrx_bufs = s3c24xx_spi_txrx; - - hw->master->setup = s3c24xx_spi_setup; - hw->master->cleanup = s3c24xx_spi_cleanup; - - dev_dbg(hw->dev, "bitbang at %p\n", &hw->bitbang); - - /* find and map our resources */ - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (res == NULL) { - dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n"); - err = -ENOENT; - goto err_no_iores; - } - - hw->ioarea = request_mem_region(res->start, resource_size(res), - pdev->name); - - if (hw->ioarea == NULL) { - dev_err(&pdev->dev, "Cannot reserve region\n"); - err = -ENXIO; - goto err_no_iores; - } - - hw->regs = ioremap(res->start, resource_size(res)); - if (hw->regs == NULL) { - dev_err(&pdev->dev, "Cannot map IO\n"); - err = -ENXIO; - goto err_no_iomap; - } - - hw->irq = platform_get_irq(pdev, 0); - if (hw->irq < 0) { - dev_err(&pdev->dev, "No IRQ specified\n"); - err = -ENOENT; - goto err_no_irq; - } - - err = request_irq(hw->irq, s3c24xx_spi_irq, 0, pdev->name, hw); - if (err) { - dev_err(&pdev->dev, "Cannot claim IRQ\n"); - goto err_no_irq; - } - - hw->clk = clk_get(&pdev->dev, "spi"); - if (IS_ERR(hw->clk)) { - dev_err(&pdev->dev, "No clock for device\n"); - err = PTR_ERR(hw->clk); - goto err_no_clk; - } - - /* setup any gpio we can */ - - if (!pdata->set_cs) { - if (pdata->pin_cs < 0) { - dev_err(&pdev->dev, "No chipselect pin\n"); - goto err_register; - } - - err = gpio_request(pdata->pin_cs, dev_name(&pdev->dev)); - if (err) { - dev_err(&pdev->dev, "Failed to get gpio for cs\n"); - goto err_register; - } - - hw->set_cs = s3c24xx_spi_gpiocs; - gpio_direction_output(pdata->pin_cs, 1); - } else - hw->set_cs = pdata->set_cs; - - s3c24xx_spi_initialsetup(hw); - - /* register our spi controller */ - - err = spi_bitbang_start(&hw->bitbang); - if (err) { - dev_err(&pdev->dev, "Failed to register SPI master\n"); - goto err_register; - } - - return 0; - - err_register: - if (hw->set_cs == s3c24xx_spi_gpiocs) - gpio_free(pdata->pin_cs); - - clk_disable(hw->clk); - clk_put(hw->clk); - - err_no_clk: - free_irq(hw->irq, hw); - - err_no_irq: - iounmap(hw->regs); - - err_no_iomap: - release_resource(hw->ioarea); - kfree(hw->ioarea); - - err_no_iores: - err_no_pdata: - spi_master_put(hw->master); - - err_nomem: - return err; -} - -static int __exit s3c24xx_spi_remove(struct platform_device *dev) -{ - struct s3c24xx_spi *hw = platform_get_drvdata(dev); - - platform_set_drvdata(dev, NULL); - - spi_bitbang_stop(&hw->bitbang); - - clk_disable(hw->clk); - clk_put(hw->clk); - - free_irq(hw->irq, hw); - iounmap(hw->regs); - - if (hw->set_cs == s3c24xx_spi_gpiocs) - gpio_free(hw->pdata->pin_cs); - - release_resource(hw->ioarea); - kfree(hw->ioarea); - - spi_master_put(hw->master); - return 0; -} - - -#ifdef CONFIG_PM - -static int s3c24xx_spi_suspend(struct device *dev) -{ - struct s3c24xx_spi *hw = platform_get_drvdata(to_platform_device(dev)); - - if (hw->pdata && hw->pdata->gpio_setup) - hw->pdata->gpio_setup(hw->pdata, 0); - - clk_disable(hw->clk); - return 0; -} - -static int s3c24xx_spi_resume(struct device *dev) -{ - struct s3c24xx_spi *hw = platform_get_drvdata(to_platform_device(dev)); - - 
s3c24xx_spi_initialsetup(hw); - return 0; -} - -static const struct dev_pm_ops s3c24xx_spi_pmops = { - .suspend = s3c24xx_spi_suspend, - .resume = s3c24xx_spi_resume, -}; - -#define S3C24XX_SPI_PMOPS &s3c24xx_spi_pmops -#else -#define S3C24XX_SPI_PMOPS NULL -#endif /* CONFIG_PM */ - -MODULE_ALIAS("platform:s3c2410-spi"); -static struct platform_driver s3c24xx_spi_driver = { - .remove = __exit_p(s3c24xx_spi_remove), - .driver = { - .name = "s3c2410-spi", - .owner = THIS_MODULE, - .pm = S3C24XX_SPI_PMOPS, - }, -}; - -static int __init s3c24xx_spi_init(void) -{ - return platform_driver_probe(&s3c24xx_spi_driver, s3c24xx_spi_probe); -} - -static void __exit s3c24xx_spi_exit(void) -{ - platform_driver_unregister(&s3c24xx_spi_driver); -} - -module_init(s3c24xx_spi_init); -module_exit(s3c24xx_spi_exit); - -MODULE_DESCRIPTION("S3C24XX SPI Driver"); -MODULE_AUTHOR("Ben Dooks, "); -MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi_s3c24xx_fiq.S b/drivers/spi/spi_s3c24xx_fiq.S deleted file mode 100644 index 3793cae..0000000 --- a/drivers/spi/spi_s3c24xx_fiq.S +++ /dev/null @@ -1,116 +0,0 @@ -/* linux/drivers/spi/spi_s3c24xx_fiq.S - * - * Copyright 2009 Simtec Electronics - * Ben Dooks - * - * S3C24XX SPI - FIQ pseudo-DMA transfer code - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. -*/ - -#include -#include - -#include -#include -#include - -#include "spi_s3c24xx_fiq.h" - - .text - - @ entry to these routines is as follows, with the register names - @ defined in fiq.h so that they can be shared with the C files which - @ setup the calling registers. - @ - @ fiq_rirq The base of the IRQ registers to find S3C2410_SRCPND - @ fiq_rtmp Temporary register to hold tx/rx data - @ fiq_rspi The base of the SPI register block - @ fiq_rtx The tx buffer pointer - @ fiq_rrx The rx buffer pointer - @ fiq_rcount The number of bytes to move - - @ each entry starts with a word entry of how long it is - @ and an offset to the irq acknowledgment word - -ENTRY(s3c24xx_spi_fiq_rx) -s3c24xx_spi_fix_rx: - .word fiq_rx_end - fiq_rx_start - .word fiq_rx_irq_ack - fiq_rx_start -fiq_rx_start: - ldr fiq_rtmp, fiq_rx_irq_ack - str fiq_rtmp, [ fiq_rirq, # S3C2410_SRCPND - S3C24XX_VA_IRQ ] - - ldrb fiq_rtmp, [ fiq_rspi, # S3C2410_SPRDAT ] - strb fiq_rtmp, [ fiq_rrx ], #1 - - mov fiq_rtmp, #0xff - strb fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ] - - subs fiq_rcount, fiq_rcount, #1 - subnes pc, lr, #4 @@ return, still have work to do - - @@ set IRQ controller so that next op will trigger IRQ - mov fiq_rtmp, #0 - str fiq_rtmp, [ fiq_rirq, # S3C2410_INTMOD - S3C24XX_VA_IRQ ] - subs pc, lr, #4 - -fiq_rx_irq_ack: - .word 0 -fiq_rx_end: - -ENTRY(s3c24xx_spi_fiq_txrx) -s3c24xx_spi_fiq_txrx: - .word fiq_txrx_end - fiq_txrx_start - .word fiq_txrx_irq_ack - fiq_txrx_start -fiq_txrx_start: - - ldrb fiq_rtmp, [ fiq_rspi, # S3C2410_SPRDAT ] - strb fiq_rtmp, [ fiq_rrx ], #1 - - ldr fiq_rtmp, fiq_txrx_irq_ack - str fiq_rtmp, [ fiq_rirq, # S3C2410_SRCPND - S3C24XX_VA_IRQ ] - - ldrb fiq_rtmp, [ fiq_rtx ], #1 - strb fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ] - - subs fiq_rcount, fiq_rcount, #1 - subnes pc, lr, #4 @@ return, still have work to do - - mov fiq_rtmp, #0 - str fiq_rtmp, [ fiq_rirq, # S3C2410_INTMOD - S3C24XX_VA_IRQ ] - subs pc, lr, #4 - -fiq_txrx_irq_ack: - .word 0 - -fiq_txrx_end: - -ENTRY(s3c24xx_spi_fiq_tx) -s3c24xx_spi_fix_tx: - .word fiq_tx_end - fiq_tx_start - .word fiq_tx_irq_ack - 
fiq_tx_start -fiq_tx_start: - ldrb fiq_rtmp, [ fiq_rspi, # S3C2410_SPRDAT ] - - ldr fiq_rtmp, fiq_tx_irq_ack - str fiq_rtmp, [ fiq_rirq, # S3C2410_SRCPND - S3C24XX_VA_IRQ ] - - ldrb fiq_rtmp, [ fiq_rtx ], #1 - strb fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ] - - subs fiq_rcount, fiq_rcount, #1 - subnes pc, lr, #4 @@ return, still have work to do - - mov fiq_rtmp, #0 - str fiq_rtmp, [ fiq_rirq, # S3C2410_INTMOD - S3C24XX_VA_IRQ ] - subs pc, lr, #4 - -fiq_tx_irq_ack: - .word 0 - -fiq_tx_end: - - .end diff --git a/drivers/spi/spi_s3c24xx_fiq.h b/drivers/spi/spi_s3c24xx_fiq.h deleted file mode 100644 index a5950bb..0000000 --- a/drivers/spi/spi_s3c24xx_fiq.h +++ /dev/null @@ -1,26 +0,0 @@ -/* linux/drivers/spi/spi_s3c24xx_fiq.h - * - * Copyright 2009 Simtec Electronics - * Ben Dooks - * - * S3C24XX SPI - FIQ pseudo-DMA transfer support - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. -*/ - -/* We have R8 through R13 to play with */ - -#ifdef __ASSEMBLY__ -#define __REG_NR(x) r##x -#else -#define __REG_NR(x) (x) -#endif - -#define fiq_rspi __REG_NR(8) -#define fiq_rtmp __REG_NR(9) -#define fiq_rrx __REG_NR(10) -#define fiq_rtx __REG_NR(11) -#define fiq_rcount __REG_NR(12) -#define fiq_rirq __REG_NR(13) diff --git a/drivers/spi/spi_s3c24xx_gpio.c b/drivers/spi/spi_s3c24xx_gpio.c deleted file mode 100644 index be99135..0000000 --- a/drivers/spi/spi_s3c24xx_gpio.c +++ /dev/null @@ -1,201 +0,0 @@ -/* linux/drivers/spi/spi_s3c24xx_gpio.c - * - * Copyright (c) 2006 Ben Dooks - * Copyright (c) 2006 Simtec Electronics - * - * S3C24XX GPIO based SPI driver - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * -*/ - -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include -#include -#include - -struct s3c2410_spigpio { - struct spi_bitbang bitbang; - - struct s3c2410_spigpio_info *info; - struct platform_device *dev; -}; - -static inline struct s3c2410_spigpio *spidev_to_sg(struct spi_device *spi) -{ - return spi_master_get_devdata(spi->master); -} - -static inline void setsck(struct spi_device *dev, int on) -{ - struct s3c2410_spigpio *sg = spidev_to_sg(dev); - s3c2410_gpio_setpin(sg->info->pin_clk, on ? 1 : 0); -} - -static inline void setmosi(struct spi_device *dev, int on) -{ - struct s3c2410_spigpio *sg = spidev_to_sg(dev); - s3c2410_gpio_setpin(sg->info->pin_mosi, on ? 1 : 0); -} - -static inline u32 getmiso(struct spi_device *dev) -{ - struct s3c2410_spigpio *sg = spidev_to_sg(dev); - return s3c2410_gpio_getpin(sg->info->pin_miso) ? 
1 : 0; -} - -#define spidelay(x) ndelay(x) - -#include "spi_bitbang_txrx.h" - - -static u32 s3c2410_spigpio_txrx_mode0(struct spi_device *spi, - unsigned nsecs, u32 word, u8 bits) -{ - return bitbang_txrx_be_cpha0(spi, nsecs, 0, 0, word, bits); -} - -static u32 s3c2410_spigpio_txrx_mode1(struct spi_device *spi, - unsigned nsecs, u32 word, u8 bits) -{ - return bitbang_txrx_be_cpha1(spi, nsecs, 0, 0, word, bits); -} - -static u32 s3c2410_spigpio_txrx_mode2(struct spi_device *spi, - unsigned nsecs, u32 word, u8 bits) -{ - return bitbang_txrx_be_cpha0(spi, nsecs, 1, 0, word, bits); -} - -static u32 s3c2410_spigpio_txrx_mode3(struct spi_device *spi, - unsigned nsecs, u32 word, u8 bits) -{ - return bitbang_txrx_be_cpha1(spi, nsecs, 1, 0, word, bits); -} - - -static void s3c2410_spigpio_chipselect(struct spi_device *dev, int value) -{ - struct s3c2410_spigpio *sg = spidev_to_sg(dev); - - if (sg->info && sg->info->chip_select) - (sg->info->chip_select)(sg->info, value); -} - -static int s3c2410_spigpio_probe(struct platform_device *dev) -{ - struct s3c2410_spigpio_info *info; - struct spi_master *master; - struct s3c2410_spigpio *sp; - int ret; - - master = spi_alloc_master(&dev->dev, sizeof(struct s3c2410_spigpio)); - if (master == NULL) { - dev_err(&dev->dev, "failed to allocate spi master\n"); - ret = -ENOMEM; - goto err; - } - - sp = spi_master_get_devdata(master); - - platform_set_drvdata(dev, sp); - - /* copy in the plkatform data */ - info = sp->info = dev->dev.platform_data; - - /* setup spi bitbang adaptor */ - sp->bitbang.master = spi_master_get(master); - sp->bitbang.master->bus_num = info->bus_num; - sp->bitbang.master->num_chipselect = info->num_chipselect; - sp->bitbang.chipselect = s3c2410_spigpio_chipselect; - - sp->bitbang.txrx_word[SPI_MODE_0] = s3c2410_spigpio_txrx_mode0; - sp->bitbang.txrx_word[SPI_MODE_1] = s3c2410_spigpio_txrx_mode1; - sp->bitbang.txrx_word[SPI_MODE_2] = s3c2410_spigpio_txrx_mode2; - sp->bitbang.txrx_word[SPI_MODE_3] = s3c2410_spigpio_txrx_mode3; - - /* set state of spi pins, always assume that the clock is - * available, but do check the MOSI and MISO. 
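Once setsck()/setmosi()/getmiso()/spidelay() are defined, including spi_bitbang_txrx.h supplies the per-mode shift loops used by the txrx_word handlers above. As a rough, loopback-simulated sketch of what such an MSB-first mode-0 exchange looks like; this is an illustration only, not the kernel helper itself:

#include <stdint.h>
#include <stdio.h>

/* Simulated pins: the "bus" is a single variable wired MOSI -> MISO. */
static int mosi_level;

static void setsck(int on)  { (void)on; }            /* clock edge, no-op here */
static void setmosi(int on) { mosi_level = on; }
static int  getmiso(void)   { return mosi_level; }   /* loopback */

/* MSB-first, mode-0 (CPOL=0/CPHA=0) full-duplex word exchange. */
static uint32_t txrx_word_mode0(uint32_t word, unsigned bits)
{
    uint32_t in = 0;

    for (word <<= (32 - bits); bits; bits--) {
        setmosi((word >> 31) & 1);    /* drive next outgoing bit */
        setsck(1);                    /* leading edge: slave samples MOSI */
        in = (in << 1) | getmiso();   /* sample MISO */
        setsck(0);                    /* trailing edge */
        word <<= 1;
    }
    return in;
}

int main(void)
{
    /* loopback: the byte sent comes straight back */
    printf("0x%02x\n", (unsigned)txrx_word_mode0(0xA5, 8));
    return 0;
}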
*/ - s3c2410_gpio_setpin(info->pin_clk, 0); - s3c2410_gpio_cfgpin(info->pin_clk, S3C2410_GPIO_OUTPUT); - - if (info->pin_mosi < S3C2410_GPH10) { - s3c2410_gpio_setpin(info->pin_mosi, 0); - s3c2410_gpio_cfgpin(info->pin_mosi, S3C2410_GPIO_OUTPUT); - } - - if (info->pin_miso != S3C2410_GPA0 && info->pin_miso < S3C2410_GPH10) - s3c2410_gpio_cfgpin(info->pin_miso, S3C2410_GPIO_INPUT); - - ret = spi_bitbang_start(&sp->bitbang); - if (ret) - goto err_no_bitbang; - - return 0; - - err_no_bitbang: - spi_master_put(sp->bitbang.master); - err: - return ret; - -} - -static int s3c2410_spigpio_remove(struct platform_device *dev) -{ - struct s3c2410_spigpio *sp = platform_get_drvdata(dev); - - spi_bitbang_stop(&sp->bitbang); - spi_master_put(sp->bitbang.master); - - return 0; -} - -/* all gpio should be held over suspend/resume, so we should - * not need to deal with this -*/ - -#define s3c2410_spigpio_suspend NULL -#define s3c2410_spigpio_resume NULL - -/* work with hotplug and coldplug */ -MODULE_ALIAS("platform:spi_s3c24xx_gpio"); - -static struct platform_driver s3c2410_spigpio_drv = { - .probe = s3c2410_spigpio_probe, - .remove = s3c2410_spigpio_remove, - .suspend = s3c2410_spigpio_suspend, - .resume = s3c2410_spigpio_resume, - .driver = { - .name = "spi_s3c24xx_gpio", - .owner = THIS_MODULE, - }, -}; - -static int __init s3c2410_spigpio_init(void) -{ - return platform_driver_register(&s3c2410_spigpio_drv); -} - -static void __exit s3c2410_spigpio_exit(void) -{ - platform_driver_unregister(&s3c2410_spigpio_drv); -} - -module_init(s3c2410_spigpio_init); -module_exit(s3c2410_spigpio_exit); - -MODULE_DESCRIPTION("S3C24XX SPI Driver"); -MODULE_AUTHOR("Ben Dooks, "); -MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi_s3c64xx.c b/drivers/spi/spi_s3c64xx.c deleted file mode 100644 index 795828b..0000000 --- a/drivers/spi/spi_s3c64xx.c +++ /dev/null @@ -1,1248 +0,0 @@ -/* linux/drivers/spi/spi_s3c64xx.c - * - * Copyright (C) 2009 Samsung Electronics Ltd. - * Jaswinder Singh - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -/* Registers and bit-fields */ - -#define S3C64XX_SPI_CH_CFG 0x00 -#define S3C64XX_SPI_CLK_CFG 0x04 -#define S3C64XX_SPI_MODE_CFG 0x08 -#define S3C64XX_SPI_SLAVE_SEL 0x0C -#define S3C64XX_SPI_INT_EN 0x10 -#define S3C64XX_SPI_STATUS 0x14 -#define S3C64XX_SPI_TX_DATA 0x18 -#define S3C64XX_SPI_RX_DATA 0x1C -#define S3C64XX_SPI_PACKET_CNT 0x20 -#define S3C64XX_SPI_PENDING_CLR 0x24 -#define S3C64XX_SPI_SWAP_CFG 0x28 -#define S3C64XX_SPI_FB_CLK 0x2C - -#define S3C64XX_SPI_CH_HS_EN (1<<6) /* High Speed Enable */ -#define S3C64XX_SPI_CH_SW_RST (1<<5) -#define S3C64XX_SPI_CH_SLAVE (1<<4) -#define S3C64XX_SPI_CPOL_L (1<<3) -#define S3C64XX_SPI_CPHA_B (1<<2) -#define S3C64XX_SPI_CH_RXCH_ON (1<<1) -#define S3C64XX_SPI_CH_TXCH_ON (1<<0) - -#define S3C64XX_SPI_CLKSEL_SRCMSK (3<<9) -#define S3C64XX_SPI_CLKSEL_SRCSHFT 9 -#define S3C64XX_SPI_ENCLK_ENABLE (1<<8) -#define S3C64XX_SPI_PSR_MASK 0xff - -#define S3C64XX_SPI_MODE_CH_TSZ_BYTE (0<<29) -#define S3C64XX_SPI_MODE_CH_TSZ_HALFWORD (1<<29) -#define S3C64XX_SPI_MODE_CH_TSZ_WORD (2<<29) -#define S3C64XX_SPI_MODE_CH_TSZ_MASK (3<<29) -#define S3C64XX_SPI_MODE_BUS_TSZ_BYTE (0<<17) -#define S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD (1<<17) -#define S3C64XX_SPI_MODE_BUS_TSZ_WORD (2<<17) -#define S3C64XX_SPI_MODE_BUS_TSZ_MASK (3<<17) -#define S3C64XX_SPI_MODE_RXDMA_ON (1<<2) -#define S3C64XX_SPI_MODE_TXDMA_ON (1<<1) -#define S3C64XX_SPI_MODE_4BURST (1<<0) - -#define S3C64XX_SPI_SLAVE_AUTO (1<<1) -#define S3C64XX_SPI_SLAVE_SIG_INACT (1<<0) - -#define S3C64XX_SPI_ACT(c) writel(0, (c)->regs + S3C64XX_SPI_SLAVE_SEL) - -#define S3C64XX_SPI_DEACT(c) writel(S3C64XX_SPI_SLAVE_SIG_INACT, \ - (c)->regs + S3C64XX_SPI_SLAVE_SEL) - -#define S3C64XX_SPI_INT_TRAILING_EN (1<<6) -#define S3C64XX_SPI_INT_RX_OVERRUN_EN (1<<5) -#define S3C64XX_SPI_INT_RX_UNDERRUN_EN (1<<4) -#define S3C64XX_SPI_INT_TX_OVERRUN_EN (1<<3) -#define S3C64XX_SPI_INT_TX_UNDERRUN_EN (1<<2) -#define S3C64XX_SPI_INT_RX_FIFORDY_EN (1<<1) -#define S3C64XX_SPI_INT_TX_FIFORDY_EN (1<<0) - -#define S3C64XX_SPI_ST_RX_OVERRUN_ERR (1<<5) -#define S3C64XX_SPI_ST_RX_UNDERRUN_ERR (1<<4) -#define S3C64XX_SPI_ST_TX_OVERRUN_ERR (1<<3) -#define S3C64XX_SPI_ST_TX_UNDERRUN_ERR (1<<2) -#define S3C64XX_SPI_ST_RX_FIFORDY (1<<1) -#define S3C64XX_SPI_ST_TX_FIFORDY (1<<0) - -#define S3C64XX_SPI_PACKET_CNT_EN (1<<16) - -#define S3C64XX_SPI_PND_TX_UNDERRUN_CLR (1<<4) -#define S3C64XX_SPI_PND_TX_OVERRUN_CLR (1<<3) -#define S3C64XX_SPI_PND_RX_UNDERRUN_CLR (1<<2) -#define S3C64XX_SPI_PND_RX_OVERRUN_CLR (1<<1) -#define S3C64XX_SPI_PND_TRAILING_CLR (1<<0) - -#define S3C64XX_SPI_SWAP_RX_HALF_WORD (1<<7) -#define S3C64XX_SPI_SWAP_RX_BYTE (1<<6) -#define S3C64XX_SPI_SWAP_RX_BIT (1<<5) -#define S3C64XX_SPI_SWAP_RX_EN (1<<4) -#define S3C64XX_SPI_SWAP_TX_HALF_WORD (1<<3) -#define S3C64XX_SPI_SWAP_TX_BYTE (1<<2) -#define S3C64XX_SPI_SWAP_TX_BIT (1<<1) -#define S3C64XX_SPI_SWAP_TX_EN (1<<0) - -#define S3C64XX_SPI_FBCLK_MSK (3<<0) - -#define S3C64XX_SPI_ST_TRLCNTZ(v, i) ((((v) >> (i)->rx_lvl_offset) & \ - (((i)->fifo_lvl_mask + 1))) \ - ? 1 : 0) - -#define S3C64XX_SPI_ST_TX_DONE(v, i) ((((v) >> (i)->rx_lvl_offset) & \ - (((i)->fifo_lvl_mask + 1) << 1)) \ - ? 
1 : 0) -#define TX_FIFO_LVL(v, i) (((v) >> 6) & (i)->fifo_lvl_mask) -#define RX_FIFO_LVL(v, i) (((v) >> (i)->rx_lvl_offset) & (i)->fifo_lvl_mask) - -#define S3C64XX_SPI_MAX_TRAILCNT 0x3ff -#define S3C64XX_SPI_TRAILCNT_OFF 19 - -#define S3C64XX_SPI_TRAILCNT S3C64XX_SPI_MAX_TRAILCNT - -#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t) - -#define SUSPND (1<<0) -#define SPIBUSY (1<<1) -#define RXBUSY (1<<2) -#define TXBUSY (1<<3) - -/** - * struct s3c64xx_spi_driver_data - Runtime info holder for SPI driver. - * @clk: Pointer to the spi clock. - * @src_clk: Pointer to the clock used to generate SPI signals. - * @master: Pointer to the SPI Protocol master. - * @workqueue: Work queue for the SPI xfer requests. - * @cntrlr_info: Platform specific data for the controller this driver manages. - * @tgl_spi: Pointer to the last CS left untoggled by the cs_change hint. - * @work: Work - * @queue: To log SPI xfer requests. - * @lock: Controller specific lock. - * @state: Set of FLAGS to indicate status. - * @rx_dmach: Controller's DMA channel for Rx. - * @tx_dmach: Controller's DMA channel for Tx. - * @sfr_start: BUS address of SPI controller regs. - * @regs: Pointer to ioremap'ed controller registers. - * @xfer_completion: To indicate completion of xfer task. - * @cur_mode: Stores the active configuration of the controller. - * @cur_bpw: Stores the active bits per word settings. - * @cur_speed: Stores the active xfer clock speed. - */ -struct s3c64xx_spi_driver_data { - void __iomem *regs; - struct clk *clk; - struct clk *src_clk; - struct platform_device *pdev; - struct spi_master *master; - struct workqueue_struct *workqueue; - struct s3c64xx_spi_info *cntrlr_info; - struct spi_device *tgl_spi; - struct work_struct work; - struct list_head queue; - spinlock_t lock; - enum dma_ch rx_dmach; - enum dma_ch tx_dmach; - unsigned long sfr_start; - struct completion xfer_completion; - unsigned state; - unsigned cur_mode, cur_bpw; - unsigned cur_speed; -}; - -static struct s3c2410_dma_client s3c64xx_spi_dma_client = { - .name = "samsung-spi-dma", -}; - -static void flush_fifo(struct s3c64xx_spi_driver_data *sdd) -{ - struct s3c64xx_spi_info *sci = sdd->cntrlr_info; - void __iomem *regs = sdd->regs; - unsigned long loops; - u32 val; - - writel(0, regs + S3C64XX_SPI_PACKET_CNT); - - val = readl(regs + S3C64XX_SPI_CH_CFG); - val |= S3C64XX_SPI_CH_SW_RST; - val &= ~S3C64XX_SPI_CH_HS_EN; - writel(val, regs + S3C64XX_SPI_CH_CFG); - - /* Flush TxFIFO*/ - loops = msecs_to_loops(1); - do { - val = readl(regs + S3C64XX_SPI_STATUS); - } while (TX_FIFO_LVL(val, sci) && loops--); - - if (loops == 0) - dev_warn(&sdd->pdev->dev, "Timed out flushing TX FIFO\n"); - - /* Flush RxFIFO*/ - loops = msecs_to_loops(1); - do { - val = readl(regs + S3C64XX_SPI_STATUS); - if (RX_FIFO_LVL(val, sci)) - readl(regs + S3C64XX_SPI_RX_DATA); - else - break; - } while (loops--); - - if (loops == 0) - dev_warn(&sdd->pdev->dev, "Timed out flushing RX FIFO\n"); - - val = readl(regs + S3C64XX_SPI_CH_CFG); - val &= ~S3C64XX_SPI_CH_SW_RST; - writel(val, regs + S3C64XX_SPI_CH_CFG); - - val = readl(regs + S3C64XX_SPI_MODE_CFG); - val &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON); - writel(val, regs + S3C64XX_SPI_MODE_CFG); - - val = readl(regs + S3C64XX_SPI_CH_CFG); - val &= ~(S3C64XX_SPI_CH_RXCH_ON | S3C64XX_SPI_CH_TXCH_ON); - writel(val, regs + S3C64XX_SPI_CH_CFG); -} - -static void enable_datapath(struct s3c64xx_spi_driver_data *sdd, - struct spi_device *spi, - struct spi_transfer *xfer, int dma_mode) -{ - struct 
s3c64xx_spi_info *sci = sdd->cntrlr_info; - void __iomem *regs = sdd->regs; - u32 modecfg, chcfg; - - modecfg = readl(regs + S3C64XX_SPI_MODE_CFG); - modecfg &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON); - - chcfg = readl(regs + S3C64XX_SPI_CH_CFG); - chcfg &= ~S3C64XX_SPI_CH_TXCH_ON; - - if (dma_mode) { - chcfg &= ~S3C64XX_SPI_CH_RXCH_ON; - } else { - /* Always shift in data in FIFO, even if xfer is Tx only, - * this helps setting PCKT_CNT value for generating clocks - * as exactly needed. - */ - chcfg |= S3C64XX_SPI_CH_RXCH_ON; - writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff) - | S3C64XX_SPI_PACKET_CNT_EN, - regs + S3C64XX_SPI_PACKET_CNT); - } - - if (xfer->tx_buf != NULL) { - sdd->state |= TXBUSY; - chcfg |= S3C64XX_SPI_CH_TXCH_ON; - if (dma_mode) { - modecfg |= S3C64XX_SPI_MODE_TXDMA_ON; - s3c2410_dma_config(sdd->tx_dmach, sdd->cur_bpw / 8); - s3c2410_dma_enqueue(sdd->tx_dmach, (void *)sdd, - xfer->tx_dma, xfer->len); - s3c2410_dma_ctrl(sdd->tx_dmach, S3C2410_DMAOP_START); - } else { - switch (sdd->cur_bpw) { - case 32: - iowrite32_rep(regs + S3C64XX_SPI_TX_DATA, - xfer->tx_buf, xfer->len / 4); - break; - case 16: - iowrite16_rep(regs + S3C64XX_SPI_TX_DATA, - xfer->tx_buf, xfer->len / 2); - break; - default: - iowrite8_rep(regs + S3C64XX_SPI_TX_DATA, - xfer->tx_buf, xfer->len); - break; - } - } - } - - if (xfer->rx_buf != NULL) { - sdd->state |= RXBUSY; - - if (sci->high_speed && sdd->cur_speed >= 30000000UL - && !(sdd->cur_mode & SPI_CPHA)) - chcfg |= S3C64XX_SPI_CH_HS_EN; - - if (dma_mode) { - modecfg |= S3C64XX_SPI_MODE_RXDMA_ON; - chcfg |= S3C64XX_SPI_CH_RXCH_ON; - writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff) - | S3C64XX_SPI_PACKET_CNT_EN, - regs + S3C64XX_SPI_PACKET_CNT); - s3c2410_dma_config(sdd->rx_dmach, sdd->cur_bpw / 8); - s3c2410_dma_enqueue(sdd->rx_dmach, (void *)sdd, - xfer->rx_dma, xfer->len); - s3c2410_dma_ctrl(sdd->rx_dmach, S3C2410_DMAOP_START); - } - } - - writel(modecfg, regs + S3C64XX_SPI_MODE_CFG); - writel(chcfg, regs + S3C64XX_SPI_CH_CFG); -} - -static inline void enable_cs(struct s3c64xx_spi_driver_data *sdd, - struct spi_device *spi) -{ - struct s3c64xx_spi_csinfo *cs; - - if (sdd->tgl_spi != NULL) { /* If last device toggled after mssg */ - if (sdd->tgl_spi != spi) { /* if last mssg on diff device */ - /* Deselect the last toggled device */ - cs = sdd->tgl_spi->controller_data; - cs->set_level(cs->line, - spi->mode & SPI_CS_HIGH ? 0 : 1); - } - sdd->tgl_spi = NULL; - } - - cs = spi->controller_data; - cs->set_level(cs->line, spi->mode & SPI_CS_HIGH ? 1 : 0); -} - -static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd, - struct spi_transfer *xfer, int dma_mode) -{ - struct s3c64xx_spi_info *sci = sdd->cntrlr_info; - void __iomem *regs = sdd->regs; - unsigned long val; - int ms; - - /* millisecs to xfer 'len' bytes @ 'cur_speed' */ - ms = xfer->len * 8 * 1000 / sdd->cur_speed; - ms += 10; /* some tolerance */ - - if (dma_mode) { - val = msecs_to_jiffies(ms) + 10; - val = wait_for_completion_timeout(&sdd->xfer_completion, val); - } else { - u32 status; - val = msecs_to_loops(ms); - do { - status = readl(regs + S3C64XX_SPI_STATUS); - } while (RX_FIFO_LVL(status, sci) < xfer->len && --val); - } - - if (!val) - return -EIO; - - if (dma_mode) { - u32 status; - - /* - * DmaTx returns after simply writing data in the FIFO, - * w/o waiting for real transmission on the bus to finish. - * DmaRx returns only after Dma read data from FIFO which - * needs bus transmission to finish, so we don't worry if - * Xfer involved Rx(with or without Tx). 
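wait_for_xfer() above budgets its timeout from the wire time of the transfer plus a fixed tolerance: len bytes take len * 8 / speed seconds to clock out. A stand-alone restatement of that estimate; the 4 KiB / 1 MHz figures are made-up example values:

#include <stdio.h>

/* Rough transfer-time budget, as in wait_for_xfer(): milliseconds to clock
 * out len bytes at speed_hz, plus a fixed 10 ms of slack. */
static unsigned int xfer_timeout_ms(unsigned int len, unsigned int speed_hz)
{
    unsigned int ms =
        (unsigned int)((unsigned long long)len * 8 * 1000 / speed_hz);

    return ms + 10;
}

int main(void)
{
    /* 4 KiB at 1 MHz -> ~32 ms of wire time; printed value includes slack */
    printf("%u ms\n", xfer_timeout_ms(4096, 1000000));
    return 0;
}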
- */ - if (xfer->rx_buf == NULL) { - val = msecs_to_loops(10); - status = readl(regs + S3C64XX_SPI_STATUS); - while ((TX_FIFO_LVL(status, sci) - || !S3C64XX_SPI_ST_TX_DONE(status, sci)) - && --val) { - cpu_relax(); - status = readl(regs + S3C64XX_SPI_STATUS); - } - - if (!val) - return -EIO; - } - } else { - /* If it was only Tx */ - if (xfer->rx_buf == NULL) { - sdd->state &= ~TXBUSY; - return 0; - } - - switch (sdd->cur_bpw) { - case 32: - ioread32_rep(regs + S3C64XX_SPI_RX_DATA, - xfer->rx_buf, xfer->len / 4); - break; - case 16: - ioread16_rep(regs + S3C64XX_SPI_RX_DATA, - xfer->rx_buf, xfer->len / 2); - break; - default: - ioread8_rep(regs + S3C64XX_SPI_RX_DATA, - xfer->rx_buf, xfer->len); - break; - } - sdd->state &= ~RXBUSY; - } - - return 0; -} - -static inline void disable_cs(struct s3c64xx_spi_driver_data *sdd, - struct spi_device *spi) -{ - struct s3c64xx_spi_csinfo *cs = spi->controller_data; - - if (sdd->tgl_spi == spi) - sdd->tgl_spi = NULL; - - cs->set_level(cs->line, spi->mode & SPI_CS_HIGH ? 0 : 1); -} - -static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd) -{ - struct s3c64xx_spi_info *sci = sdd->cntrlr_info; - void __iomem *regs = sdd->regs; - u32 val; - - /* Disable Clock */ - if (sci->clk_from_cmu) { - clk_disable(sdd->src_clk); - } else { - val = readl(regs + S3C64XX_SPI_CLK_CFG); - val &= ~S3C64XX_SPI_ENCLK_ENABLE; - writel(val, regs + S3C64XX_SPI_CLK_CFG); - } - - /* Set Polarity and Phase */ - val = readl(regs + S3C64XX_SPI_CH_CFG); - val &= ~(S3C64XX_SPI_CH_SLAVE | - S3C64XX_SPI_CPOL_L | - S3C64XX_SPI_CPHA_B); - - if (sdd->cur_mode & SPI_CPOL) - val |= S3C64XX_SPI_CPOL_L; - - if (sdd->cur_mode & SPI_CPHA) - val |= S3C64XX_SPI_CPHA_B; - - writel(val, regs + S3C64XX_SPI_CH_CFG); - - /* Set Channel & DMA Mode */ - val = readl(regs + S3C64XX_SPI_MODE_CFG); - val &= ~(S3C64XX_SPI_MODE_BUS_TSZ_MASK - | S3C64XX_SPI_MODE_CH_TSZ_MASK); - - switch (sdd->cur_bpw) { - case 32: - val |= S3C64XX_SPI_MODE_BUS_TSZ_WORD; - val |= S3C64XX_SPI_MODE_CH_TSZ_WORD; - break; - case 16: - val |= S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD; - val |= S3C64XX_SPI_MODE_CH_TSZ_HALFWORD; - break; - default: - val |= S3C64XX_SPI_MODE_BUS_TSZ_BYTE; - val |= S3C64XX_SPI_MODE_CH_TSZ_BYTE; - break; - } - - writel(val, regs + S3C64XX_SPI_MODE_CFG); - - if (sci->clk_from_cmu) { - /* Configure Clock */ - /* There is half-multiplier before the SPI */ - clk_set_rate(sdd->src_clk, sdd->cur_speed * 2); - /* Enable Clock */ - clk_enable(sdd->src_clk); - } else { - /* Configure Clock */ - val = readl(regs + S3C64XX_SPI_CLK_CFG); - val &= ~S3C64XX_SPI_PSR_MASK; - val |= ((clk_get_rate(sdd->src_clk) / sdd->cur_speed / 2 - 1) - & S3C64XX_SPI_PSR_MASK); - writel(val, regs + S3C64XX_SPI_CLK_CFG); - - /* Enable Clock */ - val = readl(regs + S3C64XX_SPI_CLK_CFG); - val |= S3C64XX_SPI_ENCLK_ENABLE; - writel(val, regs + S3C64XX_SPI_CLK_CFG); - } -} - -static void s3c64xx_spi_dma_rxcb(struct s3c2410_dma_chan *chan, void *buf_id, - int size, enum s3c2410_dma_buffresult res) -{ - struct s3c64xx_spi_driver_data *sdd = buf_id; - unsigned long flags; - - spin_lock_irqsave(&sdd->lock, flags); - - if (res == S3C2410_RES_OK) - sdd->state &= ~RXBUSY; - else - dev_err(&sdd->pdev->dev, "DmaAbrtRx-%d\n", size); - - /* If the other done */ - if (!(sdd->state & TXBUSY)) - complete(&sdd->xfer_completion); - - spin_unlock_irqrestore(&sdd->lock, flags); -} - -static void s3c64xx_spi_dma_txcb(struct s3c2410_dma_chan *chan, void *buf_id, - int size, enum s3c2410_dma_buffresult res) -{ - struct s3c64xx_spi_driver_data *sdd = buf_id; 
- unsigned long flags; - - spin_lock_irqsave(&sdd->lock, flags); - - if (res == S3C2410_RES_OK) - sdd->state &= ~TXBUSY; - else - dev_err(&sdd->pdev->dev, "DmaAbrtTx-%d \n", size); - - /* If the other done */ - if (!(sdd->state & RXBUSY)) - complete(&sdd->xfer_completion); - - spin_unlock_irqrestore(&sdd->lock, flags); -} - -#define XFER_DMAADDR_INVALID DMA_BIT_MASK(32) - -static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd, - struct spi_message *msg) -{ - struct s3c64xx_spi_info *sci = sdd->cntrlr_info; - struct device *dev = &sdd->pdev->dev; - struct spi_transfer *xfer; - - if (msg->is_dma_mapped) - return 0; - - /* First mark all xfer unmapped */ - list_for_each_entry(xfer, &msg->transfers, transfer_list) { - xfer->rx_dma = XFER_DMAADDR_INVALID; - xfer->tx_dma = XFER_DMAADDR_INVALID; - } - - /* Map until end or first fail */ - list_for_each_entry(xfer, &msg->transfers, transfer_list) { - - if (xfer->len <= ((sci->fifo_lvl_mask >> 1) + 1)) - continue; - - if (xfer->tx_buf != NULL) { - xfer->tx_dma = dma_map_single(dev, - (void *)xfer->tx_buf, xfer->len, - DMA_TO_DEVICE); - if (dma_mapping_error(dev, xfer->tx_dma)) { - dev_err(dev, "dma_map_single Tx failed\n"); - xfer->tx_dma = XFER_DMAADDR_INVALID; - return -ENOMEM; - } - } - - if (xfer->rx_buf != NULL) { - xfer->rx_dma = dma_map_single(dev, xfer->rx_buf, - xfer->len, DMA_FROM_DEVICE); - if (dma_mapping_error(dev, xfer->rx_dma)) { - dev_err(dev, "dma_map_single Rx failed\n"); - dma_unmap_single(dev, xfer->tx_dma, - xfer->len, DMA_TO_DEVICE); - xfer->tx_dma = XFER_DMAADDR_INVALID; - xfer->rx_dma = XFER_DMAADDR_INVALID; - return -ENOMEM; - } - } - } - - return 0; -} - -static void s3c64xx_spi_unmap_mssg(struct s3c64xx_spi_driver_data *sdd, - struct spi_message *msg) -{ - struct s3c64xx_spi_info *sci = sdd->cntrlr_info; - struct device *dev = &sdd->pdev->dev; - struct spi_transfer *xfer; - - if (msg->is_dma_mapped) - return; - - list_for_each_entry(xfer, &msg->transfers, transfer_list) { - - if (xfer->len <= ((sci->fifo_lvl_mask >> 1) + 1)) - continue; - - if (xfer->rx_buf != NULL - && xfer->rx_dma != XFER_DMAADDR_INVALID) - dma_unmap_single(dev, xfer->rx_dma, - xfer->len, DMA_FROM_DEVICE); - - if (xfer->tx_buf != NULL - && xfer->tx_dma != XFER_DMAADDR_INVALID) - dma_unmap_single(dev, xfer->tx_dma, - xfer->len, DMA_TO_DEVICE); - } -} - -static void handle_msg(struct s3c64xx_spi_driver_data *sdd, - struct spi_message *msg) -{ - struct s3c64xx_spi_info *sci = sdd->cntrlr_info; - struct spi_device *spi = msg->spi; - struct s3c64xx_spi_csinfo *cs = spi->controller_data; - struct spi_transfer *xfer; - int status = 0, cs_toggle = 0; - u32 speed; - u8 bpw; - - /* If Master's(controller) state differs from that needed by Slave */ - if (sdd->cur_speed != spi->max_speed_hz - || sdd->cur_mode != spi->mode - || sdd->cur_bpw != spi->bits_per_word) { - sdd->cur_bpw = spi->bits_per_word; - sdd->cur_speed = spi->max_speed_hz; - sdd->cur_mode = spi->mode; - s3c64xx_spi_config(sdd); - } - - /* Map all the transfers if needed */ - if (s3c64xx_spi_map_mssg(sdd, msg)) { - dev_err(&spi->dev, - "Xfer: Unable to map message buffers!\n"); - status = -ENOMEM; - goto out; - } - - /* Configure feedback delay */ - writel(cs->fb_delay & 0x3, sdd->regs + S3C64XX_SPI_FB_CLK); - - list_for_each_entry(xfer, &msg->transfers, transfer_list) { - - unsigned long flags; - int use_dma; - - INIT_COMPLETION(sdd->xfer_completion); - - /* Only BPW and Speed may change across transfers */ - bpw = xfer->bits_per_word ? : spi->bits_per_word; - speed = xfer->speed_hz ? 
: spi->max_speed_hz; - - if (xfer->len % (bpw / 8)) { - dev_err(&spi->dev, - "Xfer length(%u) not a multiple of word size(%u)\n", - xfer->len, bpw / 8); - status = -EIO; - goto out; - } - - if (bpw != sdd->cur_bpw || speed != sdd->cur_speed) { - sdd->cur_bpw = bpw; - sdd->cur_speed = speed; - s3c64xx_spi_config(sdd); - } - - /* Polling method for xfers not bigger than FIFO capacity */ - if (xfer->len <= ((sci->fifo_lvl_mask >> 1) + 1)) - use_dma = 0; - else - use_dma = 1; - - spin_lock_irqsave(&sdd->lock, flags); - - /* Pending only which is to be done */ - sdd->state &= ~RXBUSY; - sdd->state &= ~TXBUSY; - - enable_datapath(sdd, spi, xfer, use_dma); - - /* Slave Select */ - enable_cs(sdd, spi); - - /* Start the signals */ - S3C64XX_SPI_ACT(sdd); - - spin_unlock_irqrestore(&sdd->lock, flags); - - status = wait_for_xfer(sdd, xfer, use_dma); - - /* Quiese the signals */ - S3C64XX_SPI_DEACT(sdd); - - if (status) { - dev_err(&spi->dev, "I/O Error: " - "rx-%d tx-%d res:rx-%c tx-%c len-%d\n", - xfer->rx_buf ? 1 : 0, xfer->tx_buf ? 1 : 0, - (sdd->state & RXBUSY) ? 'f' : 'p', - (sdd->state & TXBUSY) ? 'f' : 'p', - xfer->len); - - if (use_dma) { - if (xfer->tx_buf != NULL - && (sdd->state & TXBUSY)) - s3c2410_dma_ctrl(sdd->tx_dmach, - S3C2410_DMAOP_FLUSH); - if (xfer->rx_buf != NULL - && (sdd->state & RXBUSY)) - s3c2410_dma_ctrl(sdd->rx_dmach, - S3C2410_DMAOP_FLUSH); - } - - goto out; - } - - if (xfer->delay_usecs) - udelay(xfer->delay_usecs); - - if (xfer->cs_change) { - /* Hint that the next mssg is gonna be - for the same device */ - if (list_is_last(&xfer->transfer_list, - &msg->transfers)) - cs_toggle = 1; - else - disable_cs(sdd, spi); - } - - msg->actual_length += xfer->len; - - flush_fifo(sdd); - } - -out: - if (!cs_toggle || status) - disable_cs(sdd, spi); - else - sdd->tgl_spi = spi; - - s3c64xx_spi_unmap_mssg(sdd, msg); - - msg->status = status; - - if (msg->complete) - msg->complete(msg->context); -} - -static int acquire_dma(struct s3c64xx_spi_driver_data *sdd) -{ - if (s3c2410_dma_request(sdd->rx_dmach, - &s3c64xx_spi_dma_client, NULL) < 0) { - dev_err(&sdd->pdev->dev, "cannot get RxDMA\n"); - return 0; - } - s3c2410_dma_set_buffdone_fn(sdd->rx_dmach, s3c64xx_spi_dma_rxcb); - s3c2410_dma_devconfig(sdd->rx_dmach, S3C2410_DMASRC_HW, - sdd->sfr_start + S3C64XX_SPI_RX_DATA); - - if (s3c2410_dma_request(sdd->tx_dmach, - &s3c64xx_spi_dma_client, NULL) < 0) { - dev_err(&sdd->pdev->dev, "cannot get TxDMA\n"); - s3c2410_dma_free(sdd->rx_dmach, &s3c64xx_spi_dma_client); - return 0; - } - s3c2410_dma_set_buffdone_fn(sdd->tx_dmach, s3c64xx_spi_dma_txcb); - s3c2410_dma_devconfig(sdd->tx_dmach, S3C2410_DMASRC_MEM, - sdd->sfr_start + S3C64XX_SPI_TX_DATA); - - return 1; -} - -static void s3c64xx_spi_work(struct work_struct *work) -{ - struct s3c64xx_spi_driver_data *sdd = container_of(work, - struct s3c64xx_spi_driver_data, work); - unsigned long flags; - - /* Acquire DMA channels */ - while (!acquire_dma(sdd)) - msleep(10); - - spin_lock_irqsave(&sdd->lock, flags); - - while (!list_empty(&sdd->queue) - && !(sdd->state & SUSPND)) { - - struct spi_message *msg; - - msg = container_of(sdd->queue.next, struct spi_message, queue); - - list_del_init(&msg->queue); - - /* Set Xfer busy flag */ - sdd->state |= SPIBUSY; - - spin_unlock_irqrestore(&sdd->lock, flags); - - handle_msg(sdd, msg); - - spin_lock_irqsave(&sdd->lock, flags); - - sdd->state &= ~SPIBUSY; - } - - spin_unlock_irqrestore(&sdd->lock, flags); - - /* Free DMA channels */ - s3c2410_dma_free(sdd->tx_dmach, &s3c64xx_spi_dma_client); - 
s3c2410_dma_free(sdd->rx_dmach, &s3c64xx_spi_dma_client); -} - -static int s3c64xx_spi_transfer(struct spi_device *spi, - struct spi_message *msg) -{ - struct s3c64xx_spi_driver_data *sdd; - unsigned long flags; - - sdd = spi_master_get_devdata(spi->master); - - spin_lock_irqsave(&sdd->lock, flags); - - if (sdd->state & SUSPND) { - spin_unlock_irqrestore(&sdd->lock, flags); - return -ESHUTDOWN; - } - - msg->status = -EINPROGRESS; - msg->actual_length = 0; - - list_add_tail(&msg->queue, &sdd->queue); - - queue_work(sdd->workqueue, &sdd->work); - - spin_unlock_irqrestore(&sdd->lock, flags); - - return 0; -} - -/* - * Here we only check the validity of requested configuration - * and save the configuration in a local data-structure. - * The controller is actually configured only just before we - * get a message to transfer. - */ -static int s3c64xx_spi_setup(struct spi_device *spi) -{ - struct s3c64xx_spi_csinfo *cs = spi->controller_data; - struct s3c64xx_spi_driver_data *sdd; - struct s3c64xx_spi_info *sci; - struct spi_message *msg; - unsigned long flags; - int err = 0; - - if (cs == NULL || cs->set_level == NULL) { - dev_err(&spi->dev, "No CS for SPI(%d)\n", spi->chip_select); - return -ENODEV; - } - - sdd = spi_master_get_devdata(spi->master); - sci = sdd->cntrlr_info; - - spin_lock_irqsave(&sdd->lock, flags); - - list_for_each_entry(msg, &sdd->queue, queue) { - /* Is some mssg is already queued for this device */ - if (msg->spi == spi) { - dev_err(&spi->dev, - "setup: attempt while mssg in queue!\n"); - spin_unlock_irqrestore(&sdd->lock, flags); - return -EBUSY; - } - } - - if (sdd->state & SUSPND) { - spin_unlock_irqrestore(&sdd->lock, flags); - dev_err(&spi->dev, - "setup: SPI-%d not active!\n", spi->master->bus_num); - return -ESHUTDOWN; - } - - spin_unlock_irqrestore(&sdd->lock, flags); - - if (spi->bits_per_word != 8 - && spi->bits_per_word != 16 - && spi->bits_per_word != 32) { - dev_err(&spi->dev, "setup: %dbits/wrd not supported!\n", - spi->bits_per_word); - err = -EINVAL; - goto setup_exit; - } - - /* Check if we can provide the requested rate */ - if (!sci->clk_from_cmu) { - u32 psr, speed; - - /* Max possible */ - speed = clk_get_rate(sdd->src_clk) / 2 / (0 + 1); - - if (spi->max_speed_hz > speed) - spi->max_speed_hz = speed; - - psr = clk_get_rate(sdd->src_clk) / 2 / spi->max_speed_hz - 1; - psr &= S3C64XX_SPI_PSR_MASK; - if (psr == S3C64XX_SPI_PSR_MASK) - psr--; - - speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1); - if (spi->max_speed_hz < speed) { - if (psr+1 < S3C64XX_SPI_PSR_MASK) { - psr++; - } else { - err = -EINVAL; - goto setup_exit; - } - } - - speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1); - if (spi->max_speed_hz >= speed) - spi->max_speed_hz = speed; - else - err = -EINVAL; - } - -setup_exit: - - /* setup() returns with device de-selected */ - disable_cs(sdd, spi); - - return err; -} - -static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel) -{ - struct s3c64xx_spi_info *sci = sdd->cntrlr_info; - void __iomem *regs = sdd->regs; - unsigned int val; - - sdd->cur_speed = 0; - - S3C64XX_SPI_DEACT(sdd); - - /* Disable Interrupts - we use Polling if not DMA mode */ - writel(0, regs + S3C64XX_SPI_INT_EN); - - if (!sci->clk_from_cmu) - writel(sci->src_clk_nr << S3C64XX_SPI_CLKSEL_SRCSHFT, - regs + S3C64XX_SPI_CLK_CFG); - writel(0, regs + S3C64XX_SPI_MODE_CFG); - writel(0, regs + S3C64XX_SPI_PACKET_CNT); - - /* Clear any irq pending bits */ - writel(readl(regs + S3C64XX_SPI_PENDING_CLR), - regs + S3C64XX_SPI_PENDING_CLR); - - writel(0, regs 
+ S3C64XX_SPI_SWAP_CFG); - - val = readl(regs + S3C64XX_SPI_MODE_CFG); - val &= ~S3C64XX_SPI_MODE_4BURST; - val &= ~(S3C64XX_SPI_MAX_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF); - val |= (S3C64XX_SPI_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF); - writel(val, regs + S3C64XX_SPI_MODE_CFG); - - flush_fifo(sdd); -} - -static int __init s3c64xx_spi_probe(struct platform_device *pdev) -{ - struct resource *mem_res, *dmatx_res, *dmarx_res; - struct s3c64xx_spi_driver_data *sdd; - struct s3c64xx_spi_info *sci; - struct spi_master *master; - int ret; - - if (pdev->id < 0) { - dev_err(&pdev->dev, - "Invalid platform device id-%d\n", pdev->id); - return -ENODEV; - } - - if (pdev->dev.platform_data == NULL) { - dev_err(&pdev->dev, "platform_data missing!\n"); - return -ENODEV; - } - - sci = pdev->dev.platform_data; - if (!sci->src_clk_name) { - dev_err(&pdev->dev, - "Board init must call s3c64xx_spi_set_info()\n"); - return -EINVAL; - } - - /* Check for availability of necessary resource */ - - dmatx_res = platform_get_resource(pdev, IORESOURCE_DMA, 0); - if (dmatx_res == NULL) { - dev_err(&pdev->dev, "Unable to get SPI-Tx dma resource\n"); - return -ENXIO; - } - - dmarx_res = platform_get_resource(pdev, IORESOURCE_DMA, 1); - if (dmarx_res == NULL) { - dev_err(&pdev->dev, "Unable to get SPI-Rx dma resource\n"); - return -ENXIO; - } - - mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (mem_res == NULL) { - dev_err(&pdev->dev, "Unable to get SPI MEM resource\n"); - return -ENXIO; - } - - master = spi_alloc_master(&pdev->dev, - sizeof(struct s3c64xx_spi_driver_data)); - if (master == NULL) { - dev_err(&pdev->dev, "Unable to allocate SPI Master\n"); - return -ENOMEM; - } - - platform_set_drvdata(pdev, master); - - sdd = spi_master_get_devdata(master); - sdd->master = master; - sdd->cntrlr_info = sci; - sdd->pdev = pdev; - sdd->sfr_start = mem_res->start; - sdd->tx_dmach = dmatx_res->start; - sdd->rx_dmach = dmarx_res->start; - - sdd->cur_bpw = 8; - - master->bus_num = pdev->id; - master->setup = s3c64xx_spi_setup; - master->transfer = s3c64xx_spi_transfer; - master->num_chipselect = sci->num_cs; - master->dma_alignment = 8; - /* the spi->mode bits understood by this driver: */ - master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; - - if (request_mem_region(mem_res->start, - resource_size(mem_res), pdev->name) == NULL) { - dev_err(&pdev->dev, "Req mem region failed\n"); - ret = -ENXIO; - goto err0; - } - - sdd->regs = ioremap(mem_res->start, resource_size(mem_res)); - if (sdd->regs == NULL) { - dev_err(&pdev->dev, "Unable to remap IO\n"); - ret = -ENXIO; - goto err1; - } - - if (sci->cfg_gpio == NULL || sci->cfg_gpio(pdev)) { - dev_err(&pdev->dev, "Unable to config gpio\n"); - ret = -EBUSY; - goto err2; - } - - /* Setup clocks */ - sdd->clk = clk_get(&pdev->dev, "spi"); - if (IS_ERR(sdd->clk)) { - dev_err(&pdev->dev, "Unable to acquire clock 'spi'\n"); - ret = PTR_ERR(sdd->clk); - goto err3; - } - - if (clk_enable(sdd->clk)) { - dev_err(&pdev->dev, "Couldn't enable clock 'spi'\n"); - ret = -EBUSY; - goto err4; - } - - sdd->src_clk = clk_get(&pdev->dev, sci->src_clk_name); - if (IS_ERR(sdd->src_clk)) { - dev_err(&pdev->dev, - "Unable to acquire clock '%s'\n", sci->src_clk_name); - ret = PTR_ERR(sdd->src_clk); - goto err5; - } - - if (clk_enable(sdd->src_clk)) { - dev_err(&pdev->dev, "Couldn't enable clock '%s'\n", - sci->src_clk_name); - ret = -EBUSY; - goto err6; - } - - sdd->workqueue = create_singlethread_workqueue( - dev_name(master->dev.parent)); - if (sdd->workqueue == NULL) { - 
dev_err(&pdev->dev, "Unable to create workqueue\n"); - ret = -ENOMEM; - goto err7; - } - - /* Setup Deufult Mode */ - s3c64xx_spi_hwinit(sdd, pdev->id); - - spin_lock_init(&sdd->lock); - init_completion(&sdd->xfer_completion); - INIT_WORK(&sdd->work, s3c64xx_spi_work); - INIT_LIST_HEAD(&sdd->queue); - - if (spi_register_master(master)) { - dev_err(&pdev->dev, "cannot register SPI master\n"); - ret = -EBUSY; - goto err8; - } - - dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d " - "with %d Slaves attached\n", - pdev->id, master->num_chipselect); - dev_dbg(&pdev->dev, "\tIOmem=[0x%x-0x%x]\tDMA=[Rx-%d, Tx-%d]\n", - mem_res->end, mem_res->start, - sdd->rx_dmach, sdd->tx_dmach); - - return 0; - -err8: - destroy_workqueue(sdd->workqueue); -err7: - clk_disable(sdd->src_clk); -err6: - clk_put(sdd->src_clk); -err5: - clk_disable(sdd->clk); -err4: - clk_put(sdd->clk); -err3: -err2: - iounmap((void *) sdd->regs); -err1: - release_mem_region(mem_res->start, resource_size(mem_res)); -err0: - platform_set_drvdata(pdev, NULL); - spi_master_put(master); - - return ret; -} - -static int s3c64xx_spi_remove(struct platform_device *pdev) -{ - struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); - struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master); - struct resource *mem_res; - unsigned long flags; - - spin_lock_irqsave(&sdd->lock, flags); - sdd->state |= SUSPND; - spin_unlock_irqrestore(&sdd->lock, flags); - - while (sdd->state & SPIBUSY) - msleep(10); - - spi_unregister_master(master); - - destroy_workqueue(sdd->workqueue); - - clk_disable(sdd->src_clk); - clk_put(sdd->src_clk); - - clk_disable(sdd->clk); - clk_put(sdd->clk); - - iounmap((void *) sdd->regs); - - mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (mem_res != NULL) - release_mem_region(mem_res->start, resource_size(mem_res)); - - platform_set_drvdata(pdev, NULL); - spi_master_put(master); - - return 0; -} - -#ifdef CONFIG_PM -static int s3c64xx_spi_suspend(struct platform_device *pdev, pm_message_t state) -{ - struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); - struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master); - unsigned long flags; - - spin_lock_irqsave(&sdd->lock, flags); - sdd->state |= SUSPND; - spin_unlock_irqrestore(&sdd->lock, flags); - - while (sdd->state & SPIBUSY) - msleep(10); - - /* Disable the clock */ - clk_disable(sdd->src_clk); - clk_disable(sdd->clk); - - sdd->cur_speed = 0; /* Output Clock is stopped */ - - return 0; -} - -static int s3c64xx_spi_resume(struct platform_device *pdev) -{ - struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); - struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master); - struct s3c64xx_spi_info *sci = sdd->cntrlr_info; - unsigned long flags; - - sci->cfg_gpio(pdev); - - /* Enable the clock */ - clk_enable(sdd->src_clk); - clk_enable(sdd->clk); - - s3c64xx_spi_hwinit(sdd, pdev->id); - - spin_lock_irqsave(&sdd->lock, flags); - sdd->state &= ~SUSPND; - spin_unlock_irqrestore(&sdd->lock, flags); - - return 0; -} -#else -#define s3c64xx_spi_suspend NULL -#define s3c64xx_spi_resume NULL -#endif /* CONFIG_PM */ - -static struct platform_driver s3c64xx_spi_driver = { - .driver = { - .name = "s3c64xx-spi", - .owner = THIS_MODULE, - }, - .remove = s3c64xx_spi_remove, - .suspend = s3c64xx_spi_suspend, - .resume = s3c64xx_spi_resume, -}; -MODULE_ALIAS("platform:s3c64xx-spi"); - -static int __init s3c64xx_spi_init(void) -{ - return platform_driver_probe(&s3c64xx_spi_driver, 
s3c64xx_spi_probe); -} -subsys_initcall(s3c64xx_spi_init); - -static void __exit s3c64xx_spi_exit(void) -{ - platform_driver_unregister(&s3c64xx_spi_driver); -} -module_exit(s3c64xx_spi_exit); - -MODULE_AUTHOR("Jaswinder Singh "); -MODULE_DESCRIPTION("S3C64XX SPI Controller Driver"); -MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi_sh.c b/drivers/spi/spi_sh.c deleted file mode 100644 index 9eedd71..0000000 --- a/drivers/spi/spi_sh.c +++ /dev/null @@ -1,543 +0,0 @@ -/* - * SH SPI bus driver - * - * Copyright (C) 2011 Renesas Solutions Corp. - * - * Based on pxa2xx_spi.c: - * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define SPI_SH_TBR 0x00 -#define SPI_SH_RBR 0x00 -#define SPI_SH_CR1 0x08 -#define SPI_SH_CR2 0x10 -#define SPI_SH_CR3 0x18 -#define SPI_SH_CR4 0x20 -#define SPI_SH_CR5 0x28 - -/* CR1 */ -#define SPI_SH_TBE 0x80 -#define SPI_SH_TBF 0x40 -#define SPI_SH_RBE 0x20 -#define SPI_SH_RBF 0x10 -#define SPI_SH_PFONRD 0x08 -#define SPI_SH_SSDB 0x04 -#define SPI_SH_SSD 0x02 -#define SPI_SH_SSA 0x01 - -/* CR2 */ -#define SPI_SH_RSTF 0x80 -#define SPI_SH_LOOPBK 0x40 -#define SPI_SH_CPOL 0x20 -#define SPI_SH_CPHA 0x10 -#define SPI_SH_L1M0 0x08 - -/* CR3 */ -#define SPI_SH_MAX_BYTE 0xFF - -/* CR4 */ -#define SPI_SH_TBEI 0x80 -#define SPI_SH_TBFI 0x40 -#define SPI_SH_RBEI 0x20 -#define SPI_SH_RBFI 0x10 -#define SPI_SH_WPABRT 0x04 -#define SPI_SH_SSS 0x01 - -/* CR8 */ -#define SPI_SH_P1L0 0x80 -#define SPI_SH_PP1L0 0x40 -#define SPI_SH_MUXI 0x20 -#define SPI_SH_MUXIRQ 0x10 - -#define SPI_SH_FIFO_SIZE 32 -#define SPI_SH_SEND_TIMEOUT (3 * HZ) -#define SPI_SH_RECEIVE_TIMEOUT (HZ >> 3) - -#undef DEBUG - -struct spi_sh_data { - void __iomem *addr; - int irq; - struct spi_master *master; - struct list_head queue; - struct workqueue_struct *workqueue; - struct work_struct ws; - unsigned long cr1; - wait_queue_head_t wait; - spinlock_t lock; -}; - -static void spi_sh_write(struct spi_sh_data *ss, unsigned long data, - unsigned long offset) -{ - writel(data, ss->addr + offset); -} - -static unsigned long spi_sh_read(struct spi_sh_data *ss, unsigned long offset) -{ - return readl(ss->addr + offset); -} - -static void spi_sh_set_bit(struct spi_sh_data *ss, unsigned long val, - unsigned long offset) -{ - unsigned long tmp; - - tmp = spi_sh_read(ss, offset); - tmp |= val; - spi_sh_write(ss, tmp, offset); -} - -static void spi_sh_clear_bit(struct spi_sh_data *ss, unsigned long val, - unsigned long offset) -{ - unsigned long tmp; - - tmp = spi_sh_read(ss, offset); - tmp &= ~val; - spi_sh_write(ss, tmp, offset); -} - -static void clear_fifo(struct spi_sh_data *ss) -{ - spi_sh_set_bit(ss, SPI_SH_RSTF, SPI_SH_CR2); - spi_sh_clear_bit(ss, SPI_SH_RSTF, SPI_SH_CR2); -} - -static int 
spi_sh_wait_receive_buffer(struct spi_sh_data *ss) -{ - int timeout = 100000; - - while (spi_sh_read(ss, SPI_SH_CR1) & SPI_SH_RBE) { - udelay(10); - if (timeout-- < 0) - return -ETIMEDOUT; - } - return 0; -} - -static int spi_sh_wait_write_buffer_empty(struct spi_sh_data *ss) -{ - int timeout = 100000; - - while (!(spi_sh_read(ss, SPI_SH_CR1) & SPI_SH_TBE)) { - udelay(10); - if (timeout-- < 0) - return -ETIMEDOUT; - } - return 0; -} - -static int spi_sh_send(struct spi_sh_data *ss, struct spi_message *mesg, - struct spi_transfer *t) -{ - int i, retval = 0; - int remain = t->len; - int cur_len; - unsigned char *data; - unsigned long tmp; - long ret; - - if (t->len) - spi_sh_set_bit(ss, SPI_SH_SSA, SPI_SH_CR1); - - data = (unsigned char *)t->tx_buf; - while (remain > 0) { - cur_len = min(SPI_SH_FIFO_SIZE, remain); - for (i = 0; i < cur_len && - !(spi_sh_read(ss, SPI_SH_CR4) & - SPI_SH_WPABRT) && - !(spi_sh_read(ss, SPI_SH_CR1) & SPI_SH_TBF); - i++) - spi_sh_write(ss, (unsigned long)data[i], SPI_SH_TBR); - - if (spi_sh_read(ss, SPI_SH_CR4) & SPI_SH_WPABRT) { - /* Abort SPI operation */ - spi_sh_set_bit(ss, SPI_SH_WPABRT, SPI_SH_CR4); - retval = -EIO; - break; - } - - cur_len = i; - - remain -= cur_len; - data += cur_len; - - if (remain > 0) { - ss->cr1 &= ~SPI_SH_TBE; - spi_sh_set_bit(ss, SPI_SH_TBE, SPI_SH_CR4); - ret = wait_event_interruptible_timeout(ss->wait, - ss->cr1 & SPI_SH_TBE, - SPI_SH_SEND_TIMEOUT); - if (ret == 0 && !(ss->cr1 & SPI_SH_TBE)) { - printk(KERN_ERR "%s: timeout\n", __func__); - return -ETIMEDOUT; - } - } - } - - if (list_is_last(&t->transfer_list, &mesg->transfers)) { - tmp = spi_sh_read(ss, SPI_SH_CR1); - tmp = tmp & ~(SPI_SH_SSD | SPI_SH_SSDB); - spi_sh_write(ss, tmp, SPI_SH_CR1); - spi_sh_set_bit(ss, SPI_SH_SSA, SPI_SH_CR1); - - ss->cr1 &= ~SPI_SH_TBE; - spi_sh_set_bit(ss, SPI_SH_TBE, SPI_SH_CR4); - ret = wait_event_interruptible_timeout(ss->wait, - ss->cr1 & SPI_SH_TBE, - SPI_SH_SEND_TIMEOUT); - if (ret == 0 && (ss->cr1 & SPI_SH_TBE)) { - printk(KERN_ERR "%s: timeout\n", __func__); - return -ETIMEDOUT; - } - } - - return retval; -} - -static int spi_sh_receive(struct spi_sh_data *ss, struct spi_message *mesg, - struct spi_transfer *t) -{ - int i; - int remain = t->len; - int cur_len; - unsigned char *data; - unsigned long tmp; - long ret; - - if (t->len > SPI_SH_MAX_BYTE) - spi_sh_write(ss, SPI_SH_MAX_BYTE, SPI_SH_CR3); - else - spi_sh_write(ss, t->len, SPI_SH_CR3); - - tmp = spi_sh_read(ss, SPI_SH_CR1); - tmp = tmp & ~(SPI_SH_SSD | SPI_SH_SSDB); - spi_sh_write(ss, tmp, SPI_SH_CR1); - spi_sh_set_bit(ss, SPI_SH_SSA, SPI_SH_CR1); - - spi_sh_wait_write_buffer_empty(ss); - - data = (unsigned char *)t->rx_buf; - while (remain > 0) { - if (remain >= SPI_SH_FIFO_SIZE) { - ss->cr1 &= ~SPI_SH_RBF; - spi_sh_set_bit(ss, SPI_SH_RBF, SPI_SH_CR4); - ret = wait_event_interruptible_timeout(ss->wait, - ss->cr1 & SPI_SH_RBF, - SPI_SH_RECEIVE_TIMEOUT); - if (ret == 0 && - spi_sh_read(ss, SPI_SH_CR1) & SPI_SH_RBE) { - printk(KERN_ERR "%s: timeout\n", __func__); - return -ETIMEDOUT; - } - } - - cur_len = min(SPI_SH_FIFO_SIZE, remain); - for (i = 0; i < cur_len; i++) { - if (spi_sh_wait_receive_buffer(ss)) - break; - data[i] = (unsigned char)spi_sh_read(ss, SPI_SH_RBR); - } - - remain -= cur_len; - data += cur_len; - } - - /* deassert CS when SPI is receiving. 
*/ - if (t->len > SPI_SH_MAX_BYTE) { - clear_fifo(ss); - spi_sh_write(ss, 1, SPI_SH_CR3); - } else { - spi_sh_write(ss, 0, SPI_SH_CR3); - } - - return 0; -} - -static void spi_sh_work(struct work_struct *work) -{ - struct spi_sh_data *ss = container_of(work, struct spi_sh_data, ws); - struct spi_message *mesg; - struct spi_transfer *t; - unsigned long flags; - int ret; - - pr_debug("%s: enter\n", __func__); - - spin_lock_irqsave(&ss->lock, flags); - while (!list_empty(&ss->queue)) { - mesg = list_entry(ss->queue.next, struct spi_message, queue); - list_del_init(&mesg->queue); - - spin_unlock_irqrestore(&ss->lock, flags); - list_for_each_entry(t, &mesg->transfers, transfer_list) { - pr_debug("tx_buf = %p, rx_buf = %p\n", - t->tx_buf, t->rx_buf); - pr_debug("len = %d, delay_usecs = %d\n", - t->len, t->delay_usecs); - - if (t->tx_buf) { - ret = spi_sh_send(ss, mesg, t); - if (ret < 0) - goto error; - } - if (t->rx_buf) { - ret = spi_sh_receive(ss, mesg, t); - if (ret < 0) - goto error; - } - mesg->actual_length += t->len; - } - spin_lock_irqsave(&ss->lock, flags); - - mesg->status = 0; - mesg->complete(mesg->context); - } - - clear_fifo(ss); - spi_sh_set_bit(ss, SPI_SH_SSD, SPI_SH_CR1); - udelay(100); - - spi_sh_clear_bit(ss, SPI_SH_SSA | SPI_SH_SSDB | SPI_SH_SSD, - SPI_SH_CR1); - - clear_fifo(ss); - - spin_unlock_irqrestore(&ss->lock, flags); - - return; - - error: - mesg->status = ret; - mesg->complete(mesg->context); - - spi_sh_clear_bit(ss, SPI_SH_SSA | SPI_SH_SSDB | SPI_SH_SSD, - SPI_SH_CR1); - clear_fifo(ss); - -} - -static int spi_sh_setup(struct spi_device *spi) -{ - struct spi_sh_data *ss = spi_master_get_devdata(spi->master); - - if (!spi->bits_per_word) - spi->bits_per_word = 8; - - pr_debug("%s: enter\n", __func__); - - spi_sh_write(ss, 0xfe, SPI_SH_CR1); /* SPI sycle stop */ - spi_sh_write(ss, 0x00, SPI_SH_CR1); /* CR1 init */ - spi_sh_write(ss, 0x00, SPI_SH_CR3); /* CR3 init */ - - clear_fifo(ss); - - /* 1/8 clock */ - spi_sh_write(ss, spi_sh_read(ss, SPI_SH_CR2) | 0x07, SPI_SH_CR2); - udelay(10); - - return 0; -} - -static int spi_sh_transfer(struct spi_device *spi, struct spi_message *mesg) -{ - struct spi_sh_data *ss = spi_master_get_devdata(spi->master); - unsigned long flags; - - pr_debug("%s: enter\n", __func__); - pr_debug("\tmode = %02x\n", spi->mode); - - spin_lock_irqsave(&ss->lock, flags); - - mesg->actual_length = 0; - mesg->status = -EINPROGRESS; - - spi_sh_clear_bit(ss, SPI_SH_SSA, SPI_SH_CR1); - - list_add_tail(&mesg->queue, &ss->queue); - queue_work(ss->workqueue, &ss->ws); - - spin_unlock_irqrestore(&ss->lock, flags); - - return 0; -} - -static void spi_sh_cleanup(struct spi_device *spi) -{ - struct spi_sh_data *ss = spi_master_get_devdata(spi->master); - - pr_debug("%s: enter\n", __func__); - - spi_sh_clear_bit(ss, SPI_SH_SSA | SPI_SH_SSDB | SPI_SH_SSD, - SPI_SH_CR1); -} - -static irqreturn_t spi_sh_irq(int irq, void *_ss) -{ - struct spi_sh_data *ss = (struct spi_sh_data *)_ss; - unsigned long cr1; - - cr1 = spi_sh_read(ss, SPI_SH_CR1); - if (cr1 & SPI_SH_TBE) - ss->cr1 |= SPI_SH_TBE; - if (cr1 & SPI_SH_TBF) - ss->cr1 |= SPI_SH_TBF; - if (cr1 & SPI_SH_RBE) - ss->cr1 |= SPI_SH_RBE; - if (cr1 & SPI_SH_RBF) - ss->cr1 |= SPI_SH_RBF; - - if (ss->cr1) { - spi_sh_clear_bit(ss, ss->cr1, SPI_SH_CR4); - wake_up(&ss->wait); - } - - return IRQ_HANDLED; -} - -static int __devexit spi_sh_remove(struct platform_device *pdev) -{ - struct spi_sh_data *ss = dev_get_drvdata(&pdev->dev); - - spi_unregister_master(ss->master); - destroy_workqueue(ss->workqueue); - 
free_irq(ss->irq, ss); - iounmap(ss->addr); - - return 0; -} - -static int __devinit spi_sh_probe(struct platform_device *pdev) -{ - struct resource *res; - struct spi_master *master; - struct spi_sh_data *ss; - int ret, irq; - - /* get base addr */ - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (unlikely(res == NULL)) { - dev_err(&pdev->dev, "invalid resource\n"); - return -EINVAL; - } - - irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(&pdev->dev, "platform_get_irq error\n"); - return -ENODEV; - } - - master = spi_alloc_master(&pdev->dev, sizeof(struct spi_sh_data)); - if (master == NULL) { - dev_err(&pdev->dev, "spi_alloc_master error.\n"); - return -ENOMEM; - } - - ss = spi_master_get_devdata(master); - dev_set_drvdata(&pdev->dev, ss); - - ss->irq = irq; - ss->master = master; - ss->addr = ioremap(res->start, resource_size(res)); - if (ss->addr == NULL) { - dev_err(&pdev->dev, "ioremap error.\n"); - ret = -ENOMEM; - goto error1; - } - INIT_LIST_HEAD(&ss->queue); - spin_lock_init(&ss->lock); - INIT_WORK(&ss->ws, spi_sh_work); - init_waitqueue_head(&ss->wait); - ss->workqueue = create_singlethread_workqueue( - dev_name(master->dev.parent)); - if (ss->workqueue == NULL) { - dev_err(&pdev->dev, "create workqueue error\n"); - ret = -EBUSY; - goto error2; - } - - ret = request_irq(irq, spi_sh_irq, IRQF_DISABLED, "spi_sh", ss); - if (ret < 0) { - dev_err(&pdev->dev, "request_irq error\n"); - goto error3; - } - - master->num_chipselect = 2; - master->bus_num = pdev->id; - master->setup = spi_sh_setup; - master->transfer = spi_sh_transfer; - master->cleanup = spi_sh_cleanup; - - ret = spi_register_master(master); - if (ret < 0) { - printk(KERN_ERR "spi_register_master error.\n"); - goto error4; - } - - return 0; - - error4: - free_irq(irq, ss); - error3: - destroy_workqueue(ss->workqueue); - error2: - iounmap(ss->addr); - error1: - spi_master_put(master); - - return ret; -} - -static struct platform_driver spi_sh_driver = { - .probe = spi_sh_probe, - .remove = __devexit_p(spi_sh_remove), - .driver = { - .name = "sh_spi", - .owner = THIS_MODULE, - }, -}; - -static int __init spi_sh_init(void) -{ - return platform_driver_register(&spi_sh_driver); -} -module_init(spi_sh_init); - -static void __exit spi_sh_exit(void) -{ - platform_driver_unregister(&spi_sh_driver); -} -module_exit(spi_sh_exit); - -MODULE_DESCRIPTION("SH SPI bus driver"); -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Yoshihiro Shimoda"); -MODULE_ALIAS("platform:sh_spi"); diff --git a/drivers/spi/spi_sh_msiof.c b/drivers/spi/spi_sh_msiof.c deleted file mode 100644 index e00d94b..0000000 --- a/drivers/spi/spi_sh_msiof.c +++ /dev/null @@ -1,749 +0,0 @@ -/* - * SuperH MSIOF SPI Master Interface - * - * Copyright (c) 2009 Magnus Damm - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#include - -struct sh_msiof_spi_priv { - struct spi_bitbang bitbang; /* must be first for spi_bitbang.c */ - void __iomem *mapbase; - struct clk *clk; - struct platform_device *pdev; - struct sh_msiof_spi_info *info; - struct completion done; - unsigned long flags; - int tx_fifo_size; - int rx_fifo_size; -}; - -#define TMDR1 0x00 -#define TMDR2 0x04 -#define TMDR3 0x08 -#define RMDR1 0x10 -#define RMDR2 0x14 -#define RMDR3 0x18 -#define TSCR 0x20 -#define RSCR 0x22 -#define CTR 0x28 -#define FCTR 0x30 -#define STR 0x40 -#define IER 0x44 -#define TDR1 0x48 -#define TDR2 0x4c -#define TFDR 0x50 -#define RDR1 0x58 -#define RDR2 0x5c -#define RFDR 0x60 - -#define CTR_TSCKE (1 << 15) -#define CTR_TFSE (1 << 14) -#define CTR_TXE (1 << 9) -#define CTR_RXE (1 << 8) - -#define STR_TEOF (1 << 23) -#define STR_REOF (1 << 7) - -static u32 sh_msiof_read(struct sh_msiof_spi_priv *p, int reg_offs) -{ - switch (reg_offs) { - case TSCR: - case RSCR: - return ioread16(p->mapbase + reg_offs); - default: - return ioread32(p->mapbase + reg_offs); - } -} - -static void sh_msiof_write(struct sh_msiof_spi_priv *p, int reg_offs, - u32 value) -{ - switch (reg_offs) { - case TSCR: - case RSCR: - iowrite16(value, p->mapbase + reg_offs); - break; - default: - iowrite32(value, p->mapbase + reg_offs); - break; - } -} - -static int sh_msiof_modify_ctr_wait(struct sh_msiof_spi_priv *p, - u32 clr, u32 set) -{ - u32 mask = clr | set; - u32 data; - int k; - - data = sh_msiof_read(p, CTR); - data &= ~clr; - data |= set; - sh_msiof_write(p, CTR, data); - - for (k = 100; k > 0; k--) { - if ((sh_msiof_read(p, CTR) & mask) == set) - break; - - udelay(10); - } - - return k > 0 ? 
0 : -ETIMEDOUT; -} - -static irqreturn_t sh_msiof_spi_irq(int irq, void *data) -{ - struct sh_msiof_spi_priv *p = data; - - /* just disable the interrupt and wake up */ - sh_msiof_write(p, IER, 0); - complete(&p->done); - - return IRQ_HANDLED; -} - -static struct { - unsigned short div; - unsigned short scr; -} const sh_msiof_spi_clk_table[] = { - { 1, 0x0007 }, - { 2, 0x0000 }, - { 4, 0x0001 }, - { 8, 0x0002 }, - { 16, 0x0003 }, - { 32, 0x0004 }, - { 64, 0x1f00 }, - { 128, 0x1f01 }, - { 256, 0x1f02 }, - { 512, 0x1f03 }, - { 1024, 0x1f04 }, -}; - -static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p, - unsigned long parent_rate, - unsigned long spi_hz) -{ - unsigned long div = 1024; - size_t k; - - if (!WARN_ON(!spi_hz || !parent_rate)) - div = parent_rate / spi_hz; - - /* TODO: make more fine grained */ - - for (k = 0; k < ARRAY_SIZE(sh_msiof_spi_clk_table); k++) { - if (sh_msiof_spi_clk_table[k].div >= div) - break; - } - - k = min_t(int, k, ARRAY_SIZE(sh_msiof_spi_clk_table) - 1); - - sh_msiof_write(p, TSCR, sh_msiof_spi_clk_table[k].scr); - sh_msiof_write(p, RSCR, sh_msiof_spi_clk_table[k].scr); -} - -static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p, - u32 cpol, u32 cpha, - u32 tx_hi_z, u32 lsb_first) -{ - u32 tmp; - int edge; - - /* - * CPOL CPHA TSCKIZ RSCKIZ TEDG REDG - * 0 0 10 10 1 1 - * 0 1 10 10 0 0 - * 1 0 11 11 0 0 - * 1 1 11 11 1 1 - */ - sh_msiof_write(p, FCTR, 0); - sh_msiof_write(p, TMDR1, 0xe2000005 | (lsb_first << 24)); - sh_msiof_write(p, RMDR1, 0x22000005 | (lsb_first << 24)); - - tmp = 0xa0000000; - tmp |= cpol << 30; /* TSCKIZ */ - tmp |= cpol << 28; /* RSCKIZ */ - - edge = cpol ^ !cpha; - - tmp |= edge << 27; /* TEDG */ - tmp |= edge << 26; /* REDG */ - tmp |= (tx_hi_z ? 2 : 0) << 22; /* TXDIZ */ - sh_msiof_write(p, CTR, tmp); -} - -static void sh_msiof_spi_set_mode_regs(struct sh_msiof_spi_priv *p, - const void *tx_buf, void *rx_buf, - u32 bits, u32 words) -{ - u32 dr2 = ((bits - 1) << 24) | ((words - 1) << 16); - - if (tx_buf) - sh_msiof_write(p, TMDR2, dr2); - else - sh_msiof_write(p, TMDR2, dr2 | 1); - - if (rx_buf) - sh_msiof_write(p, RMDR2, dr2); - - sh_msiof_write(p, IER, STR_TEOF | STR_REOF); -} - -static void sh_msiof_reset_str(struct sh_msiof_spi_priv *p) -{ - sh_msiof_write(p, STR, sh_msiof_read(p, STR)); -} - -static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p, - const void *tx_buf, int words, int fs) -{ - const u8 *buf_8 = tx_buf; - int k; - - for (k = 0; k < words; k++) - sh_msiof_write(p, TFDR, buf_8[k] << fs); -} - -static void sh_msiof_spi_write_fifo_16(struct sh_msiof_spi_priv *p, - const void *tx_buf, int words, int fs) -{ - const u16 *buf_16 = tx_buf; - int k; - - for (k = 0; k < words; k++) - sh_msiof_write(p, TFDR, buf_16[k] << fs); -} - -static void sh_msiof_spi_write_fifo_16u(struct sh_msiof_spi_priv *p, - const void *tx_buf, int words, int fs) -{ - const u16 *buf_16 = tx_buf; - int k; - - for (k = 0; k < words; k++) - sh_msiof_write(p, TFDR, get_unaligned(&buf_16[k]) << fs); -} - -static void sh_msiof_spi_write_fifo_32(struct sh_msiof_spi_priv *p, - const void *tx_buf, int words, int fs) -{ - const u32 *buf_32 = tx_buf; - int k; - - for (k = 0; k < words; k++) - sh_msiof_write(p, TFDR, buf_32[k] << fs); -} - -static void sh_msiof_spi_write_fifo_32u(struct sh_msiof_spi_priv *p, - const void *tx_buf, int words, int fs) -{ - const u32 *buf_32 = tx_buf; - int k; - - for (k = 0; k < words; k++) - sh_msiof_write(p, TFDR, get_unaligned(&buf_32[k]) << fs); -} - -static void 
sh_msiof_spi_write_fifo_s32(struct sh_msiof_spi_priv *p, - const void *tx_buf, int words, int fs) -{ - const u32 *buf_32 = tx_buf; - int k; - - for (k = 0; k < words; k++) - sh_msiof_write(p, TFDR, swab32(buf_32[k] << fs)); -} - -static void sh_msiof_spi_write_fifo_s32u(struct sh_msiof_spi_priv *p, - const void *tx_buf, int words, int fs) -{ - const u32 *buf_32 = tx_buf; - int k; - - for (k = 0; k < words; k++) - sh_msiof_write(p, TFDR, swab32(get_unaligned(&buf_32[k]) << fs)); -} - -static void sh_msiof_spi_read_fifo_8(struct sh_msiof_spi_priv *p, - void *rx_buf, int words, int fs) -{ - u8 *buf_8 = rx_buf; - int k; - - for (k = 0; k < words; k++) - buf_8[k] = sh_msiof_read(p, RFDR) >> fs; -} - -static void sh_msiof_spi_read_fifo_16(struct sh_msiof_spi_priv *p, - void *rx_buf, int words, int fs) -{ - u16 *buf_16 = rx_buf; - int k; - - for (k = 0; k < words; k++) - buf_16[k] = sh_msiof_read(p, RFDR) >> fs; -} - -static void sh_msiof_spi_read_fifo_16u(struct sh_msiof_spi_priv *p, - void *rx_buf, int words, int fs) -{ - u16 *buf_16 = rx_buf; - int k; - - for (k = 0; k < words; k++) - put_unaligned(sh_msiof_read(p, RFDR) >> fs, &buf_16[k]); -} - -static void sh_msiof_spi_read_fifo_32(struct sh_msiof_spi_priv *p, - void *rx_buf, int words, int fs) -{ - u32 *buf_32 = rx_buf; - int k; - - for (k = 0; k < words; k++) - buf_32[k] = sh_msiof_read(p, RFDR) >> fs; -} - -static void sh_msiof_spi_read_fifo_32u(struct sh_msiof_spi_priv *p, - void *rx_buf, int words, int fs) -{ - u32 *buf_32 = rx_buf; - int k; - - for (k = 0; k < words; k++) - put_unaligned(sh_msiof_read(p, RFDR) >> fs, &buf_32[k]); -} - -static void sh_msiof_spi_read_fifo_s32(struct sh_msiof_spi_priv *p, - void *rx_buf, int words, int fs) -{ - u32 *buf_32 = rx_buf; - int k; - - for (k = 0; k < words; k++) - buf_32[k] = swab32(sh_msiof_read(p, RFDR) >> fs); -} - -static void sh_msiof_spi_read_fifo_s32u(struct sh_msiof_spi_priv *p, - void *rx_buf, int words, int fs) -{ - u32 *buf_32 = rx_buf; - int k; - - for (k = 0; k < words; k++) - put_unaligned(swab32(sh_msiof_read(p, RFDR) >> fs), &buf_32[k]); -} - -static int sh_msiof_spi_bits(struct spi_device *spi, struct spi_transfer *t) -{ - int bits; - - bits = t ? t->bits_per_word : 0; - if (!bits) - bits = spi->bits_per_word; - return bits; -} - -static unsigned long sh_msiof_spi_hz(struct spi_device *spi, - struct spi_transfer *t) -{ - unsigned long hz; - - hz = t ? t->speed_hz : 0; - if (!hz) - hz = spi->max_speed_hz; - return hz; -} - -static int sh_msiof_spi_setup_transfer(struct spi_device *spi, - struct spi_transfer *t) -{ - int bits; - - /* noting to check hz values against since parent clock is disabled */ - - bits = sh_msiof_spi_bits(spi, t); - if (bits < 8) - return -EINVAL; - if (bits > 32) - return -EINVAL; - - return spi_bitbang_setup_transfer(spi, t); -} - -static void sh_msiof_spi_chipselect(struct spi_device *spi, int is_on) -{ - struct sh_msiof_spi_priv *p = spi_master_get_devdata(spi->master); - int value; - - /* chip select is active low unless SPI_CS_HIGH is set */ - if (spi->mode & SPI_CS_HIGH) - value = (is_on == BITBANG_CS_ACTIVE) ? 1 : 0; - else - value = (is_on == BITBANG_CS_ACTIVE) ? 
0 : 1; - - if (is_on == BITBANG_CS_ACTIVE) { - if (!test_and_set_bit(0, &p->flags)) { - pm_runtime_get_sync(&p->pdev->dev); - clk_enable(p->clk); - } - - /* Configure pins before asserting CS */ - sh_msiof_spi_set_pin_regs(p, !!(spi->mode & SPI_CPOL), - !!(spi->mode & SPI_CPHA), - !!(spi->mode & SPI_3WIRE), - !!(spi->mode & SPI_LSB_FIRST)); - } - - /* use spi->controller data for CS (same strategy as spi_gpio) */ - gpio_set_value((unsigned)spi->controller_data, value); - - if (is_on == BITBANG_CS_INACTIVE) { - if (test_and_clear_bit(0, &p->flags)) { - clk_disable(p->clk); - pm_runtime_put(&p->pdev->dev); - } - } -} - -static int sh_msiof_spi_txrx_once(struct sh_msiof_spi_priv *p, - void (*tx_fifo)(struct sh_msiof_spi_priv *, - const void *, int, int), - void (*rx_fifo)(struct sh_msiof_spi_priv *, - void *, int, int), - const void *tx_buf, void *rx_buf, - int words, int bits) -{ - int fifo_shift; - int ret; - - /* limit maximum word transfer to rx/tx fifo size */ - if (tx_buf) - words = min_t(int, words, p->tx_fifo_size); - if (rx_buf) - words = min_t(int, words, p->rx_fifo_size); - - /* the fifo contents need shifting */ - fifo_shift = 32 - bits; - - /* setup msiof transfer mode registers */ - sh_msiof_spi_set_mode_regs(p, tx_buf, rx_buf, bits, words); - - /* write tx fifo */ - if (tx_buf) - tx_fifo(p, tx_buf, words, fifo_shift); - - /* setup clock and rx/tx signals */ - ret = sh_msiof_modify_ctr_wait(p, 0, CTR_TSCKE); - if (rx_buf) - ret = ret ? ret : sh_msiof_modify_ctr_wait(p, 0, CTR_RXE); - ret = ret ? ret : sh_msiof_modify_ctr_wait(p, 0, CTR_TXE); - - /* start by setting frame bit */ - INIT_COMPLETION(p->done); - ret = ret ? ret : sh_msiof_modify_ctr_wait(p, 0, CTR_TFSE); - if (ret) { - dev_err(&p->pdev->dev, "failed to start hardware\n"); - goto err; - } - - /* wait for tx fifo to be emptied / rx fifo to be filled */ - wait_for_completion(&p->done); - - /* read rx fifo */ - if (rx_buf) - rx_fifo(p, rx_buf, words, fifo_shift); - - /* clear status bits */ - sh_msiof_reset_str(p); - - /* shut down frame, tx/tx and clock signals */ - ret = sh_msiof_modify_ctr_wait(p, CTR_TFSE, 0); - ret = ret ? ret : sh_msiof_modify_ctr_wait(p, CTR_TXE, 0); - if (rx_buf) - ret = ret ? ret : sh_msiof_modify_ctr_wait(p, CTR_RXE, 0); - ret = ret ? 
ret : sh_msiof_modify_ctr_wait(p, CTR_TSCKE, 0); - if (ret) { - dev_err(&p->pdev->dev, "failed to shut down hardware\n"); - goto err; - } - - return words; - - err: - sh_msiof_write(p, IER, 0); - return ret; -} - -static int sh_msiof_spi_txrx(struct spi_device *spi, struct spi_transfer *t) -{ - struct sh_msiof_spi_priv *p = spi_master_get_devdata(spi->master); - void (*tx_fifo)(struct sh_msiof_spi_priv *, const void *, int, int); - void (*rx_fifo)(struct sh_msiof_spi_priv *, void *, int, int); - int bits; - int bytes_per_word; - int bytes_done; - int words; - int n; - bool swab; - - bits = sh_msiof_spi_bits(spi, t); - - if (bits <= 8 && t->len > 15 && !(t->len & 3)) { - bits = 32; - swab = true; - } else { - swab = false; - } - - /* setup bytes per word and fifo read/write functions */ - if (bits <= 8) { - bytes_per_word = 1; - tx_fifo = sh_msiof_spi_write_fifo_8; - rx_fifo = sh_msiof_spi_read_fifo_8; - } else if (bits <= 16) { - bytes_per_word = 2; - if ((unsigned long)t->tx_buf & 0x01) - tx_fifo = sh_msiof_spi_write_fifo_16u; - else - tx_fifo = sh_msiof_spi_write_fifo_16; - - if ((unsigned long)t->rx_buf & 0x01) - rx_fifo = sh_msiof_spi_read_fifo_16u; - else - rx_fifo = sh_msiof_spi_read_fifo_16; - } else if (swab) { - bytes_per_word = 4; - if ((unsigned long)t->tx_buf & 0x03) - tx_fifo = sh_msiof_spi_write_fifo_s32u; - else - tx_fifo = sh_msiof_spi_write_fifo_s32; - - if ((unsigned long)t->rx_buf & 0x03) - rx_fifo = sh_msiof_spi_read_fifo_s32u; - else - rx_fifo = sh_msiof_spi_read_fifo_s32; - } else { - bytes_per_word = 4; - if ((unsigned long)t->tx_buf & 0x03) - tx_fifo = sh_msiof_spi_write_fifo_32u; - else - tx_fifo = sh_msiof_spi_write_fifo_32; - - if ((unsigned long)t->rx_buf & 0x03) - rx_fifo = sh_msiof_spi_read_fifo_32u; - else - rx_fifo = sh_msiof_spi_read_fifo_32; - } - - /* setup clocks (clock already enabled in chipselect()) */ - sh_msiof_spi_set_clk_regs(p, clk_get_rate(p->clk), - sh_msiof_spi_hz(spi, t)); - - /* transfer in fifo sized chunks */ - words = t->len / bytes_per_word; - bytes_done = 0; - - while (bytes_done < t->len) { - void *rx_buf = t->rx_buf ? t->rx_buf + bytes_done : NULL; - const void *tx_buf = t->tx_buf ? 
t->tx_buf + bytes_done : NULL; - n = sh_msiof_spi_txrx_once(p, tx_fifo, rx_fifo, - tx_buf, - rx_buf, - words, bits); - if (n < 0) - break; - - bytes_done += n * bytes_per_word; - words -= n; - } - - return bytes_done; -} - -static u32 sh_msiof_spi_txrx_word(struct spi_device *spi, unsigned nsecs, - u32 word, u8 bits) -{ - BUG(); /* unused but needed by bitbang code */ - return 0; -} - -static int sh_msiof_spi_probe(struct platform_device *pdev) -{ - struct resource *r; - struct spi_master *master; - struct sh_msiof_spi_priv *p; - char clk_name[16]; - int i; - int ret; - - master = spi_alloc_master(&pdev->dev, sizeof(struct sh_msiof_spi_priv)); - if (master == NULL) { - dev_err(&pdev->dev, "failed to allocate spi master\n"); - ret = -ENOMEM; - goto err0; - } - - p = spi_master_get_devdata(master); - - platform_set_drvdata(pdev, p); - p->info = pdev->dev.platform_data; - init_completion(&p->done); - - snprintf(clk_name, sizeof(clk_name), "msiof%d", pdev->id); - p->clk = clk_get(&pdev->dev, clk_name); - if (IS_ERR(p->clk)) { - dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name); - ret = PTR_ERR(p->clk); - goto err1; - } - - r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - i = platform_get_irq(pdev, 0); - if (!r || i < 0) { - dev_err(&pdev->dev, "cannot get platform resources\n"); - ret = -ENOENT; - goto err2; - } - p->mapbase = ioremap_nocache(r->start, resource_size(r)); - if (!p->mapbase) { - dev_err(&pdev->dev, "unable to ioremap\n"); - ret = -ENXIO; - goto err2; - } - - ret = request_irq(i, sh_msiof_spi_irq, IRQF_DISABLED, - dev_name(&pdev->dev), p); - if (ret) { - dev_err(&pdev->dev, "unable to request irq\n"); - goto err3; - } - - p->pdev = pdev; - pm_runtime_enable(&pdev->dev); - - /* The standard version of MSIOF use 64 word FIFOs */ - p->tx_fifo_size = 64; - p->rx_fifo_size = 64; - - /* Platform data may override FIFO sizes */ - if (p->info->tx_fifo_override) - p->tx_fifo_size = p->info->tx_fifo_override; - if (p->info->rx_fifo_override) - p->rx_fifo_size = p->info->rx_fifo_override; - - /* init master and bitbang code */ - master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; - master->mode_bits |= SPI_LSB_FIRST | SPI_3WIRE; - master->flags = 0; - master->bus_num = pdev->id; - master->num_chipselect = p->info->num_chipselect; - master->setup = spi_bitbang_setup; - master->cleanup = spi_bitbang_cleanup; - - p->bitbang.master = master; - p->bitbang.chipselect = sh_msiof_spi_chipselect; - p->bitbang.setup_transfer = sh_msiof_spi_setup_transfer; - p->bitbang.txrx_bufs = sh_msiof_spi_txrx; - p->bitbang.txrx_word[SPI_MODE_0] = sh_msiof_spi_txrx_word; - p->bitbang.txrx_word[SPI_MODE_1] = sh_msiof_spi_txrx_word; - p->bitbang.txrx_word[SPI_MODE_2] = sh_msiof_spi_txrx_word; - p->bitbang.txrx_word[SPI_MODE_3] = sh_msiof_spi_txrx_word; - - ret = spi_bitbang_start(&p->bitbang); - if (ret == 0) - return 0; - - pm_runtime_disable(&pdev->dev); - err3: - iounmap(p->mapbase); - err2: - clk_put(p->clk); - err1: - spi_master_put(master); - err0: - return ret; -} - -static int sh_msiof_spi_remove(struct platform_device *pdev) -{ - struct sh_msiof_spi_priv *p = platform_get_drvdata(pdev); - int ret; - - ret = spi_bitbang_stop(&p->bitbang); - if (!ret) { - pm_runtime_disable(&pdev->dev); - free_irq(platform_get_irq(pdev, 0), p); - iounmap(p->mapbase); - clk_put(p->clk); - spi_master_put(p->bitbang.master); - } - return ret; -} - -static int sh_msiof_spi_runtime_nop(struct device *dev) -{ - /* Runtime PM callback shared between ->runtime_suspend() - * and ->runtime_resume(). 
Simply returns success. - * - * This driver re-initializes all registers after - * pm_runtime_get_sync() anyway so there is no need - * to save and restore registers here. - */ - return 0; -} - -static struct dev_pm_ops sh_msiof_spi_dev_pm_ops = { - .runtime_suspend = sh_msiof_spi_runtime_nop, - .runtime_resume = sh_msiof_spi_runtime_nop, -}; - -static struct platform_driver sh_msiof_spi_drv = { - .probe = sh_msiof_spi_probe, - .remove = sh_msiof_spi_remove, - .driver = { - .name = "spi_sh_msiof", - .owner = THIS_MODULE, - .pm = &sh_msiof_spi_dev_pm_ops, - }, -}; - -static int __init sh_msiof_spi_init(void) -{ - return platform_driver_register(&sh_msiof_spi_drv); -} -module_init(sh_msiof_spi_init); - -static void __exit sh_msiof_spi_exit(void) -{ - platform_driver_unregister(&sh_msiof_spi_drv); -} -module_exit(sh_msiof_spi_exit); - -MODULE_DESCRIPTION("SuperH MSIOF SPI Master Interface Driver"); -MODULE_AUTHOR("Magnus Damm"); -MODULE_LICENSE("GPL v2"); -MODULE_ALIAS("platform:spi_sh_msiof"); diff --git a/drivers/spi/spi_sh_sci.c b/drivers/spi/spi_sh_sci.c deleted file mode 100644 index 5c64391..0000000 --- a/drivers/spi/spi_sh_sci.c +++ /dev/null @@ -1,205 +0,0 @@ -/* - * SH SCI SPI interface - * - * Copyright (c) 2008 Magnus Damm - * - * Based on S3C24XX GPIO based SPI driver, which is: - * Copyright (c) 2006 Ben Dooks - * Copyright (c) 2006 Simtec Electronics - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - */ - -#include -#include -#include -#include -#include -#include - -#include -#include - -#include -#include - -struct sh_sci_spi { - struct spi_bitbang bitbang; - - void __iomem *membase; - unsigned char val; - struct sh_spi_info *info; - struct platform_device *dev; -}; - -#define SCSPTR(sp) (sp->membase + 0x1c) -#define PIN_SCK (1 << 2) -#define PIN_TXD (1 << 0) -#define PIN_RXD PIN_TXD -#define PIN_INIT ((1 << 1) | (1 << 3) | PIN_SCK | PIN_TXD) - -static inline void setbits(struct sh_sci_spi *sp, int bits, int on) -{ - /* - * We are the only user of SCSPTR so no locking is required. - * Reading bit 2 and 0 in SCSPTR gives pin state as input. - * Writing the same bits sets the output value. - * This makes regular read-modify-write difficult so we - * use sp->val to keep track of the latest register value. - */ - - if (on) - sp->val |= bits; - else - sp->val &= ~bits; - - iowrite8(sp->val, SCSPTR(sp)); -} - -static inline void setsck(struct spi_device *dev, int on) -{ - setbits(spi_master_get_devdata(dev->master), PIN_SCK, on); -} - -static inline void setmosi(struct spi_device *dev, int on) -{ - setbits(spi_master_get_devdata(dev->master), PIN_TXD, on); -} - -static inline u32 getmiso(struct spi_device *dev) -{ - struct sh_sci_spi *sp = spi_master_get_devdata(dev->master); - - return (ioread8(SCSPTR(sp)) & PIN_RXD) ? 
1 : 0; -} - -#define spidelay(x) ndelay(x) - -#include "spi_bitbang_txrx.h" - -static u32 sh_sci_spi_txrx_mode0(struct spi_device *spi, - unsigned nsecs, u32 word, u8 bits) -{ - return bitbang_txrx_be_cpha0(spi, nsecs, 0, 0, word, bits); -} - -static u32 sh_sci_spi_txrx_mode1(struct spi_device *spi, - unsigned nsecs, u32 word, u8 bits) -{ - return bitbang_txrx_be_cpha1(spi, nsecs, 0, 0, word, bits); -} - -static u32 sh_sci_spi_txrx_mode2(struct spi_device *spi, - unsigned nsecs, u32 word, u8 bits) -{ - return bitbang_txrx_be_cpha0(spi, nsecs, 1, 0, word, bits); -} - -static u32 sh_sci_spi_txrx_mode3(struct spi_device *spi, - unsigned nsecs, u32 word, u8 bits) -{ - return bitbang_txrx_be_cpha1(spi, nsecs, 1, 0, word, bits); -} - -static void sh_sci_spi_chipselect(struct spi_device *dev, int value) -{ - struct sh_sci_spi *sp = spi_master_get_devdata(dev->master); - - if (sp->info && sp->info->chip_select) - (sp->info->chip_select)(sp->info, dev->chip_select, value); -} - -static int sh_sci_spi_probe(struct platform_device *dev) -{ - struct resource *r; - struct spi_master *master; - struct sh_sci_spi *sp; - int ret; - - master = spi_alloc_master(&dev->dev, sizeof(struct sh_sci_spi)); - if (master == NULL) { - dev_err(&dev->dev, "failed to allocate spi master\n"); - ret = -ENOMEM; - goto err0; - } - - sp = spi_master_get_devdata(master); - - platform_set_drvdata(dev, sp); - sp->info = dev->dev.platform_data; - - /* setup spi bitbang adaptor */ - sp->bitbang.master = spi_master_get(master); - sp->bitbang.master->bus_num = sp->info->bus_num; - sp->bitbang.master->num_chipselect = sp->info->num_chipselect; - sp->bitbang.chipselect = sh_sci_spi_chipselect; - - sp->bitbang.txrx_word[SPI_MODE_0] = sh_sci_spi_txrx_mode0; - sp->bitbang.txrx_word[SPI_MODE_1] = sh_sci_spi_txrx_mode1; - sp->bitbang.txrx_word[SPI_MODE_2] = sh_sci_spi_txrx_mode2; - sp->bitbang.txrx_word[SPI_MODE_3] = sh_sci_spi_txrx_mode3; - - r = platform_get_resource(dev, IORESOURCE_MEM, 0); - if (r == NULL) { - ret = -ENOENT; - goto err1; - } - sp->membase = ioremap(r->start, resource_size(r)); - if (!sp->membase) { - ret = -ENXIO; - goto err1; - } - sp->val = ioread8(SCSPTR(sp)); - setbits(sp, PIN_INIT, 1); - - ret = spi_bitbang_start(&sp->bitbang); - if (!ret) - return 0; - - setbits(sp, PIN_INIT, 0); - iounmap(sp->membase); - err1: - spi_master_put(sp->bitbang.master); - err0: - return ret; -} - -static int sh_sci_spi_remove(struct platform_device *dev) -{ - struct sh_sci_spi *sp = platform_get_drvdata(dev); - - iounmap(sp->membase); - setbits(sp, PIN_INIT, 0); - spi_bitbang_stop(&sp->bitbang); - spi_master_put(sp->bitbang.master); - return 0; -} - -static struct platform_driver sh_sci_spi_drv = { - .probe = sh_sci_spi_probe, - .remove = sh_sci_spi_remove, - .driver = { - .name = "spi_sh_sci", - .owner = THIS_MODULE, - }, -}; - -static int __init sh_sci_spi_init(void) -{ - return platform_driver_register(&sh_sci_spi_drv); -} -module_init(sh_sci_spi_init); - -static void __exit sh_sci_spi_exit(void) -{ - platform_driver_unregister(&sh_sci_spi_drv); -} -module_exit(sh_sci_spi_exit); - -MODULE_DESCRIPTION("SH SCI SPI Driver"); -MODULE_AUTHOR("Magnus Damm "); -MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:spi_sh_sci"); diff --git a/drivers/spi/spi_stmp.c b/drivers/spi/spi_stmp.c deleted file mode 100644 index fadff76..0000000 --- a/drivers/spi/spi_stmp.c +++ /dev/null @@ -1,679 +0,0 @@ -/* - * Freescale STMP378X SPI master driver - * - * Author: dmitry pervushin - * - * Copyright 2008 Freescale Semiconductor, Inc. All Rights Reserved. 
- * Copyright 2008 Embedded Alley Solutions, Inc All Rights Reserved. - */ - -/* - * The code contained herein is licensed under the GNU General Public - * License. You may obtain a copy of the GNU General Public License - * Version 2 or later at the following locations: - * - * http://www.opensource.org/licenses/gpl-license.html - * http://www.gnu.org/copyleft/gpl.html - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - - -/* 0 means DMA mode(recommended, default), !0 - PIO mode */ -static int pio; -static int clock; - -/* default timeout for busy waits is 2 seconds */ -#define STMP_SPI_TIMEOUT (2 * HZ) - -struct stmp_spi { - int id; - - void * __iomem regs; /* vaddr of the control registers */ - - int irq, err_irq; - u32 dma; - struct stmp3xxx_dma_descriptor d; - - u32 speed_khz; - u32 saved_timings; - u32 divider; - - struct clk *clk; - struct device *master_dev; - - struct work_struct work; - struct workqueue_struct *workqueue; - - /* lock protects queue access */ - spinlock_t lock; - struct list_head queue; - - struct completion done; -}; - -#define busy_wait(cond) \ - ({ \ - unsigned long end_jiffies = jiffies + STMP_SPI_TIMEOUT; \ - bool succeeded = false; \ - do { \ - if (cond) { \ - succeeded = true; \ - break; \ - } \ - cpu_relax(); \ - } while (time_before(jiffies, end_jiffies)); \ - succeeded; \ - }) - -/** - * stmp_spi_init_hw - * Initialize the SSP port - */ -static int stmp_spi_init_hw(struct stmp_spi *ss) -{ - int err = 0; - void *pins = ss->master_dev->platform_data; - - err = stmp3xxx_request_pin_group(pins, dev_name(ss->master_dev)); - if (err) - goto out; - - ss->clk = clk_get(NULL, "ssp"); - if (IS_ERR(ss->clk)) { - err = PTR_ERR(ss->clk); - goto out_free_pins; - } - clk_enable(ss->clk); - - stmp3xxx_reset_block(ss->regs, false); - stmp3xxx_dma_reset_channel(ss->dma); - - return 0; - -out_free_pins: - stmp3xxx_release_pin_group(pins, dev_name(ss->master_dev)); -out: - return err; -} - -static void stmp_spi_release_hw(struct stmp_spi *ss) -{ - void *pins = ss->master_dev->platform_data; - - if (ss->clk && !IS_ERR(ss->clk)) { - clk_disable(ss->clk); - clk_put(ss->clk); - } - stmp3xxx_release_pin_group(pins, dev_name(ss->master_dev)); -} - -static int stmp_spi_setup_transfer(struct spi_device *spi, - struct spi_transfer *t) -{ - u8 bits_per_word; - u32 hz; - struct stmp_spi *ss = spi_master_get_devdata(spi->master); - u16 rate; - - bits_per_word = spi->bits_per_word; - if (t && t->bits_per_word) - bits_per_word = t->bits_per_word; - - /* - * Calculate speed: - * - by default, use maximum speed from ssp clk - * - if device overrides it, use it - * - if transfer specifies other speed, use transfer's one - */ - hz = 1000 * ss->speed_khz / ss->divider; - if (spi->max_speed_hz) - hz = min(hz, spi->max_speed_hz); - if (t && t->speed_hz) - hz = min(hz, t->speed_hz); - - if (hz == 0) { - dev_err(&spi->dev, "Cannot continue with zero clock\n"); - return -EINVAL; - } - - if (bits_per_word != 8) { - dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n", - __func__, bits_per_word); - return -EINVAL; - } - - dev_dbg(&spi->dev, "Requested clk rate = %uHz, max = %uHz/%d = %uHz\n", - hz, ss->speed_khz, ss->divider, - ss->speed_khz * 1000 / ss->divider); - - if (ss->speed_khz * 1000 / ss->divider < hz) { - dev_err(&spi->dev, "%s, unsupported clock rate %uHz\n", - __func__, hz); - return -EINVAL; - } - - rate = 1000 * ss->speed_khz/ss->divider/hz; - - writel(BF(ss->divider, 
SSP_TIMING_CLOCK_DIVIDE) | - BF(rate - 1, SSP_TIMING_CLOCK_RATE), - HW_SSP_TIMING + ss->regs); - - writel(BF(1 /* mode SPI */, SSP_CTRL1_SSP_MODE) | - BF(4 /* 8 bits */, SSP_CTRL1_WORD_LENGTH) | - ((spi->mode & SPI_CPOL) ? BM_SSP_CTRL1_POLARITY : 0) | - ((spi->mode & SPI_CPHA) ? BM_SSP_CTRL1_PHASE : 0) | - (pio ? 0 : BM_SSP_CTRL1_DMA_ENABLE), - ss->regs + HW_SSP_CTRL1); - - return 0; -} - -static int stmp_spi_setup(struct spi_device *spi) -{ - /* spi_setup() does basic checks, - * stmp_spi_setup_transfer() does more later - */ - if (spi->bits_per_word != 8) { - dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n", - __func__, spi->bits_per_word); - return -EINVAL; - } - return 0; -} - -static inline u32 stmp_spi_cs(unsigned cs) -{ - return ((cs & 1) ? BM_SSP_CTRL0_WAIT_FOR_CMD : 0) | - ((cs & 2) ? BM_SSP_CTRL0_WAIT_FOR_IRQ : 0); -} - -static int stmp_spi_txrx_dma(struct stmp_spi *ss, int cs, - unsigned char *buf, dma_addr_t dma_buf, int len, - int first, int last, bool write) -{ - u32 c0 = 0; - dma_addr_t spi_buf_dma = dma_buf; - int status = 0; - enum dma_data_direction dir = write ? DMA_TO_DEVICE : DMA_FROM_DEVICE; - - c0 |= (first ? BM_SSP_CTRL0_LOCK_CS : 0); - c0 |= (last ? BM_SSP_CTRL0_IGNORE_CRC : 0); - c0 |= (write ? 0 : BM_SSP_CTRL0_READ); - c0 |= BM_SSP_CTRL0_DATA_XFER; - - c0 |= stmp_spi_cs(cs); - - c0 |= BF(len, SSP_CTRL0_XFER_COUNT); - - if (!dma_buf) - spi_buf_dma = dma_map_single(ss->master_dev, buf, len, dir); - - ss->d.command->cmd = - BF(len, APBH_CHn_CMD_XFER_COUNT) | - BF(1, APBH_CHn_CMD_CMDWORDS) | - BM_APBH_CHn_CMD_WAIT4ENDCMD | - BM_APBH_CHn_CMD_IRQONCMPLT | - BF(write ? BV_APBH_CHn_CMD_COMMAND__DMA_READ : - BV_APBH_CHn_CMD_COMMAND__DMA_WRITE, - APBH_CHn_CMD_COMMAND); - ss->d.command->pio_words[0] = c0; - ss->d.command->buf_ptr = spi_buf_dma; - - stmp3xxx_dma_reset_channel(ss->dma); - stmp3xxx_dma_clear_interrupt(ss->dma); - stmp3xxx_dma_enable_interrupt(ss->dma); - init_completion(&ss->done); - stmp3xxx_dma_go(ss->dma, &ss->d, 1); - wait_for_completion(&ss->done); - - if (!busy_wait(readl(ss->regs + HW_SSP_CTRL0) & BM_SSP_CTRL0_RUN)) - status = -ETIMEDOUT; - - if (!dma_buf) - dma_unmap_single(ss->master_dev, spi_buf_dma, len, dir); - - return status; -} - -static inline void stmp_spi_enable(struct stmp_spi *ss) -{ - stmp3xxx_setl(BM_SSP_CTRL0_LOCK_CS, ss->regs + HW_SSP_CTRL0); - stmp3xxx_clearl(BM_SSP_CTRL0_IGNORE_CRC, ss->regs + HW_SSP_CTRL0); -} - -static inline void stmp_spi_disable(struct stmp_spi *ss) -{ - stmp3xxx_clearl(BM_SSP_CTRL0_LOCK_CS, ss->regs + HW_SSP_CTRL0); - stmp3xxx_setl(BM_SSP_CTRL0_IGNORE_CRC, ss->regs + HW_SSP_CTRL0); -} - -static int stmp_spi_txrx_pio(struct stmp_spi *ss, int cs, - unsigned char *buf, int len, - bool first, bool last, bool write) -{ - if (first) - stmp_spi_enable(ss); - - stmp3xxx_setl(stmp_spi_cs(cs), ss->regs + HW_SSP_CTRL0); - - while (len--) { - if (last && len <= 0) - stmp_spi_disable(ss); - - stmp3xxx_clearl(BM_SSP_CTRL0_XFER_COUNT, - ss->regs + HW_SSP_CTRL0); - stmp3xxx_setl(1, ss->regs + HW_SSP_CTRL0); - - if (write) - stmp3xxx_clearl(BM_SSP_CTRL0_READ, - ss->regs + HW_SSP_CTRL0); - else - stmp3xxx_setl(BM_SSP_CTRL0_READ, - ss->regs + HW_SSP_CTRL0); - - /* Run! 
*/ - stmp3xxx_setl(BM_SSP_CTRL0_RUN, ss->regs + HW_SSP_CTRL0); - - if (!busy_wait(readl(ss->regs + HW_SSP_CTRL0) & - BM_SSP_CTRL0_RUN)) - break; - - if (write) - writel(*buf, ss->regs + HW_SSP_DATA); - - /* Set TRANSFER */ - stmp3xxx_setl(BM_SSP_CTRL0_DATA_XFER, ss->regs + HW_SSP_CTRL0); - - if (!write) { - if (busy_wait((readl(ss->regs + HW_SSP_STATUS) & - BM_SSP_STATUS_FIFO_EMPTY))) - break; - *buf = readl(ss->regs + HW_SSP_DATA) & 0xFF; - } - - if (!busy_wait(readl(ss->regs + HW_SSP_CTRL0) & - BM_SSP_CTRL0_RUN)) - break; - - /* advance to the next byte */ - buf++; - } - - return len < 0 ? 0 : -ETIMEDOUT; -} - -static int stmp_spi_handle_message(struct stmp_spi *ss, struct spi_message *m) -{ - bool first, last; - struct spi_transfer *t, *tmp_t; - int status = 0; - int cs; - - cs = m->spi->chip_select; - - list_for_each_entry_safe(t, tmp_t, &m->transfers, transfer_list) { - - first = (&t->transfer_list == m->transfers.next); - last = (&t->transfer_list == m->transfers.prev); - - if (first || t->speed_hz || t->bits_per_word) - stmp_spi_setup_transfer(m->spi, t); - - /* reject "not last" transfers which request to change cs */ - if (t->cs_change && !last) { - dev_err(&m->spi->dev, - "Message with t->cs_change has been skipped\n"); - continue; - } - - if (t->tx_buf) { - status = pio ? - stmp_spi_txrx_pio(ss, cs, (void *)t->tx_buf, - t->len, first, last, true) : - stmp_spi_txrx_dma(ss, cs, (void *)t->tx_buf, - t->tx_dma, t->len, first, last, true); -#ifdef DEBUG - if (t->len < 0x10) - print_hex_dump_bytes("Tx ", - DUMP_PREFIX_OFFSET, - t->tx_buf, t->len); - else - pr_debug("Tx: %d bytes\n", t->len); -#endif - } - if (t->rx_buf) { - status = pio ? - stmp_spi_txrx_pio(ss, cs, t->rx_buf, - t->len, first, last, false) : - stmp_spi_txrx_dma(ss, cs, t->rx_buf, - t->rx_dma, t->len, first, last, false); -#ifdef DEBUG - if (t->len < 0x10) - print_hex_dump_bytes("Rx ", - DUMP_PREFIX_OFFSET, - t->rx_buf, t->len); - else - pr_debug("Rx: %d bytes\n", t->len); -#endif - } - - if (t->delay_usecs) - udelay(t->delay_usecs); - - if (status) - break; - - } - return status; -} - -/** - * stmp_spi_handle - handle messages from the queue - */ -static void stmp_spi_handle(struct work_struct *w) -{ - struct stmp_spi *ss = container_of(w, struct stmp_spi, work); - unsigned long flags; - struct spi_message *m; - - spin_lock_irqsave(&ss->lock, flags); - while (!list_empty(&ss->queue)) { - m = list_entry(ss->queue.next, struct spi_message, queue); - list_del_init(&m->queue); - spin_unlock_irqrestore(&ss->lock, flags); - - m->status = stmp_spi_handle_message(ss, m); - m->complete(m->context); - - spin_lock_irqsave(&ss->lock, flags); - } - spin_unlock_irqrestore(&ss->lock, flags); - - return; -} - -/** - * stmp_spi_transfer - perform message transfer. - * Called indirectly from spi_async, queues all the messages to - * spi_handle_message. 
- * @spi: spi device - * @m: message to be queued - */ -static int stmp_spi_transfer(struct spi_device *spi, struct spi_message *m) -{ - struct stmp_spi *ss = spi_master_get_devdata(spi->master); - unsigned long flags; - - m->status = -EINPROGRESS; - spin_lock_irqsave(&ss->lock, flags); - list_add_tail(&m->queue, &ss->queue); - queue_work(ss->workqueue, &ss->work); - spin_unlock_irqrestore(&ss->lock, flags); - return 0; -} - -static irqreturn_t stmp_spi_irq(int irq, void *dev_id) -{ - struct stmp_spi *ss = dev_id; - - stmp3xxx_dma_clear_interrupt(ss->dma); - complete(&ss->done); - return IRQ_HANDLED; -} - -static irqreturn_t stmp_spi_irq_err(int irq, void *dev_id) -{ - struct stmp_spi *ss = dev_id; - u32 c1, st; - - c1 = readl(ss->regs + HW_SSP_CTRL1); - st = readl(ss->regs + HW_SSP_STATUS); - dev_err(ss->master_dev, "%s: status = 0x%08X, c1 = 0x%08X\n", - __func__, st, c1); - stmp3xxx_clearl(c1 & 0xCCCC0000, ss->regs + HW_SSP_CTRL1); - - return IRQ_HANDLED; -} - -static int __devinit stmp_spi_probe(struct platform_device *dev) -{ - int err = 0; - struct spi_master *master; - struct stmp_spi *ss; - struct resource *r; - - master = spi_alloc_master(&dev->dev, sizeof(struct stmp_spi)); - if (master == NULL) { - err = -ENOMEM; - goto out0; - } - master->flags = SPI_MASTER_HALF_DUPLEX; - - ss = spi_master_get_devdata(master); - platform_set_drvdata(dev, master); - - /* Get resources(memory, IRQ) associated with the device */ - r = platform_get_resource(dev, IORESOURCE_MEM, 0); - if (r == NULL) { - err = -ENODEV; - goto out_put_master; - } - ss->regs = ioremap(r->start, resource_size(r)); - if (!ss->regs) { - err = -EINVAL; - goto out_put_master; - } - - ss->master_dev = &dev->dev; - ss->id = dev->id; - - INIT_WORK(&ss->work, stmp_spi_handle); - INIT_LIST_HEAD(&ss->queue); - spin_lock_init(&ss->lock); - - ss->workqueue = create_singlethread_workqueue(dev_name(&dev->dev)); - if (!ss->workqueue) { - err = -ENXIO; - goto out_put_master; - } - master->transfer = stmp_spi_transfer; - master->setup = stmp_spi_setup; - - /* the spi->mode bits understood by this driver: */ - master->mode_bits = SPI_CPOL | SPI_CPHA; - - ss->irq = platform_get_irq(dev, 0); - if (ss->irq < 0) { - err = ss->irq; - goto out_put_master; - } - ss->err_irq = platform_get_irq(dev, 1); - if (ss->err_irq < 0) { - err = ss->err_irq; - goto out_put_master; - } - - r = platform_get_resource(dev, IORESOURCE_DMA, 0); - if (r == NULL) { - err = -ENODEV; - goto out_put_master; - } - - ss->dma = r->start; - err = stmp3xxx_dma_request(ss->dma, &dev->dev, dev_name(&dev->dev)); - if (err) - goto out_put_master; - - err = stmp3xxx_dma_allocate_command(ss->dma, &ss->d); - if (err) - goto out_free_dma; - - master->bus_num = dev->id; - master->num_chipselect = 1; - - /* SPI controller initializations */ - err = stmp_spi_init_hw(ss); - if (err) { - dev_dbg(&dev->dev, "cannot initialize hardware\n"); - goto out_free_dma_desc; - } - - if (clock) { - dev_info(&dev->dev, "clock rate forced to %d\n", clock); - clk_set_rate(ss->clk, clock); - } - ss->speed_khz = clk_get_rate(ss->clk); - ss->divider = 2; - dev_info(&dev->dev, "max possible speed %d = %ld/%d kHz\n", - ss->speed_khz, clk_get_rate(ss->clk), ss->divider); - - /* Register for SPI interrupt */ - err = request_irq(ss->irq, stmp_spi_irq, 0, - dev_name(&dev->dev), ss); - if (err) { - dev_dbg(&dev->dev, "request_irq failed, %d\n", err); - goto out_release_hw; - } - - /* ..and shared interrupt for all SSP controllers */ - err = request_irq(ss->err_irq, stmp_spi_irq_err, IRQF_SHARED, - 
dev_name(&dev->dev), ss); - if (err) { - dev_dbg(&dev->dev, "request_irq(error) failed, %d\n", err); - goto out_free_irq; - } - - err = spi_register_master(master); - if (err) { - dev_dbg(&dev->dev, "cannot register spi master, %d\n", err); - goto out_free_irq_2; - } - dev_info(&dev->dev, "at (mapped) 0x%08X, irq=%d, bus %d, %s mode\n", - (u32)ss->regs, ss->irq, master->bus_num, - pio ? "PIO" : "DMA"); - return 0; - -out_free_irq_2: - free_irq(ss->err_irq, ss); -out_free_irq: - free_irq(ss->irq, ss); -out_free_dma_desc: - stmp3xxx_dma_free_command(ss->dma, &ss->d); -out_free_dma: - stmp3xxx_dma_release(ss->dma); -out_release_hw: - stmp_spi_release_hw(ss); -out_put_master: - if (ss->workqueue) - destroy_workqueue(ss->workqueue); - if (ss->regs) - iounmap(ss->regs); - platform_set_drvdata(dev, NULL); - spi_master_put(master); -out0: - return err; -} - -static int __devexit stmp_spi_remove(struct platform_device *dev) -{ - struct stmp_spi *ss; - struct spi_master *master; - - master = platform_get_drvdata(dev); - if (master == NULL) - goto out0; - ss = spi_master_get_devdata(master); - - spi_unregister_master(master); - - free_irq(ss->err_irq, ss); - free_irq(ss->irq, ss); - stmp3xxx_dma_free_command(ss->dma, &ss->d); - stmp3xxx_dma_release(ss->dma); - stmp_spi_release_hw(ss); - destroy_workqueue(ss->workqueue); - iounmap(ss->regs); - spi_master_put(master); - platform_set_drvdata(dev, NULL); -out0: - return 0; -} - -#ifdef CONFIG_PM -static int stmp_spi_suspend(struct platform_device *pdev, pm_message_t pmsg) -{ - struct stmp_spi *ss; - struct spi_master *master; - - master = platform_get_drvdata(pdev); - ss = spi_master_get_devdata(master); - - ss->saved_timings = readl(HW_SSP_TIMING + ss->regs); - clk_disable(ss->clk); - - return 0; -} - -static int stmp_spi_resume(struct platform_device *pdev) -{ - struct stmp_spi *ss; - struct spi_master *master; - - master = platform_get_drvdata(pdev); - ss = spi_master_get_devdata(master); - - clk_enable(ss->clk); - stmp3xxx_reset_block(ss->regs, false); - writel(ss->saved_timings, ss->regs + HW_SSP_TIMING); - - return 0; -} - -#else -#define stmp_spi_suspend NULL -#define stmp_spi_resume NULL -#endif - -static struct platform_driver stmp_spi_driver = { - .probe = stmp_spi_probe, - .remove = __devexit_p(stmp_spi_remove), - .driver = { - .name = "stmp3xxx_ssp", - .owner = THIS_MODULE, - }, - .suspend = stmp_spi_suspend, - .resume = stmp_spi_resume, -}; - -static int __init stmp_spi_init(void) -{ - return platform_driver_register(&stmp_spi_driver); -} - -static void __exit stmp_spi_exit(void) -{ - platform_driver_unregister(&stmp_spi_driver); -} - -module_init(stmp_spi_init); -module_exit(stmp_spi_exit); -module_param(pio, int, S_IRUGO); -module_param(clock, int, S_IRUGO); -MODULE_AUTHOR("dmitry pervushin "); -MODULE_DESCRIPTION("STMP3xxx SPI/SSP driver"); -MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi_tegra.c b/drivers/spi/spi_tegra.c deleted file mode 100644 index 6c3aa6e..0000000 --- a/drivers/spi/spi_tegra.c +++ /dev/null @@ -1,618 +0,0 @@ -/* - * Driver for Nvidia TEGRA spi controller. - * - * Copyright (C) 2010 Google, Inc. - * - * Author: - * Erik Gilling - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include - -#define SLINK_COMMAND 0x000 -#define SLINK_BIT_LENGTH(x) (((x) & 0x1f) << 0) -#define SLINK_WORD_SIZE(x) (((x) & 0x1f) << 5) -#define SLINK_BOTH_EN (1 << 10) -#define SLINK_CS_SW (1 << 11) -#define SLINK_CS_VALUE (1 << 12) -#define SLINK_CS_POLARITY (1 << 13) -#define SLINK_IDLE_SDA_DRIVE_LOW (0 << 16) -#define SLINK_IDLE_SDA_DRIVE_HIGH (1 << 16) -#define SLINK_IDLE_SDA_PULL_LOW (2 << 16) -#define SLINK_IDLE_SDA_PULL_HIGH (3 << 16) -#define SLINK_IDLE_SDA_MASK (3 << 16) -#define SLINK_CS_POLARITY1 (1 << 20) -#define SLINK_CK_SDA (1 << 21) -#define SLINK_CS_POLARITY2 (1 << 22) -#define SLINK_CS_POLARITY3 (1 << 23) -#define SLINK_IDLE_SCLK_DRIVE_LOW (0 << 24) -#define SLINK_IDLE_SCLK_DRIVE_HIGH (1 << 24) -#define SLINK_IDLE_SCLK_PULL_LOW (2 << 24) -#define SLINK_IDLE_SCLK_PULL_HIGH (3 << 24) -#define SLINK_IDLE_SCLK_MASK (3 << 24) -#define SLINK_M_S (1 << 28) -#define SLINK_WAIT (1 << 29) -#define SLINK_GO (1 << 30) -#define SLINK_ENB (1 << 31) - -#define SLINK_COMMAND2 0x004 -#define SLINK_LSBFE (1 << 0) -#define SLINK_SSOE (1 << 1) -#define SLINK_SPIE (1 << 4) -#define SLINK_BIDIROE (1 << 6) -#define SLINK_MODFEN (1 << 7) -#define SLINK_INT_SIZE(x) (((x) & 0x1f) << 8) -#define SLINK_CS_ACTIVE_BETWEEN (1 << 17) -#define SLINK_SS_EN_CS(x) (((x) & 0x3) << 18) -#define SLINK_SS_SETUP(x) (((x) & 0x3) << 20) -#define SLINK_FIFO_REFILLS_0 (0 << 22) -#define SLINK_FIFO_REFILLS_1 (1 << 22) -#define SLINK_FIFO_REFILLS_2 (2 << 22) -#define SLINK_FIFO_REFILLS_3 (3 << 22) -#define SLINK_FIFO_REFILLS_MASK (3 << 22) -#define SLINK_WAIT_PACK_INT(x) (((x) & 0x7) << 26) -#define SLINK_SPC0 (1 << 29) -#define SLINK_TXEN (1 << 30) -#define SLINK_RXEN (1 << 31) - -#define SLINK_STATUS 0x008 -#define SLINK_COUNT(val) (((val) >> 0) & 0x1f) -#define SLINK_WORD(val) (((val) >> 5) & 0x1f) -#define SLINK_BLK_CNT(val) (((val) >> 0) & 0xffff) -#define SLINK_MODF (1 << 16) -#define SLINK_RX_UNF (1 << 18) -#define SLINK_TX_OVF (1 << 19) -#define SLINK_TX_FULL (1 << 20) -#define SLINK_TX_EMPTY (1 << 21) -#define SLINK_RX_FULL (1 << 22) -#define SLINK_RX_EMPTY (1 << 23) -#define SLINK_TX_UNF (1 << 24) -#define SLINK_RX_OVF (1 << 25) -#define SLINK_TX_FLUSH (1 << 26) -#define SLINK_RX_FLUSH (1 << 27) -#define SLINK_SCLK (1 << 28) -#define SLINK_ERR (1 << 29) -#define SLINK_RDY (1 << 30) -#define SLINK_BSY (1 << 31) - -#define SLINK_MAS_DATA 0x010 -#define SLINK_SLAVE_DATA 0x014 - -#define SLINK_DMA_CTL 0x018 -#define SLINK_DMA_BLOCK_SIZE(x) (((x) & 0xffff) << 0) -#define SLINK_TX_TRIG_1 (0 << 16) -#define SLINK_TX_TRIG_4 (1 << 16) -#define SLINK_TX_TRIG_8 (2 << 16) -#define SLINK_TX_TRIG_16 (3 << 16) -#define SLINK_TX_TRIG_MASK (3 << 16) -#define SLINK_RX_TRIG_1 (0 << 18) -#define SLINK_RX_TRIG_4 (1 << 18) -#define SLINK_RX_TRIG_8 (2 << 18) -#define SLINK_RX_TRIG_16 (3 << 18) -#define SLINK_RX_TRIG_MASK (3 << 18) -#define SLINK_PACKED (1 << 20) -#define SLINK_PACK_SIZE_4 (0 << 21) -#define SLINK_PACK_SIZE_8 (1 << 21) -#define SLINK_PACK_SIZE_16 (2 << 21) -#define SLINK_PACK_SIZE_32 (3 << 21) -#define SLINK_PACK_SIZE_MASK (3 << 21) -#define SLINK_IE_TXC (1 << 26) -#define SLINK_IE_RXC (1 << 27) -#define SLINK_DMA_EN (1 << 31) - -#define 
SLINK_STATUS2 0x01c -#define SLINK_TX_FIFO_EMPTY_COUNT(val) (((val) & 0x3f) >> 0) -#define SLINK_RX_FIFO_FULL_COUNT(val) (((val) & 0x3f) >> 16) - -#define SLINK_TX_FIFO 0x100 -#define SLINK_RX_FIFO 0x180 - -static const unsigned long spi_tegra_req_sels[] = { - TEGRA_DMA_REQ_SEL_SL2B1, - TEGRA_DMA_REQ_SEL_SL2B2, - TEGRA_DMA_REQ_SEL_SL2B3, - TEGRA_DMA_REQ_SEL_SL2B4, -}; - -#define BB_LEN 32 - -struct spi_tegra_data { - struct spi_master *master; - struct platform_device *pdev; - spinlock_t lock; - - struct clk *clk; - void __iomem *base; - unsigned long phys; - - u32 cur_speed; - - struct list_head queue; - struct spi_transfer *cur; - unsigned cur_pos; - unsigned cur_len; - unsigned cur_bytes_per_word; - - /* The tegra spi controller has a bug which causes the first word - * in PIO transactions to be garbage. Since packed DMA transactions - * require transfers to be 4 byte aligned we need a bounce buffer - * for the generic case. - */ - struct tegra_dma_req rx_dma_req; - struct tegra_dma_channel *rx_dma; - u32 *rx_bb; - dma_addr_t rx_bb_phys; -}; - - -static inline unsigned long spi_tegra_readl(struct spi_tegra_data *tspi, - unsigned long reg) -{ - return readl(tspi->base + reg); -} - -static inline void spi_tegra_writel(struct spi_tegra_data *tspi, - unsigned long val, - unsigned long reg) -{ - writel(val, tspi->base + reg); -} - -static void spi_tegra_go(struct spi_tegra_data *tspi) -{ - unsigned long val; - - wmb(); - - val = spi_tegra_readl(tspi, SLINK_DMA_CTL); - val &= ~SLINK_DMA_BLOCK_SIZE(~0) & ~SLINK_DMA_EN; - val |= SLINK_DMA_BLOCK_SIZE(tspi->rx_dma_req.size / 4 - 1); - spi_tegra_writel(tspi, val, SLINK_DMA_CTL); - - tegra_dma_enqueue_req(tspi->rx_dma, &tspi->rx_dma_req); - - val |= SLINK_DMA_EN; - spi_tegra_writel(tspi, val, SLINK_DMA_CTL); -} - -static unsigned spi_tegra_fill_tx_fifo(struct spi_tegra_data *tspi, - struct spi_transfer *t) -{ - unsigned len = min(t->len - tspi->cur_pos, BB_LEN * - tspi->cur_bytes_per_word); - u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_pos; - int i, j; - unsigned long val; - - val = spi_tegra_readl(tspi, SLINK_COMMAND); - val &= ~SLINK_WORD_SIZE(~0); - val |= SLINK_WORD_SIZE(len / tspi->cur_bytes_per_word - 1); - spi_tegra_writel(tspi, val, SLINK_COMMAND); - - for (i = 0; i < len; i += tspi->cur_bytes_per_word) { - val = 0; - for (j = 0; j < tspi->cur_bytes_per_word; j++) - val |= tx_buf[i + j] << j * 8; - - spi_tegra_writel(tspi, val, SLINK_TX_FIFO); - } - - tspi->rx_dma_req.size = len / tspi->cur_bytes_per_word * 4; - - return len; -} - -static unsigned spi_tegra_drain_rx_fifo(struct spi_tegra_data *tspi, - struct spi_transfer *t) -{ - unsigned len = tspi->cur_len; - u8 *rx_buf = (u8 *)t->rx_buf + tspi->cur_pos; - int i, j; - unsigned long val; - - for (i = 0; i < len; i += tspi->cur_bytes_per_word) { - val = tspi->rx_bb[i / tspi->cur_bytes_per_word]; - for (j = 0; j < tspi->cur_bytes_per_word; j++) - rx_buf[i + j] = (val >> (j * 8)) & 0xff; - } - - return len; -} - -static void spi_tegra_start_transfer(struct spi_device *spi, - struct spi_transfer *t) -{ - struct spi_tegra_data *tspi = spi_master_get_devdata(spi->master); - u32 speed; - u8 bits_per_word; - unsigned long val; - - speed = t->speed_hz ? t->speed_hz : spi->max_speed_hz; - bits_per_word = t->bits_per_word ? 
t->bits_per_word : - spi->bits_per_word; - - tspi->cur_bytes_per_word = (bits_per_word - 1) / 8 + 1; - - if (speed != tspi->cur_speed) - clk_set_rate(tspi->clk, speed); - - if (tspi->cur_speed == 0) - clk_enable(tspi->clk); - - tspi->cur_speed = speed; - - val = spi_tegra_readl(tspi, SLINK_COMMAND2); - val &= ~SLINK_SS_EN_CS(~0) | SLINK_RXEN | SLINK_TXEN; - if (t->rx_buf) - val |= SLINK_RXEN; - if (t->tx_buf) - val |= SLINK_TXEN; - val |= SLINK_SS_EN_CS(spi->chip_select); - val |= SLINK_SPIE; - spi_tegra_writel(tspi, val, SLINK_COMMAND2); - - val = spi_tegra_readl(tspi, SLINK_COMMAND); - val &= ~SLINK_BIT_LENGTH(~0); - val |= SLINK_BIT_LENGTH(bits_per_word - 1); - - /* FIXME: should probably control CS manually so that we can be sure - * it does not go low between transfer and to support delay_usecs - * correctly. - */ - val &= ~SLINK_IDLE_SCLK_MASK & ~SLINK_CK_SDA & ~SLINK_CS_SW; - - if (spi->mode & SPI_CPHA) - val |= SLINK_CK_SDA; - - if (spi->mode & SPI_CPOL) - val |= SLINK_IDLE_SCLK_DRIVE_HIGH; - else - val |= SLINK_IDLE_SCLK_DRIVE_LOW; - - val |= SLINK_M_S; - - spi_tegra_writel(tspi, val, SLINK_COMMAND); - - spi_tegra_writel(tspi, SLINK_RX_FLUSH | SLINK_TX_FLUSH, SLINK_STATUS); - - tspi->cur = t; - tspi->cur_pos = 0; - tspi->cur_len = spi_tegra_fill_tx_fifo(tspi, t); - - spi_tegra_go(tspi); -} - -static void spi_tegra_start_message(struct spi_device *spi, - struct spi_message *m) -{ - struct spi_transfer *t; - - m->actual_length = 0; - m->status = 0; - - t = list_first_entry(&m->transfers, struct spi_transfer, transfer_list); - spi_tegra_start_transfer(spi, t); -} - -static void tegra_spi_rx_dma_complete(struct tegra_dma_req *req) -{ - struct spi_tegra_data *tspi = req->dev; - unsigned long flags; - struct spi_message *m; - struct spi_device *spi; - int timeout = 0; - unsigned long val; - - /* the SPI controller may come back with both the BSY and RDY bits - * set. In this case we need to wait for the BSY bit to clear so - * that we are sure the DMA is finished. 1000 reads was empirically - * determined to be long enough. - */ - while (timeout++ < 1000) { - if (!(spi_tegra_readl(tspi, SLINK_STATUS) & SLINK_BSY)) - break; - } - - spin_lock_irqsave(&tspi->lock, flags); - - val = spi_tegra_readl(tspi, SLINK_STATUS); - val |= SLINK_RDY; - spi_tegra_writel(tspi, val, SLINK_STATUS); - - m = list_first_entry(&tspi->queue, struct spi_message, queue); - - if (timeout >= 1000) - m->status = -EIO; - - spi = m->state; - - tspi->cur_pos += spi_tegra_drain_rx_fifo(tspi, tspi->cur); - m->actual_length += tspi->cur_pos; - - if (tspi->cur_pos < tspi->cur->len) { - tspi->cur_len = spi_tegra_fill_tx_fifo(tspi, tspi->cur); - spi_tegra_go(tspi); - } else if (!list_is_last(&tspi->cur->transfer_list, - &m->transfers)) { - tspi->cur = list_first_entry(&tspi->cur->transfer_list, - struct spi_transfer, - transfer_list); - spi_tegra_start_transfer(spi, tspi->cur); - } else { - list_del(&m->queue); - - m->complete(m->context); - - if (!list_empty(&tspi->queue)) { - m = list_first_entry(&tspi->queue, struct spi_message, - queue); - spi = m->state; - spi_tegra_start_message(spi, m); - } else { - clk_disable(tspi->clk); - tspi->cur_speed = 0; - } - } - - spin_unlock_irqrestore(&tspi->lock, flags); -} - -static int spi_tegra_setup(struct spi_device *spi) -{ - struct spi_tegra_data *tspi = spi_master_get_devdata(spi->master); - unsigned long cs_bit; - unsigned long val; - unsigned long flags; - - dev_dbg(&spi->dev, "setup %d bpw, %scpol, %scpha, %dHz\n", - spi->bits_per_word, - spi->mode & SPI_CPOL ? 
"" : "~", - spi->mode & SPI_CPHA ? "" : "~", - spi->max_speed_hz); - - - switch (spi->chip_select) { - case 0: - cs_bit = SLINK_CS_POLARITY; - break; - - case 1: - cs_bit = SLINK_CS_POLARITY1; - break; - - case 2: - cs_bit = SLINK_CS_POLARITY2; - break; - - case 4: - cs_bit = SLINK_CS_POLARITY3; - break; - - default: - return -EINVAL; - } - - spin_lock_irqsave(&tspi->lock, flags); - - val = spi_tegra_readl(tspi, SLINK_COMMAND); - if (spi->mode & SPI_CS_HIGH) - val |= cs_bit; - else - val &= ~cs_bit; - spi_tegra_writel(tspi, val, SLINK_COMMAND); - - spin_unlock_irqrestore(&tspi->lock, flags); - - return 0; -} - -static int spi_tegra_transfer(struct spi_device *spi, struct spi_message *m) -{ - struct spi_tegra_data *tspi = spi_master_get_devdata(spi->master); - struct spi_transfer *t; - unsigned long flags; - int was_empty; - - if (list_empty(&m->transfers) || !m->complete) - return -EINVAL; - - list_for_each_entry(t, &m->transfers, transfer_list) { - if (t->bits_per_word < 0 || t->bits_per_word > 32) - return -EINVAL; - - if (t->len == 0) - return -EINVAL; - - if (!t->rx_buf && !t->tx_buf) - return -EINVAL; - } - - m->state = spi; - - spin_lock_irqsave(&tspi->lock, flags); - was_empty = list_empty(&tspi->queue); - list_add_tail(&m->queue, &tspi->queue); - - if (was_empty) - spi_tegra_start_message(spi, m); - - spin_unlock_irqrestore(&tspi->lock, flags); - - return 0; -} - -static int __init spi_tegra_probe(struct platform_device *pdev) -{ - struct spi_master *master; - struct spi_tegra_data *tspi; - struct resource *r; - int ret; - - master = spi_alloc_master(&pdev->dev, sizeof *tspi); - if (master == NULL) { - dev_err(&pdev->dev, "master allocation failed\n"); - return -ENOMEM; - } - - /* the spi->mode bits understood by this driver: */ - master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; - - master->bus_num = pdev->id; - - master->setup = spi_tegra_setup; - master->transfer = spi_tegra_transfer; - master->num_chipselect = 4; - - dev_set_drvdata(&pdev->dev, master); - tspi = spi_master_get_devdata(master); - tspi->master = master; - tspi->pdev = pdev; - spin_lock_init(&tspi->lock); - - r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (r == NULL) { - ret = -ENODEV; - goto err0; - } - - if (!request_mem_region(r->start, (r->end - r->start) + 1, - dev_name(&pdev->dev))) { - ret = -EBUSY; - goto err0; - } - - tspi->phys = r->start; - tspi->base = ioremap(r->start, r->end - r->start + 1); - if (!tspi->base) { - dev_err(&pdev->dev, "can't ioremap iomem\n"); - ret = -ENOMEM; - goto err1; - } - - tspi->clk = clk_get(&pdev->dev, NULL); - if (IS_ERR(tspi->clk)) { - dev_err(&pdev->dev, "can not get clock\n"); - ret = PTR_ERR(tspi->clk); - goto err2; - } - - INIT_LIST_HEAD(&tspi->queue); - - tspi->rx_dma = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT); - if (!tspi->rx_dma) { - dev_err(&pdev->dev, "can not allocate rx dma channel\n"); - ret = -ENODEV; - goto err3; - } - - tspi->rx_bb = dma_alloc_coherent(&pdev->dev, sizeof(u32) * BB_LEN, - &tspi->rx_bb_phys, GFP_KERNEL); - if (!tspi->rx_bb) { - dev_err(&pdev->dev, "can not allocate rx bounce buffer\n"); - ret = -ENOMEM; - goto err4; - } - - tspi->rx_dma_req.complete = tegra_spi_rx_dma_complete; - tspi->rx_dma_req.to_memory = 1; - tspi->rx_dma_req.dest_addr = tspi->rx_bb_phys; - tspi->rx_dma_req.dest_bus_width = 32; - tspi->rx_dma_req.source_addr = tspi->phys + SLINK_RX_FIFO; - tspi->rx_dma_req.source_bus_width = 32; - tspi->rx_dma_req.source_wrap = 4; - tspi->rx_dma_req.req_sel = spi_tegra_req_sels[pdev->id]; - tspi->rx_dma_req.dev = 
tspi; - - ret = spi_register_master(master); - - if (ret < 0) - goto err5; - - return ret; - -err5: - dma_free_coherent(&pdev->dev, sizeof(u32) * BB_LEN, - tspi->rx_bb, tspi->rx_bb_phys); -err4: - tegra_dma_free_channel(tspi->rx_dma); -err3: - clk_put(tspi->clk); -err2: - iounmap(tspi->base); -err1: - release_mem_region(r->start, (r->end - r->start) + 1); -err0: - spi_master_put(master); - return ret; -} - -static int __devexit spi_tegra_remove(struct platform_device *pdev) -{ - struct spi_master *master; - struct spi_tegra_data *tspi; - struct resource *r; - - master = dev_get_drvdata(&pdev->dev); - tspi = spi_master_get_devdata(master); - - spi_unregister_master(master); - tegra_dma_free_channel(tspi->rx_dma); - - dma_free_coherent(&pdev->dev, sizeof(u32) * BB_LEN, - tspi->rx_bb, tspi->rx_bb_phys); - - clk_put(tspi->clk); - iounmap(tspi->base); - - r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - release_mem_region(r->start, (r->end - r->start) + 1); - - return 0; -} - -MODULE_ALIAS("platform:spi_tegra"); - -static struct platform_driver spi_tegra_driver = { - .driver = { - .name = "spi_tegra", - .owner = THIS_MODULE, - }, - .remove = __devexit_p(spi_tegra_remove), -}; - -static int __init spi_tegra_init(void) -{ - return platform_driver_probe(&spi_tegra_driver, spi_tegra_probe); -} -module_init(spi_tegra_init); - -static void __exit spi_tegra_exit(void) -{ - platform_driver_unregister(&spi_tegra_driver); -} -module_exit(spi_tegra_exit); - -MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi_topcliff_pch.c b/drivers/spi/spi_topcliff_pch.c deleted file mode 100644 index 79e48d4..0000000 --- a/drivers/spi/spi_topcliff_pch.c +++ /dev/null @@ -1,1303 +0,0 @@ -/* - * SPI bus driver for the Topcliff PCH used by Intel SoCs - * - * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. 
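[Note: the Tegra driver removed above packs transfers into its 32-bit FIFO least-significant byte first, cur_bytes_per_word bytes per word, and unpacks received words the same way (spi_tegra_fill_tx_fifo()/spi_tegra_drain_rx_fifo()). Below is a runnable sketch of just that packing arithmetic, assuming the length is already a multiple of the word size as the driver arranges.]

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>

/* Pack 'len' bytes into 32-bit words, 'bpw_bytes' bytes per word,
 * least-significant byte first -- same loop shape as fill_tx_fifo(). */
static void pack_words(const uint8_t *tx, unsigned len,
                       unsigned bpw_bytes, uint32_t *words)
{
    for (unsigned i = 0; i < len; i += bpw_bytes) {
        uint32_t val = 0;
        for (unsigned j = 0; j < bpw_bytes; j++)
            val |= (uint32_t)tx[i + j] << (j * 8);
        words[i / bpw_bytes] = val;
    }
}

/* Inverse operation, mirroring drain_rx_fifo(). */
static void unpack_words(const uint32_t *words, unsigned len,
                         unsigned bpw_bytes, uint8_t *rx)
{
    for (unsigned i = 0; i < len; i += bpw_bytes) {
        uint32_t val = words[i / bpw_bytes];
        for (unsigned j = 0; j < bpw_bytes; j++)
            rx[i + j] = (val >> (j * 8)) & 0xff;
    }
}

int main(void)
{
    uint8_t tx[4] = { 0x11, 0x22, 0x33, 0x44 }, rx[4];
    uint32_t w[2];

    pack_words(tx, sizeof(tx), 2, w);   /* 16-bit words */
    assert(w[0] == 0x2211 && w[1] == 0x4433);
    unpack_words(w, sizeof(rx), 2, rx);
    assert(memcmp(tx, rx, sizeof(tx)) == 0);
    printf("round trip ok\n");
    return 0;
}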
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* Register offsets */ -#define PCH_SPCR 0x00 /* SPI control register */ -#define PCH_SPBRR 0x04 /* SPI baud rate register */ -#define PCH_SPSR 0x08 /* SPI status register */ -#define PCH_SPDWR 0x0C /* SPI write data register */ -#define PCH_SPDRR 0x10 /* SPI read data register */ -#define PCH_SSNXCR 0x18 /* SSN Expand Control Register */ -#define PCH_SRST 0x1C /* SPI reset register */ - -#define PCH_SPSR_TFD 0x000007C0 -#define PCH_SPSR_RFD 0x0000F800 - -#define PCH_READABLE(x) (((x) & PCH_SPSR_RFD)>>11) -#define PCH_WRITABLE(x) (((x) & PCH_SPSR_TFD)>>6) - -#define PCH_RX_THOLD 7 -#define PCH_RX_THOLD_MAX 15 - -#define PCH_MAX_BAUDRATE 5000000 -#define PCH_MAX_FIFO_DEPTH 16 - -#define STATUS_RUNNING 1 -#define STATUS_EXITING 2 -#define PCH_SLEEP_TIME 10 - -#define PCH_ADDRESS_SIZE 0x20 - -#define SSN_LOW 0x02U -#define SSN_NO_CONTROL 0x00U -#define PCH_MAX_CS 0xFF -#define PCI_DEVICE_ID_GE_SPI 0x8816 - -#define SPCR_SPE_BIT (1 << 0) -#define SPCR_MSTR_BIT (1 << 1) -#define SPCR_LSBF_BIT (1 << 4) -#define SPCR_CPHA_BIT (1 << 5) -#define SPCR_CPOL_BIT (1 << 6) -#define SPCR_TFIE_BIT (1 << 8) -#define SPCR_RFIE_BIT (1 << 9) -#define SPCR_FIE_BIT (1 << 10) -#define SPCR_ORIE_BIT (1 << 11) -#define SPCR_MDFIE_BIT (1 << 12) -#define SPCR_FICLR_BIT (1 << 24) -#define SPSR_TFI_BIT (1 << 0) -#define SPSR_RFI_BIT (1 << 1) -#define SPSR_FI_BIT (1 << 2) -#define SPBRR_SIZE_BIT (1 << 10) - -#define PCH_ALL (SPCR_TFIE_BIT|SPCR_RFIE_BIT|SPCR_FIE_BIT|SPCR_ORIE_BIT|SPCR_MDFIE_BIT) - -#define SPCR_RFIC_FIELD 20 -#define SPCR_TFIC_FIELD 16 - -#define SPSR_INT_BITS 0x1F -#define MASK_SPBRR_SPBR_BITS (~((1 << 10) - 1)) -#define MASK_RFIC_SPCR_BITS (~(0xf << 20)) -#define MASK_TFIC_SPCR_BITS (~(0xf000f << 12)) - -#define PCH_CLOCK_HZ 50000000 -#define PCH_MAX_SPBR 1023 - - -/** - * struct pch_spi_data - Holds the SPI channel specific details - * @io_remap_addr: The remapped PCI base address - * @master: Pointer to the SPI master structure - * @work: Reference to work queue handler - * @wk: Workqueue for carrying out execution of the - * requests - * @wait: Wait queue for waking up upon receiving an - * interrupt. 
- * @transfer_complete: Status of SPI Transfer - * @bcurrent_msg_processing: Status flag for message processing - * @lock: Lock for protecting this structure - * @queue: SPI Message queue - * @status: Status of the SPI driver - * @bpw_len: Length of data to be transferred in bits per - * word - * @transfer_active: Flag showing active transfer - * @tx_index: Transmit data count; for bookkeeping during - * transfer - * @rx_index: Receive data count; for bookkeeping during - * transfer - * @tx_buff: Buffer for data to be transmitted - * @rx_index: Buffer for Received data - * @n_curnt_chip: The chip number that this SPI driver currently - * operates on - * @current_chip: Reference to the current chip that this SPI - * driver currently operates on - * @current_msg: The current message that this SPI driver is - * handling - * @cur_trans: The current transfer that this SPI driver is - * handling - * @board_dat: Reference to the SPI device data structure - */ -struct pch_spi_data { - void __iomem *io_remap_addr; - struct spi_master *master; - struct work_struct work; - struct workqueue_struct *wk; - wait_queue_head_t wait; - u8 transfer_complete; - u8 bcurrent_msg_processing; - spinlock_t lock; - struct list_head queue; - u8 status; - u32 bpw_len; - u8 transfer_active; - u32 tx_index; - u32 rx_index; - u16 *pkt_tx_buff; - u16 *pkt_rx_buff; - u8 n_curnt_chip; - struct spi_device *current_chip; - struct spi_message *current_msg; - struct spi_transfer *cur_trans; - struct pch_spi_board_data *board_dat; -}; - -/** - * struct pch_spi_board_data - Holds the SPI device specific details - * @pdev: Pointer to the PCI device - * @irq_reg_sts: Status of IRQ registration - * @pci_req_sts: Status of pci_request_regions - * @suspend_sts: Status of suspend - * @data: Pointer to SPI channel data structure - */ -struct pch_spi_board_data { - struct pci_dev *pdev; - u8 irq_reg_sts; - u8 pci_req_sts; - u8 suspend_sts; - struct pch_spi_data *data; -}; - -static struct pci_device_id pch_spi_pcidev_id[] = { - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_GE_SPI)}, - {0,} -}; - -/** - * pch_spi_writereg() - Performs register writes - * @master: Pointer to struct spi_master. - * @idx: Register offset. - * @val: Value to be written to register. - */ -static inline void pch_spi_writereg(struct spi_master *master, int idx, u32 val) -{ - struct pch_spi_data *data = spi_master_get_devdata(master); - iowrite32(val, (data->io_remap_addr + idx)); -} - -/** - * pch_spi_readreg() - Performs register reads - * @master: Pointer to struct spi_master. - * @idx: Register offset. - */ -static inline u32 pch_spi_readreg(struct spi_master *master, int idx) -{ - struct pch_spi_data *data = spi_master_get_devdata(master); - return ioread32(data->io_remap_addr + idx); -} - -static inline void pch_spi_setclr_reg(struct spi_master *master, int idx, - u32 set, u32 clr) -{ - u32 tmp = pch_spi_readreg(master, idx); - tmp = (tmp & ~clr) | set; - pch_spi_writereg(master, idx, tmp); -} - -static void pch_spi_set_master_mode(struct spi_master *master) -{ - pch_spi_setclr_reg(master, PCH_SPCR, SPCR_MSTR_BIT, 0); -} - -/** - * pch_spi_clear_fifo() - Clears the Transmit and Receive FIFOs - * @master: Pointer to struct spi_master. 
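[Note: pch_spi_setclr_reg() above is the Topcliff driver's single register-update primitive: read, clear the 'clr' bits, set the 'set' bits, write back; pch_spi_clear_fifo(), defined just below, uses it to pulse SPCR_FICLR_BIT high and then low. A runnable sketch of the same helper operating on a plain variable in place of the MMIO register (ioread32/iowrite32 replaced by assignment):]

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

#define SPCR_FICLR_BIT (1 << 24)

static uint32_t fake_spcr;  /* stand-in for the MMIO SPCR register */

/* Same shape as pch_spi_setclr_reg(): tmp = (tmp & ~clr) | set. */
static void setclr_reg(uint32_t *reg, uint32_t set, uint32_t clr)
{
    uint32_t tmp = *reg;

    tmp = (tmp & ~clr) | set;
    *reg = tmp;
}

int main(void)
{
    fake_spcr = 0x0000000f;

    /* pch_spi_clear_fifo(): toggle FICLR high, then back low */
    setclr_reg(&fake_spcr, SPCR_FICLR_BIT, 0);
    assert(fake_spcr == (0x0000000fu | SPCR_FICLR_BIT));
    setclr_reg(&fake_spcr, 0, SPCR_FICLR_BIT);
    assert(fake_spcr == 0x0000000f);

    printf("SPCR = 0x%08x\n", fake_spcr);
    return 0;
}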
- */ -static void pch_spi_clear_fifo(struct spi_master *master) -{ - pch_spi_setclr_reg(master, PCH_SPCR, SPCR_FICLR_BIT, 0); - pch_spi_setclr_reg(master, PCH_SPCR, 0, SPCR_FICLR_BIT); -} - -static void pch_spi_handler_sub(struct pch_spi_data *data, u32 reg_spsr_val, - void __iomem *io_remap_addr) -{ - u32 n_read, tx_index, rx_index, bpw_len; - u16 *pkt_rx_buffer, *pkt_tx_buff; - int read_cnt; - u32 reg_spcr_val; - void __iomem *spsr; - void __iomem *spdrr; - void __iomem *spdwr; - - spsr = io_remap_addr + PCH_SPSR; - iowrite32(reg_spsr_val, spsr); - - if (data->transfer_active) { - rx_index = data->rx_index; - tx_index = data->tx_index; - bpw_len = data->bpw_len; - pkt_rx_buffer = data->pkt_rx_buff; - pkt_tx_buff = data->pkt_tx_buff; - - spdrr = io_remap_addr + PCH_SPDRR; - spdwr = io_remap_addr + PCH_SPDWR; - - n_read = PCH_READABLE(reg_spsr_val); - - for (read_cnt = 0; (read_cnt < n_read); read_cnt++) { - pkt_rx_buffer[rx_index++] = ioread32(spdrr); - if (tx_index < bpw_len) - iowrite32(pkt_tx_buff[tx_index++], spdwr); - } - - /* disable RFI if not needed */ - if ((bpw_len - rx_index) <= PCH_MAX_FIFO_DEPTH) { - reg_spcr_val = ioread32(io_remap_addr + PCH_SPCR); - reg_spcr_val &= ~SPCR_RFIE_BIT; /* disable RFI */ - - /* reset rx threshold */ - reg_spcr_val &= MASK_RFIC_SPCR_BITS; - reg_spcr_val |= (PCH_RX_THOLD_MAX << SPCR_RFIC_FIELD); - iowrite32(((reg_spcr_val) &= (~(SPCR_RFIE_BIT))), - (io_remap_addr + PCH_SPCR)); - } - - /* update counts */ - data->tx_index = tx_index; - data->rx_index = rx_index; - - } - - /* if transfer complete interrupt */ - if (reg_spsr_val & SPSR_FI_BIT) { - /* disable FI & RFI interrupts */ - pch_spi_setclr_reg(data->master, PCH_SPCR, 0, - SPCR_FIE_BIT | SPCR_RFIE_BIT); - - /* transfer is completed;inform pch_spi_process_messages */ - data->transfer_complete = true; - wake_up(&data->wait); - } -} - -/** - * pch_spi_handler() - Interrupt handler - * @irq: The interrupt number. - * @dev_id: Pointer to struct pch_spi_board_data. - */ -static irqreturn_t pch_spi_handler(int irq, void *dev_id) -{ - u32 reg_spsr_val; - struct pch_spi_data *data; - void __iomem *spsr; - void __iomem *io_remap_addr; - irqreturn_t ret = IRQ_NONE; - struct pch_spi_board_data *board_dat = dev_id; - - if (board_dat->suspend_sts) { - dev_dbg(&board_dat->pdev->dev, - "%s returning due to suspend\n", __func__); - return IRQ_NONE; - } - - data = board_dat->data; - io_remap_addr = data->io_remap_addr; - spsr = io_remap_addr + PCH_SPSR; - - reg_spsr_val = ioread32(spsr); - - /* Check if the interrupt is for SPI device */ - if (reg_spsr_val & (SPSR_FI_BIT | SPSR_RFI_BIT)) { - pch_spi_handler_sub(data, reg_spsr_val, io_remap_addr); - ret = IRQ_HANDLED; - } - - dev_dbg(&board_dat->pdev->dev, "%s EXIT return value=%d\n", - __func__, ret); - - return ret; -} - -/** - * pch_spi_set_baud_rate() - Sets SPBR field in SPBRR - * @master: Pointer to struct spi_master. - * @speed_hz: Baud rate. - */ -static void pch_spi_set_baud_rate(struct spi_master *master, u32 speed_hz) -{ - u32 n_spbr = PCH_CLOCK_HZ / (speed_hz * 2); - - /* if baud rate is less than we can support limit it */ - if (n_spbr > PCH_MAX_SPBR) - n_spbr = PCH_MAX_SPBR; - - pch_spi_setclr_reg(master, PCH_SPBRR, n_spbr, ~MASK_SPBRR_SPBR_BITS); -} - -/** - * pch_spi_set_bits_per_word() - Sets SIZE field in SPBRR - * @master: Pointer to struct spi_master. - * @bits_per_word: Bits per word for SPI transfer. 
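[Note: pch_spi_set_baud_rate() above derives the SPBR divider as PCH_CLOCK_HZ / (speed_hz * 2) and caps it at PCH_MAX_SPBR, which implies a bit clock of roughly PCH_CLOCK_HZ / (2 * SPBR) and a floor on how slow the bus can actually run. A small runnable check of that arithmetic; the requested rates are chosen only for illustration.]

#include <stdio.h>

#define PCH_CLOCK_HZ  50000000
#define PCH_MAX_SPBR  1023

/* Same computation as pch_spi_set_baud_rate(), minus the register write. */
static unsigned pch_spbr_for(unsigned speed_hz)
{
    unsigned n_spbr = PCH_CLOCK_HZ / (speed_hz * 2);

    if (n_spbr > PCH_MAX_SPBR)
        n_spbr = PCH_MAX_SPBR;  /* slowest rate the divider can reach */
    return n_spbr;
}

int main(void)
{
    unsigned speeds[] = { 5000000, 1000000, 10000 };

    for (unsigned i = 0; i < sizeof(speeds) / sizeof(speeds[0]); i++) {
        unsigned spbr = pch_spbr_for(speeds[i]);

        printf("requested %8u Hz -> SPBR %4u -> approx %8u Hz\n",
               speeds[i], spbr, PCH_CLOCK_HZ / (2 * spbr));
    }
    return 0;
}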
- */ -static void pch_spi_set_bits_per_word(struct spi_master *master, - u8 bits_per_word) -{ - if (bits_per_word == 8) - pch_spi_setclr_reg(master, PCH_SPBRR, 0, SPBRR_SIZE_BIT); - else - pch_spi_setclr_reg(master, PCH_SPBRR, SPBRR_SIZE_BIT, 0); -} - -/** - * pch_spi_setup_transfer() - Configures the PCH SPI hardware for transfer - * @spi: Pointer to struct spi_device. - */ -static void pch_spi_setup_transfer(struct spi_device *spi) -{ - u32 flags = 0; - - dev_dbg(&spi->dev, "%s SPBRR content =%x setting baud rate=%d\n", - __func__, pch_spi_readreg(spi->master, PCH_SPBRR), - spi->max_speed_hz); - pch_spi_set_baud_rate(spi->master, spi->max_speed_hz); - - /* set bits per word */ - pch_spi_set_bits_per_word(spi->master, spi->bits_per_word); - - if (!(spi->mode & SPI_LSB_FIRST)) - flags |= SPCR_LSBF_BIT; - if (spi->mode & SPI_CPOL) - flags |= SPCR_CPOL_BIT; - if (spi->mode & SPI_CPHA) - flags |= SPCR_CPHA_BIT; - pch_spi_setclr_reg(spi->master, PCH_SPCR, flags, - (SPCR_LSBF_BIT | SPCR_CPOL_BIT | SPCR_CPHA_BIT)); - - /* Clear the FIFO by toggling FICLR to 1 and back to 0 */ - pch_spi_clear_fifo(spi->master); -} - -/** - * pch_spi_reset() - Clears SPI registers - * @master: Pointer to struct spi_master. - */ -static void pch_spi_reset(struct spi_master *master) -{ - /* write 1 to reset SPI */ - pch_spi_writereg(master, PCH_SRST, 0x1); - - /* clear reset */ - pch_spi_writereg(master, PCH_SRST, 0x0); -} - -static int pch_spi_setup(struct spi_device *pspi) -{ - /* check bits per word */ - if (pspi->bits_per_word == 0) { - pspi->bits_per_word = 8; - dev_dbg(&pspi->dev, "%s 8 bits per word\n", __func__); - } - - if ((pspi->bits_per_word != 8) && (pspi->bits_per_word != 16)) { - dev_err(&pspi->dev, "%s Invalid bits per word\n", __func__); - return -EINVAL; - } - - /* Check baud rate setting */ - /* if baud rate of chip is greater than - max we can support,return error */ - if ((pspi->max_speed_hz) > PCH_MAX_BAUDRATE) - pspi->max_speed_hz = PCH_MAX_BAUDRATE; - - dev_dbg(&pspi->dev, "%s MODE = %x\n", __func__, - (pspi->mode) & (SPI_CPOL | SPI_CPHA)); - - return 0; -} - -static int pch_spi_transfer(struct spi_device *pspi, struct spi_message *pmsg) -{ - - struct spi_transfer *transfer; - struct pch_spi_data *data = spi_master_get_devdata(pspi->master); - int retval; - unsigned long flags; - - /* validate spi message and baud rate */ - if (unlikely(list_empty(&pmsg->transfers) == 1)) { - dev_err(&pspi->dev, "%s list empty\n", __func__); - retval = -EINVAL; - goto err_out; - } - - if (unlikely(pspi->max_speed_hz == 0)) { - dev_err(&pspi->dev, "%s pch_spi_tranfer maxspeed=%d\n", - __func__, pspi->max_speed_hz); - retval = -EINVAL; - goto err_out; - } - - dev_dbg(&pspi->dev, "%s Transfer List not empty. " - "Transfer Speed is set.\n", __func__); - - /* validate Tx/Rx buffers and Transfer length */ - list_for_each_entry(transfer, &pmsg->transfers, transfer_list) { - if (!transfer->tx_buf && !transfer->rx_buf) { - dev_err(&pspi->dev, - "%s Tx and Rx buffer NULL\n", __func__); - retval = -EINVAL; - goto err_out; - } - - if (!transfer->len) { - dev_err(&pspi->dev, "%s Transfer length invalid\n", - __func__); - retval = -EINVAL; - goto err_out; - } - - dev_dbg(&pspi->dev, "%s Tx/Rx buffer valid. 
Transfer length" - " valid\n", __func__); - - /* if baud rate hs been specified validate the same */ - if (transfer->speed_hz > PCH_MAX_BAUDRATE) - transfer->speed_hz = PCH_MAX_BAUDRATE; - - /* if bits per word has been specified validate the same */ - if (transfer->bits_per_word) { - if ((transfer->bits_per_word != 8) - && (transfer->bits_per_word != 16)) { - retval = -EINVAL; - dev_err(&pspi->dev, - "%s Invalid bits per word\n", __func__); - goto err_out; - } - } - } - - spin_lock_irqsave(&data->lock, flags); - - /* We won't process any messages if we have been asked to terminate */ - if (data->status == STATUS_EXITING) { - dev_err(&pspi->dev, "%s status = STATUS_EXITING.\n", __func__); - retval = -ESHUTDOWN; - goto err_return_spinlock; - } - - /* If suspended ,return -EINVAL */ - if (data->board_dat->suspend_sts) { - dev_err(&pspi->dev, "%s suspend; returning EINVAL\n", __func__); - retval = -EINVAL; - goto err_return_spinlock; - } - - /* set status of message */ - pmsg->actual_length = 0; - dev_dbg(&pspi->dev, "%s - pmsg->status =%d\n", __func__, pmsg->status); - - pmsg->status = -EINPROGRESS; - - /* add message to queue */ - list_add_tail(&pmsg->queue, &data->queue); - dev_dbg(&pspi->dev, "%s - Invoked list_add_tail\n", __func__); - - /* schedule work queue to run */ - queue_work(data->wk, &data->work); - dev_dbg(&pspi->dev, "%s - Invoked queue work\n", __func__); - - retval = 0; - -err_return_spinlock: - spin_unlock_irqrestore(&data->lock, flags); -err_out: - dev_dbg(&pspi->dev, "%s RETURN=%d\n", __func__, retval); - return retval; -} - -static inline void pch_spi_select_chip(struct pch_spi_data *data, - struct spi_device *pspi) -{ - if (data->current_chip != NULL) { - if (pspi->chip_select != data->n_curnt_chip) { - dev_dbg(&pspi->dev, "%s : different slave\n", __func__); - data->current_chip = NULL; - } - } - - data->current_chip = pspi; - - data->n_curnt_chip = data->current_chip->chip_select; - - dev_dbg(&pspi->dev, "%s :Invoking pch_spi_setup_transfer\n", __func__); - pch_spi_setup_transfer(pspi); -} - -static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw, - struct spi_message **ppmsg) -{ - int size; - u32 n_writes; - int j; - struct spi_message *pmsg; - const u8 *tx_buf; - const u16 *tx_sbuf; - - pmsg = *ppmsg; - - /* set baud rate if needed */ - if (data->cur_trans->speed_hz) { - dev_dbg(&data->master->dev, "%s:setting baud rate\n", __func__); - pch_spi_set_baud_rate(data->master, data->cur_trans->speed_hz); - } - - /* set bits per word if needed */ - if (data->cur_trans->bits_per_word && - (data->current_msg->spi->bits_per_word != data->cur_trans->bits_per_word)) { - dev_dbg(&data->master->dev, "%s:set bits per word\n", __func__); - pch_spi_set_bits_per_word(data->master, - data->cur_trans->bits_per_word); - *bpw = data->cur_trans->bits_per_word; - } else { - *bpw = data->current_msg->spi->bits_per_word; - } - - /* reset Tx/Rx index */ - data->tx_index = 0; - data->rx_index = 0; - - data->bpw_len = data->cur_trans->len / (*bpw / 8); - - /* find alloc size */ - size = data->cur_trans->len * sizeof(*data->pkt_tx_buff); - - /* allocate memory for pkt_tx_buff & pkt_rx_buffer */ - data->pkt_tx_buff = kzalloc(size, GFP_KERNEL); - if (data->pkt_tx_buff != NULL) { - data->pkt_rx_buff = kzalloc(size, GFP_KERNEL); - if (!data->pkt_rx_buff) - kfree(data->pkt_tx_buff); - } - - if (!data->pkt_rx_buff) { - /* flush queue and set status of all transfers to -ENOMEM */ - dev_err(&data->master->dev, "%s :kzalloc failed\n", __func__); - list_for_each_entry(pmsg, data->queue.next, 
queue) { - pmsg->status = -ENOMEM; - - if (pmsg->complete != 0) - pmsg->complete(pmsg->context); - - /* delete from queue */ - list_del_init(&pmsg->queue); - } - return; - } - - /* copy Tx Data */ - if (data->cur_trans->tx_buf != NULL) { - if (*bpw == 8) { - tx_buf = data->cur_trans->tx_buf; - for (j = 0; j < data->bpw_len; j++) - data->pkt_tx_buff[j] = *tx_buf++; - } else { - tx_sbuf = data->cur_trans->tx_buf; - for (j = 0; j < data->bpw_len; j++) - data->pkt_tx_buff[j] = *tx_sbuf++; - } - } - - /* if len greater than PCH_MAX_FIFO_DEPTH, write 16,else len bytes */ - n_writes = data->bpw_len; - if (n_writes > PCH_MAX_FIFO_DEPTH) - n_writes = PCH_MAX_FIFO_DEPTH; - - dev_dbg(&data->master->dev, "\n%s:Pulling down SSN low - writing " - "0x2 to SSNXCR\n", __func__); - pch_spi_writereg(data->master, PCH_SSNXCR, SSN_LOW); - - for (j = 0; j < n_writes; j++) - pch_spi_writereg(data->master, PCH_SPDWR, data->pkt_tx_buff[j]); - - /* update tx_index */ - data->tx_index = j; - - /* reset transfer complete flag */ - data->transfer_complete = false; - data->transfer_active = true; -} - - -static void pch_spi_nomore_transfer(struct pch_spi_data *data, - struct spi_message *pmsg) -{ - dev_dbg(&data->master->dev, "%s called\n", __func__); - /* Invoke complete callback - * [To the spi core..indicating end of transfer] */ - data->current_msg->status = 0; - - if (data->current_msg->complete != 0) { - dev_dbg(&data->master->dev, - "%s:Invoking callback of SPI core\n", __func__); - data->current_msg->complete(data->current_msg->context); - } - - /* update status in global variable */ - data->bcurrent_msg_processing = false; - - dev_dbg(&data->master->dev, - "%s:data->bcurrent_msg_processing = false\n", __func__); - - data->current_msg = NULL; - data->cur_trans = NULL; - - /* check if we have items in list and not suspending - * return 1 if list empty */ - if ((list_empty(&data->queue) == 0) && - (!data->board_dat->suspend_sts) && - (data->status != STATUS_EXITING)) { - /* We have some more work to do (either there is more tranint - * bpw;sfer requests in the current message or there are - *more messages) - */ - dev_dbg(&data->master->dev, "%s:Invoke queue_work\n", __func__); - queue_work(data->wk, &data->work); - } else if (data->board_dat->suspend_sts || - data->status == STATUS_EXITING) { - dev_dbg(&data->master->dev, - "%s suspend/remove initiated, flushing queue\n", - __func__); - list_for_each_entry(pmsg, data->queue.next, queue) { - pmsg->status = -EIO; - - if (pmsg->complete) - pmsg->complete(pmsg->context); - - /* delete from queue */ - list_del_init(&pmsg->queue); - } - } -} - -static void pch_spi_set_ir(struct pch_spi_data *data) -{ - /* enable interrupts */ - if ((data->bpw_len) > PCH_MAX_FIFO_DEPTH) { - /* set receive threshold to PCH_RX_THOLD */ - pch_spi_setclr_reg(data->master, PCH_SPCR, - PCH_RX_THOLD << SPCR_RFIC_FIELD, - ~MASK_RFIC_SPCR_BITS); - /* enable FI and RFI interrupts */ - pch_spi_setclr_reg(data->master, PCH_SPCR, - SPCR_RFIE_BIT | SPCR_FIE_BIT, 0); - } else { - /* set receive threshold to maximum */ - pch_spi_setclr_reg(data->master, PCH_SPCR, - PCH_RX_THOLD_MAX << SPCR_TFIC_FIELD, - ~MASK_TFIC_SPCR_BITS); - /* enable FI interrupt */ - pch_spi_setclr_reg(data->master, PCH_SPCR, SPCR_FIE_BIT, 0); - } - - dev_dbg(&data->master->dev, - "%s:invoking pch_spi_set_enable to enable SPI\n", __func__); - - /* SPI set enable */ - pch_spi_setclr_reg(data->current_chip->master, PCH_SPCR, SPCR_SPE_BIT, 0); - - /* Wait until the transfer completes; go to sleep after - initiating the transfer. 
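[Note: right at this point pch_spi_set_ir() goes to sleep: it blocks in wait_event_interruptible() on data->transfer_complete, and pch_spi_handler_sub() (shown earlier) sets that flag and calls wake_up() once the FIFO has drained. Below is a runnable user-space analogy of that handshake using a pthread condition variable; fake_irq() and the other names are illustrative, not the driver's.]

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool transfer_complete;

/* Plays the role of pch_spi_handler_sub() seeing SPSR_FI_BIT. */
static void *fake_irq(void *arg)
{
    (void)arg;
    usleep(1000);               /* pretend the FIFO drains */
    pthread_mutex_lock(&lock);
    transfer_complete = true;   /* data->transfer_complete = true; */
    pthread_cond_signal(&cond); /* wake_up(&data->wait); */
    pthread_mutex_unlock(&lock);
    return NULL;
}

int main(void)
{
    pthread_t irq;

    pthread_create(&irq, NULL, fake_irq, NULL);

    /* wait_event_interruptible(data->wait, data->transfer_complete); */
    pthread_mutex_lock(&lock);
    while (!transfer_complete)
        pthread_cond_wait(&cond, &lock);
    pthread_mutex_unlock(&lock);

    pthread_join(irq, NULL);
    printf("transfer complete\n");
    return 0;
}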
*/ - dev_dbg(&data->master->dev, - "%s:waiting for transfer to get over\n", __func__); - - wait_event_interruptible(data->wait, data->transfer_complete); - - pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL); - dev_dbg(&data->master->dev, - "%s:no more control over SSN-writing 0 to SSNXCR.", __func__); - - data->transfer_active = false; - dev_dbg(&data->master->dev, - "%s set data->transfer_active = false\n", __func__); - - /* clear all interrupts */ - pch_spi_writereg(data->master, PCH_SPSR, - pch_spi_readreg(data->master, PCH_SPSR)); - /* disable interrupts */ - pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL); -} - -static void pch_spi_copy_rx_data(struct pch_spi_data *data, int bpw) -{ - int j; - u8 *rx_buf; - u16 *rx_sbuf; - - /* copy Rx Data */ - if (!data->cur_trans->rx_buf) - return; - - if (bpw == 8) { - rx_buf = data->cur_trans->rx_buf; - for (j = 0; j < data->bpw_len; j++) - *rx_buf++ = data->pkt_rx_buff[j] & 0xFF; - } else { - rx_sbuf = data->cur_trans->rx_buf; - for (j = 0; j < data->bpw_len; j++) - *rx_sbuf++ = data->pkt_rx_buff[j]; - } -} - - -static void pch_spi_process_messages(struct work_struct *pwork) -{ - struct spi_message *pmsg; - struct pch_spi_data *data; - int bpw; - - data = container_of(pwork, struct pch_spi_data, work); - dev_dbg(&data->master->dev, "%s data initialized\n", __func__); - - spin_lock(&data->lock); - - /* check if suspend has been initiated;if yes flush queue */ - if (data->board_dat->suspend_sts || (data->status == STATUS_EXITING)) { - dev_dbg(&data->master->dev, - "%s suspend/remove initiated,flushing queue\n", - __func__); - - list_for_each_entry(pmsg, data->queue.next, queue) { - pmsg->status = -EIO; - - if (pmsg->complete != 0) { - spin_unlock(&data->lock); - pmsg->complete(pmsg->context); - spin_lock(&data->lock); - } - - /* delete from queue */ - list_del_init(&pmsg->queue); - } - - spin_unlock(&data->lock); - return; - } - - data->bcurrent_msg_processing = true; - dev_dbg(&data->master->dev, - "%s Set data->bcurrent_msg_processing= true\n", __func__); - - /* Get the message from the queue and delete it from there. */ - data->current_msg = list_entry(data->queue.next, struct spi_message, - queue); - - list_del_init(&data->current_msg->queue); - - data->current_msg->status = 0; - - pch_spi_select_chip(data, data->current_msg->spi); - - spin_unlock(&data->lock); - - do { - /* If we are already processing a message get the next - transfer structure from the message otherwise retrieve - the 1st transfer request from the message. */ - spin_lock(&data->lock); - - if (data->cur_trans == NULL) { - data->cur_trans = - list_entry(data->current_msg->transfers. 
- next, struct spi_transfer, - transfer_list); - dev_dbg(&data->master->dev, - "%s :Getting 1st transfer message\n", __func__); - } else { - data->cur_trans = - list_entry(data->cur_trans->transfer_list.next, - struct spi_transfer, - transfer_list); - dev_dbg(&data->master->dev, - "%s :Getting next transfer message\n", - __func__); - } - - spin_unlock(&data->lock); - - pch_spi_set_tx(data, &bpw, &pmsg); - - /* Control interrupt*/ - pch_spi_set_ir(data); - - /* Disable SPI transfer */ - pch_spi_setclr_reg(data->current_chip->master, PCH_SPCR, 0, - SPCR_SPE_BIT); - - /* clear FIFO */ - pch_spi_clear_fifo(data->master); - - /* copy Rx Data */ - pch_spi_copy_rx_data(data, bpw); - - /* free memory */ - kfree(data->pkt_rx_buff); - data->pkt_rx_buff = NULL; - - kfree(data->pkt_tx_buff); - data->pkt_tx_buff = NULL; - - /* increment message count */ - data->current_msg->actual_length += data->cur_trans->len; - - dev_dbg(&data->master->dev, - "%s:data->current_msg->actual_length=%d\n", - __func__, data->current_msg->actual_length); - - /* check for delay */ - if (data->cur_trans->delay_usecs) { - dev_dbg(&data->master->dev, "%s:" - "delay in usec=%d\n", __func__, - data->cur_trans->delay_usecs); - udelay(data->cur_trans->delay_usecs); - } - - spin_lock(&data->lock); - - /* No more transfer in this message. */ - if ((data->cur_trans->transfer_list.next) == - &(data->current_msg->transfers)) { - pch_spi_nomore_transfer(data, pmsg); - } - - spin_unlock(&data->lock); - - } while (data->cur_trans != NULL); -} - -static void pch_spi_free_resources(struct pch_spi_board_data *board_dat) -{ - dev_dbg(&board_dat->pdev->dev, "%s ENTRY\n", __func__); - - /* free workqueue */ - if (board_dat->data->wk != NULL) { - destroy_workqueue(board_dat->data->wk); - board_dat->data->wk = NULL; - dev_dbg(&board_dat->pdev->dev, - "%s destroy_workqueue invoked successfully\n", - __func__); - } - - /* disable interrupts & free IRQ */ - if (board_dat->irq_reg_sts) { - /* disable interrupts */ - pch_spi_setclr_reg(board_dat->data->master, PCH_SPCR, 0, - PCH_ALL); - - /* free IRQ */ - free_irq(board_dat->pdev->irq, board_dat); - - dev_dbg(&board_dat->pdev->dev, - "%s free_irq invoked successfully\n", __func__); - - board_dat->irq_reg_sts = false; - } - - /* unmap PCI base address */ - if (board_dat->data->io_remap_addr != 0) { - pci_iounmap(board_dat->pdev, board_dat->data->io_remap_addr); - - board_dat->data->io_remap_addr = 0; - - dev_dbg(&board_dat->pdev->dev, - "%s pci_iounmap invoked successfully\n", __func__); - } - - /* release PCI region */ - if (board_dat->pci_req_sts) { - pci_release_regions(board_dat->pdev); - dev_dbg(&board_dat->pdev->dev, - "%s pci_release_regions invoked successfully\n", - __func__); - board_dat->pci_req_sts = false; - } -} - -static int pch_spi_get_resources(struct pch_spi_board_data *board_dat) -{ - void __iomem *io_remap_addr; - int retval; - dev_dbg(&board_dat->pdev->dev, "%s ENTRY\n", __func__); - - /* create workqueue */ - board_dat->data->wk = create_singlethread_workqueue(KBUILD_MODNAME); - if (!board_dat->data->wk) { - dev_err(&board_dat->pdev->dev, - "%s create_singlet hread_workqueue failed\n", __func__); - retval = -EBUSY; - goto err_return; - } - - dev_dbg(&board_dat->pdev->dev, - "%s create_singlethread_workqueue success\n", __func__); - - retval = pci_request_regions(board_dat->pdev, KBUILD_MODNAME); - if (retval != 0) { - dev_err(&board_dat->pdev->dev, - "%s request_region failed\n", __func__); - goto err_return; - } - - board_dat->pci_req_sts = true; - - io_remap_addr = 
pci_iomap(board_dat->pdev, 1, 0); - if (io_remap_addr == 0) { - dev_err(&board_dat->pdev->dev, - "%s pci_iomap failed\n", __func__); - retval = -ENOMEM; - goto err_return; - } - - /* calculate base address for all channels */ - board_dat->data->io_remap_addr = io_remap_addr; - - /* reset PCH SPI h/w */ - pch_spi_reset(board_dat->data->master); - dev_dbg(&board_dat->pdev->dev, - "%s pch_spi_reset invoked successfully\n", __func__); - - /* register IRQ */ - retval = request_irq(board_dat->pdev->irq, pch_spi_handler, - IRQF_SHARED, KBUILD_MODNAME, board_dat); - if (retval != 0) { - dev_err(&board_dat->pdev->dev, - "%s request_irq failed\n", __func__); - goto err_return; - } - - dev_dbg(&board_dat->pdev->dev, "%s request_irq returned=%d\n", - __func__, retval); - - board_dat->irq_reg_sts = true; - dev_dbg(&board_dat->pdev->dev, "%s data->irq_reg_sts=true\n", __func__); - -err_return: - if (retval != 0) { - dev_err(&board_dat->pdev->dev, - "%s FAIL:invoking pch_spi_free_resources\n", __func__); - pch_spi_free_resources(board_dat); - } - - dev_dbg(&board_dat->pdev->dev, "%s Return=%d\n", __func__, retval); - - return retval; -} - -static int pch_spi_probe(struct pci_dev *pdev, const struct pci_device_id *id) -{ - - struct spi_master *master; - - struct pch_spi_board_data *board_dat; - int retval; - - dev_dbg(&pdev->dev, "%s ENTRY\n", __func__); - - /* allocate memory for private data */ - board_dat = kzalloc(sizeof(struct pch_spi_board_data), GFP_KERNEL); - if (board_dat == NULL) { - dev_err(&pdev->dev, - " %s memory allocation for private data failed\n", - __func__); - retval = -ENOMEM; - goto err_kmalloc; - } - - dev_dbg(&pdev->dev, - "%s memory allocation for private data success\n", __func__); - - /* enable PCI device */ - retval = pci_enable_device(pdev); - if (retval != 0) { - dev_err(&pdev->dev, "%s pci_enable_device FAILED\n", __func__); - - goto err_pci_en_device; - } - - dev_dbg(&pdev->dev, "%s pci_enable_device returned=%d\n", - __func__, retval); - - board_dat->pdev = pdev; - - /* alllocate memory for SPI master */ - master = spi_alloc_master(&pdev->dev, sizeof(struct pch_spi_data)); - if (master == NULL) { - retval = -ENOMEM; - dev_err(&pdev->dev, "%s Fail.\n", __func__); - goto err_spi_alloc_master; - } - - dev_dbg(&pdev->dev, - "%s spi_alloc_master returned non NULL\n", __func__); - - /* initialize members of SPI master */ - master->bus_num = -1; - master->num_chipselect = PCH_MAX_CS; - master->setup = pch_spi_setup; - master->transfer = pch_spi_transfer; - dev_dbg(&pdev->dev, - "%s transfer member of SPI master initialized\n", __func__); - - board_dat->data = spi_master_get_devdata(master); - - board_dat->data->master = master; - board_dat->data->n_curnt_chip = 255; - board_dat->data->board_dat = board_dat; - board_dat->data->status = STATUS_RUNNING; - - INIT_LIST_HEAD(&board_dat->data->queue); - spin_lock_init(&board_dat->data->lock); - INIT_WORK(&board_dat->data->work, pch_spi_process_messages); - init_waitqueue_head(&board_dat->data->wait); - - /* allocate resources for PCH SPI */ - retval = pch_spi_get_resources(board_dat); - if (retval) { - dev_err(&pdev->dev, "%s fail(retval=%d)\n", __func__, retval); - goto err_spi_get_resources; - } - - dev_dbg(&pdev->dev, "%s pch_spi_get_resources returned=%d\n", - __func__, retval); - - /* save private data in dev */ - pci_set_drvdata(pdev, board_dat); - dev_dbg(&pdev->dev, "%s invoked pci_set_drvdata\n", __func__); - - /* set master mode */ - pch_spi_set_master_mode(master); - dev_dbg(&pdev->dev, - "%s invoked 
pch_spi_set_master_mode\n", __func__); - - /* Register the controller with the SPI core. */ - retval = spi_register_master(master); - if (retval != 0) { - dev_err(&pdev->dev, - "%s spi_register_master FAILED\n", __func__); - goto err_spi_reg_master; - } - - dev_dbg(&pdev->dev, "%s spi_register_master returned=%d\n", - __func__, retval); - - - return 0; - -err_spi_reg_master: - spi_unregister_master(master); -err_spi_get_resources: -err_spi_alloc_master: - spi_master_put(master); - pci_disable_device(pdev); -err_pci_en_device: - kfree(board_dat); -err_kmalloc: - return retval; -} - -static void pch_spi_remove(struct pci_dev *pdev) -{ - struct pch_spi_board_data *board_dat = pci_get_drvdata(pdev); - int count; - - dev_dbg(&pdev->dev, "%s ENTRY\n", __func__); - - if (!board_dat) { - dev_err(&pdev->dev, - "%s pci_get_drvdata returned NULL\n", __func__); - return; - } - - /* check for any pending messages; no action is taken if the queue - * is still full; but at least we tried. Unload anyway */ - count = 500; - spin_lock(&board_dat->data->lock); - board_dat->data->status = STATUS_EXITING; - while ((list_empty(&board_dat->data->queue) == 0) && --count) { - dev_dbg(&board_dat->pdev->dev, "%s :queue not empty\n", - __func__); - spin_unlock(&board_dat->data->lock); - msleep(PCH_SLEEP_TIME); - spin_lock(&board_dat->data->lock); - } - spin_unlock(&board_dat->data->lock); - - /* Free resources allocated for PCH SPI */ - pch_spi_free_resources(board_dat); - - spi_unregister_master(board_dat->data->master); - - /* free memory for private data */ - kfree(board_dat); - - pci_set_drvdata(pdev, NULL); - - /* disable PCI device */ - pci_disable_device(pdev); - - dev_dbg(&pdev->dev, "%s invoked pci_disable_device\n", __func__); -} - -#ifdef CONFIG_PM -static int pch_spi_suspend(struct pci_dev *pdev, pm_message_t state) -{ - u8 count; - int retval; - - struct pch_spi_board_data *board_dat = pci_get_drvdata(pdev); - - dev_dbg(&pdev->dev, "%s ENTRY\n", __func__); - - if (!board_dat) { - dev_err(&pdev->dev, - "%s pci_get_drvdata returned NULL\n", __func__); - return -EFAULT; - } - - retval = 0; - board_dat->suspend_sts = true; - - /* check if the current message is processed: - Only after thats done the transfer will be suspended */ - count = 255; - while ((--count) > 0) { - if (!(board_dat->data->bcurrent_msg_processing)) { - dev_dbg(&pdev->dev, "%s board_dat->data->bCurrent_" - "msg_processing = false\n", __func__); - break; - } else { - dev_dbg(&pdev->dev, "%s board_dat->data->bCurrent_msg_" - "processing = true\n", __func__); - } - msleep(PCH_SLEEP_TIME); - } - - /* Free IRQ */ - if (board_dat->irq_reg_sts) { - /* disable all interrupts */ - pch_spi_setclr_reg(board_dat->data->master, PCH_SPCR, 0, - PCH_ALL); - pch_spi_reset(board_dat->data->master); - - free_irq(board_dat->pdev->irq, board_dat); - - board_dat->irq_reg_sts = false; - dev_dbg(&pdev->dev, - "%s free_irq invoked successfully.\n", __func__); - } - - /* save config space */ - retval = pci_save_state(pdev); - - if (retval == 0) { - dev_dbg(&pdev->dev, "%s pci_save_state returned=%d\n", - __func__, retval); - /* disable PM notifications */ - pci_enable_wake(pdev, PCI_D3hot, 0); - dev_dbg(&pdev->dev, - "%s pci_enable_wake invoked successfully\n", __func__); - /* disable PCI device */ - pci_disable_device(pdev); - dev_dbg(&pdev->dev, - "%s pci_disable_device invoked successfully\n", - __func__); - /* move device to D3hot state */ - pci_set_power_state(pdev, PCI_D3hot); - dev_dbg(&pdev->dev, - "%s pci_set_power_state invoked successfully\n", - 
__func__); - } else { - dev_err(&pdev->dev, "%s pci_save_state failed\n", __func__); - } - - dev_dbg(&pdev->dev, "%s return=%d\n", __func__, retval); - - return retval; -} - -static int pch_spi_resume(struct pci_dev *pdev) -{ - int retval; - - struct pch_spi_board_data *board = pci_get_drvdata(pdev); - dev_dbg(&pdev->dev, "%s ENTRY\n", __func__); - - if (!board) { - dev_err(&pdev->dev, - "%s pci_get_drvdata returned NULL\n", __func__); - return -EFAULT; - } - - /* move device to DO power state */ - pci_set_power_state(pdev, PCI_D0); - - /* restore state */ - pci_restore_state(pdev); - - retval = pci_enable_device(pdev); - if (retval < 0) { - dev_err(&pdev->dev, - "%s pci_enable_device failed\n", __func__); - } else { - /* disable PM notifications */ - pci_enable_wake(pdev, PCI_D3hot, 0); - - /* register IRQ handler */ - if (!board->irq_reg_sts) { - /* register IRQ */ - retval = request_irq(board->pdev->irq, pch_spi_handler, - IRQF_SHARED, KBUILD_MODNAME, - board); - if (retval < 0) { - dev_err(&pdev->dev, - "%s request_irq failed\n", __func__); - return retval; - } - board->irq_reg_sts = true; - - /* reset PCH SPI h/w */ - pch_spi_reset(board->data->master); - pch_spi_set_master_mode(board->data->master); - - /* set suspend status to false */ - board->suspend_sts = false; - - } - } - - dev_dbg(&pdev->dev, "%s returning=%d\n", __func__, retval); - - return retval; -} -#else -#define pch_spi_suspend NULL -#define pch_spi_resume NULL - -#endif - -static struct pci_driver pch_spi_pcidev = { - .name = "pch_spi", - .id_table = pch_spi_pcidev_id, - .probe = pch_spi_probe, - .remove = pch_spi_remove, - .suspend = pch_spi_suspend, - .resume = pch_spi_resume, -}; - -static int __init pch_spi_init(void) -{ - return pci_register_driver(&pch_spi_pcidev); -} -module_init(pch_spi_init); - -static void __exit pch_spi_exit(void) -{ - pci_unregister_driver(&pch_spi_pcidev); -} -module_exit(pch_spi_exit); - -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("Topcliff PCH SPI PCI Driver"); diff --git a/drivers/spi/spi_txx9.c b/drivers/spi/spi_txx9.c deleted file mode 100644 index dfa024b..0000000 --- a/drivers/spi/spi_txx9.c +++ /dev/null @@ -1,472 +0,0 @@ -/* - * spi_txx9.c - TXx9 SPI controller driver. - * - * Based on linux/arch/mips/tx4938/toshiba_rbtx4938/spi_txx9.c - * Copyright (C) 2000-2001 Toshiba Corporation - * - * 2003-2005 (c) MontaVista Software, Inc. This file is licensed under the - * terms of the GNU General Public License version 2. This program is - * licensed "as is" without any warranty of any kind, whether express - * or implied. - * - * Support for TX4938 in 2.6 - Manish Lachwani (mlachwani@mvista.com) - * - * Convert to generic SPI framework - Atsushi Nemoto (anemo@mba.ocn.ne.jp) - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - - -#define SPI_FIFO_SIZE 4 -#define SPI_MAX_DIVIDER 0xff /* Max. value for SPCR1.SER */ -#define SPI_MIN_DIVIDER 1 /* Min. 
value for SPCR1.SER */ - -#define TXx9_SPMCR 0x00 -#define TXx9_SPCR0 0x04 -#define TXx9_SPCR1 0x08 -#define TXx9_SPFS 0x0c -#define TXx9_SPSR 0x14 -#define TXx9_SPDR 0x18 - -/* SPMCR : SPI Master Control */ -#define TXx9_SPMCR_OPMODE 0xc0 -#define TXx9_SPMCR_CONFIG 0x40 -#define TXx9_SPMCR_ACTIVE 0x80 -#define TXx9_SPMCR_SPSTP 0x02 -#define TXx9_SPMCR_BCLR 0x01 - -/* SPCR0 : SPI Control 0 */ -#define TXx9_SPCR0_TXIFL_MASK 0xc000 -#define TXx9_SPCR0_RXIFL_MASK 0x3000 -#define TXx9_SPCR0_SIDIE 0x0800 -#define TXx9_SPCR0_SOEIE 0x0400 -#define TXx9_SPCR0_RBSIE 0x0200 -#define TXx9_SPCR0_TBSIE 0x0100 -#define TXx9_SPCR0_IFSPSE 0x0010 -#define TXx9_SPCR0_SBOS 0x0004 -#define TXx9_SPCR0_SPHA 0x0002 -#define TXx9_SPCR0_SPOL 0x0001 - -/* SPSR : SPI Status */ -#define TXx9_SPSR_TBSI 0x8000 -#define TXx9_SPSR_RBSI 0x4000 -#define TXx9_SPSR_TBS_MASK 0x3800 -#define TXx9_SPSR_RBS_MASK 0x0700 -#define TXx9_SPSR_SPOE 0x0080 -#define TXx9_SPSR_IFSD 0x0008 -#define TXx9_SPSR_SIDLE 0x0004 -#define TXx9_SPSR_STRDY 0x0002 -#define TXx9_SPSR_SRRDY 0x0001 - - -struct txx9spi { - struct workqueue_struct *workqueue; - struct work_struct work; - spinlock_t lock; /* protect 'queue' */ - struct list_head queue; - wait_queue_head_t waitq; - void __iomem *membase; - int baseclk; - struct clk *clk; - u32 max_speed_hz, min_speed_hz; - int last_chipselect; - int last_chipselect_val; -}; - -static u32 txx9spi_rd(struct txx9spi *c, int reg) -{ - return __raw_readl(c->membase + reg); -} -static void txx9spi_wr(struct txx9spi *c, u32 val, int reg) -{ - __raw_writel(val, c->membase + reg); -} - -static void txx9spi_cs_func(struct spi_device *spi, struct txx9spi *c, - int on, unsigned int cs_delay) -{ - int val = (spi->mode & SPI_CS_HIGH) ? on : !on; - if (on) { - /* deselect the chip with cs_change hint in last transfer */ - if (c->last_chipselect >= 0) - gpio_set_value(c->last_chipselect, - !c->last_chipselect_val); - c->last_chipselect = spi->chip_select; - c->last_chipselect_val = val; - } else { - c->last_chipselect = -1; - ndelay(cs_delay); /* CS Hold Time */ - } - gpio_set_value(spi->chip_select, val); - ndelay(cs_delay); /* CS Setup Time / CS Recovery Time */ -} - -static int txx9spi_setup(struct spi_device *spi) -{ - struct txx9spi *c = spi_master_get_devdata(spi->master); - u8 bits_per_word; - - if (!spi->max_speed_hz - || spi->max_speed_hz > c->max_speed_hz - || spi->max_speed_hz < c->min_speed_hz) - return -EINVAL; - - bits_per_word = spi->bits_per_word; - if (bits_per_word != 8 && bits_per_word != 16) - return -EINVAL; - - if (gpio_direction_output(spi->chip_select, - !(spi->mode & SPI_CS_HIGH))) { - dev_err(&spi->dev, "Cannot setup GPIO for chipselect.\n"); - return -EINVAL; - } - - /* deselect chip */ - spin_lock(&c->lock); - txx9spi_cs_func(spi, c, 0, (NSEC_PER_SEC / 2) / spi->max_speed_hz); - spin_unlock(&c->lock); - - return 0; -} - -static irqreturn_t txx9spi_interrupt(int irq, void *dev_id) -{ - struct txx9spi *c = dev_id; - - /* disable rx intr */ - txx9spi_wr(c, txx9spi_rd(c, TXx9_SPCR0) & ~TXx9_SPCR0_RBSIE, - TXx9_SPCR0); - wake_up(&c->waitq); - return IRQ_HANDLED; -} - -static void txx9spi_work_one(struct txx9spi *c, struct spi_message *m) -{ - struct spi_device *spi = m->spi; - struct spi_transfer *t; - unsigned int cs_delay; - unsigned int cs_change = 1; - int status = 0; - u32 mcr; - u32 prev_speed_hz = 0; - u8 prev_bits_per_word = 0; - - /* CS setup/hold/recovery time in nsec */ - cs_delay = 100 + (NSEC_PER_SEC / 2) / spi->max_speed_hz; - - mcr = txx9spi_rd(c, TXx9_SPMCR); - if (unlikely((mcr & 
TXx9_SPMCR_OPMODE) == TXx9_SPMCR_ACTIVE)) { - dev_err(&spi->dev, "Bad mode.\n"); - status = -EIO; - goto exit; - } - mcr &= ~(TXx9_SPMCR_OPMODE | TXx9_SPMCR_SPSTP | TXx9_SPMCR_BCLR); - - /* enter config mode */ - txx9spi_wr(c, mcr | TXx9_SPMCR_CONFIG | TXx9_SPMCR_BCLR, TXx9_SPMCR); - txx9spi_wr(c, TXx9_SPCR0_SBOS - | ((spi->mode & SPI_CPOL) ? TXx9_SPCR0_SPOL : 0) - | ((spi->mode & SPI_CPHA) ? TXx9_SPCR0_SPHA : 0) - | 0x08, - TXx9_SPCR0); - - list_for_each_entry (t, &m->transfers, transfer_list) { - const void *txbuf = t->tx_buf; - void *rxbuf = t->rx_buf; - u32 data; - unsigned int len = t->len; - unsigned int wsize; - u32 speed_hz = t->speed_hz ? : spi->max_speed_hz; - u8 bits_per_word = t->bits_per_word ? : spi->bits_per_word; - - bits_per_word = bits_per_word ? : 8; - wsize = bits_per_word >> 3; /* in bytes */ - - if (prev_speed_hz != speed_hz - || prev_bits_per_word != bits_per_word) { - int n = DIV_ROUND_UP(c->baseclk, speed_hz) - 1; - n = clamp(n, SPI_MIN_DIVIDER, SPI_MAX_DIVIDER); - /* enter config mode */ - txx9spi_wr(c, mcr | TXx9_SPMCR_CONFIG | TXx9_SPMCR_BCLR, - TXx9_SPMCR); - txx9spi_wr(c, (n << 8) | bits_per_word, TXx9_SPCR1); - /* enter active mode */ - txx9spi_wr(c, mcr | TXx9_SPMCR_ACTIVE, TXx9_SPMCR); - - prev_speed_hz = speed_hz; - prev_bits_per_word = bits_per_word; - } - - if (cs_change) - txx9spi_cs_func(spi, c, 1, cs_delay); - cs_change = t->cs_change; - while (len) { - unsigned int count = SPI_FIFO_SIZE; - int i; - u32 cr0; - - if (len < count * wsize) - count = len / wsize; - /* now tx must be idle... */ - while (!(txx9spi_rd(c, TXx9_SPSR) & TXx9_SPSR_SIDLE)) - cpu_relax(); - cr0 = txx9spi_rd(c, TXx9_SPCR0); - cr0 &= ~TXx9_SPCR0_RXIFL_MASK; - cr0 |= (count - 1) << 12; - /* enable rx intr */ - cr0 |= TXx9_SPCR0_RBSIE; - txx9spi_wr(c, cr0, TXx9_SPCR0); - /* send */ - for (i = 0; i < count; i++) { - if (txbuf) { - data = (wsize == 1) - ? *(const u8 *)txbuf - : *(const u16 *)txbuf; - txx9spi_wr(c, data, TXx9_SPDR); - txbuf += wsize; - } else - txx9spi_wr(c, 0, TXx9_SPDR); - } - /* wait all rx data */ - wait_event(c->waitq, - txx9spi_rd(c, TXx9_SPSR) & TXx9_SPSR_RBSI); - /* receive */ - for (i = 0; i < count; i++) { - data = txx9spi_rd(c, TXx9_SPDR); - if (rxbuf) { - if (wsize == 1) - *(u8 *)rxbuf = data; - else - *(u16 *)rxbuf = data; - rxbuf += wsize; - } - } - len -= count * wsize; - } - m->actual_length += t->len; - if (t->delay_usecs) - udelay(t->delay_usecs); - - if (!cs_change) - continue; - if (t->transfer_list.next == &m->transfers) - break; - /* sometimes a short mid-message deselect of the chip - * may be needed to terminate a mode or command - */ - txx9spi_cs_func(spi, c, 0, cs_delay); - } - -exit: - m->status = status; - m->complete(m->context); - - /* normally deactivate chipselect ... unless no error and - * cs_change has hinted that the next message will probably - * be for this chip too. 
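[Note: in txx9spi_work_one() above, the per-transfer clock is programmed by writing (n << 8) | bits_per_word to SPCR1, with n = DIV_ROUND_UP(baseclk, speed_hz) - 1 clamped to [SPI_MIN_DIVIDER, SPI_MAX_DIVIDER], so the resulting SCLK is baseclk / (n + 1). A runnable check of that selection; the 20 MHz base clock is an arbitrary example, not a value read from real hardware.]

#include <stdio.h>

#define SPI_MAX_DIVIDER 0xff   /* max value for SPCR1.SER */
#define SPI_MIN_DIVIDER 1      /* min value for SPCR1.SER */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Same divider selection as txx9spi_work_one(). */
static int txx9_divider(unsigned baseclk, unsigned speed_hz)
{
    int n = DIV_ROUND_UP(baseclk, speed_hz) - 1;

    if (n < SPI_MIN_DIVIDER)
        n = SPI_MIN_DIVIDER;
    if (n > SPI_MAX_DIVIDER)
        n = SPI_MAX_DIVIDER;
    return n;
}

int main(void)
{
    unsigned baseclk = 20000000;          /* example: 20 MHz base clock */
    unsigned speeds[] = { 10000000, 1000000, 10000 };

    for (unsigned i = 0; i < sizeof(speeds) / sizeof(speeds[0]); i++) {
        int n = txx9_divider(baseclk, speeds[i]);

        printf("want %8u Hz -> SER %3d -> SCLK %8u Hz\n",
               speeds[i], n, baseclk / (n + 1));
    }
    return 0;
}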
- */ - if (!(status == 0 && cs_change)) - txx9spi_cs_func(spi, c, 0, cs_delay); - - /* enter config mode */ - txx9spi_wr(c, mcr | TXx9_SPMCR_CONFIG | TXx9_SPMCR_BCLR, TXx9_SPMCR); -} - -static void txx9spi_work(struct work_struct *work) -{ - struct txx9spi *c = container_of(work, struct txx9spi, work); - unsigned long flags; - - spin_lock_irqsave(&c->lock, flags); - while (!list_empty(&c->queue)) { - struct spi_message *m; - - m = container_of(c->queue.next, struct spi_message, queue); - list_del_init(&m->queue); - spin_unlock_irqrestore(&c->lock, flags); - - txx9spi_work_one(c, m); - - spin_lock_irqsave(&c->lock, flags); - } - spin_unlock_irqrestore(&c->lock, flags); -} - -static int txx9spi_transfer(struct spi_device *spi, struct spi_message *m) -{ - struct spi_master *master = spi->master; - struct txx9spi *c = spi_master_get_devdata(master); - struct spi_transfer *t; - unsigned long flags; - - m->actual_length = 0; - - /* check each transfer's parameters */ - list_for_each_entry (t, &m->transfers, transfer_list) { - u32 speed_hz = t->speed_hz ? : spi->max_speed_hz; - u8 bits_per_word = t->bits_per_word ? : spi->bits_per_word; - - bits_per_word = bits_per_word ? : 8; - if (!t->tx_buf && !t->rx_buf && t->len) - return -EINVAL; - if (bits_per_word != 8 && bits_per_word != 16) - return -EINVAL; - if (t->len & ((bits_per_word >> 3) - 1)) - return -EINVAL; - if (speed_hz < c->min_speed_hz || speed_hz > c->max_speed_hz) - return -EINVAL; - } - - spin_lock_irqsave(&c->lock, flags); - list_add_tail(&m->queue, &c->queue); - queue_work(c->workqueue, &c->work); - spin_unlock_irqrestore(&c->lock, flags); - - return 0; -} - -static int __init txx9spi_probe(struct platform_device *dev) -{ - struct spi_master *master; - struct txx9spi *c; - struct resource *res; - int ret = -ENODEV; - u32 mcr; - int irq; - - master = spi_alloc_master(&dev->dev, sizeof(*c)); - if (!master) - return ret; - c = spi_master_get_devdata(master); - platform_set_drvdata(dev, master); - - INIT_WORK(&c->work, txx9spi_work); - spin_lock_init(&c->lock); - INIT_LIST_HEAD(&c->queue); - init_waitqueue_head(&c->waitq); - - c->clk = clk_get(&dev->dev, "spi-baseclk"); - if (IS_ERR(c->clk)) { - ret = PTR_ERR(c->clk); - c->clk = NULL; - goto exit; - } - ret = clk_enable(c->clk); - if (ret) { - clk_put(c->clk); - c->clk = NULL; - goto exit; - } - c->baseclk = clk_get_rate(c->clk); - c->min_speed_hz = DIV_ROUND_UP(c->baseclk, SPI_MAX_DIVIDER + 1); - c->max_speed_hz = c->baseclk / (SPI_MIN_DIVIDER + 1); - - res = platform_get_resource(dev, IORESOURCE_MEM, 0); - if (!res) - goto exit_busy; - if (!devm_request_mem_region(&dev->dev, res->start, resource_size(res), - "spi_txx9")) - goto exit_busy; - c->membase = devm_ioremap(&dev->dev, res->start, resource_size(res)); - if (!c->membase) - goto exit_busy; - - /* enter config mode */ - mcr = txx9spi_rd(c, TXx9_SPMCR); - mcr &= ~(TXx9_SPMCR_OPMODE | TXx9_SPMCR_SPSTP | TXx9_SPMCR_BCLR); - txx9spi_wr(c, mcr | TXx9_SPMCR_CONFIG | TXx9_SPMCR_BCLR, TXx9_SPMCR); - - irq = platform_get_irq(dev, 0); - if (irq < 0) - goto exit_busy; - ret = devm_request_irq(&dev->dev, irq, txx9spi_interrupt, 0, - "spi_txx9", c); - if (ret) - goto exit; - - c->workqueue = create_singlethread_workqueue( - dev_name(master->dev.parent)); - if (!c->workqueue) - goto exit_busy; - c->last_chipselect = -1; - - dev_info(&dev->dev, "at %#llx, irq %d, %dMHz\n", - (unsigned long long)res->start, irq, - (c->baseclk + 500000) / 1000000); - - /* the spi->mode bits understood by this driver: */ - master->mode_bits = SPI_CS_HIGH | SPI_CPOL 
| SPI_CPHA; - - master->bus_num = dev->id; - master->setup = txx9spi_setup; - master->transfer = txx9spi_transfer; - master->num_chipselect = (u16)UINT_MAX; /* any GPIO numbers */ - - ret = spi_register_master(master); - if (ret) - goto exit; - return 0; -exit_busy: - ret = -EBUSY; -exit: - if (c->workqueue) - destroy_workqueue(c->workqueue); - if (c->clk) { - clk_disable(c->clk); - clk_put(c->clk); - } - platform_set_drvdata(dev, NULL); - spi_master_put(master); - return ret; -} - -static int __exit txx9spi_remove(struct platform_device *dev) -{ - struct spi_master *master = spi_master_get(platform_get_drvdata(dev)); - struct txx9spi *c = spi_master_get_devdata(master); - - spi_unregister_master(master); - platform_set_drvdata(dev, NULL); - destroy_workqueue(c->workqueue); - clk_disable(c->clk); - clk_put(c->clk); - spi_master_put(master); - return 0; -} - -/* work with hotplug and coldplug */ -MODULE_ALIAS("platform:spi_txx9"); - -static struct platform_driver txx9spi_driver = { - .remove = __exit_p(txx9spi_remove), - .driver = { - .name = "spi_txx9", - .owner = THIS_MODULE, - }, -}; - -static int __init txx9spi_init(void) -{ - return platform_driver_probe(&txx9spi_driver, txx9spi_probe); -} -subsys_initcall(txx9spi_init); - -static void __exit txx9spi_exit(void) -{ - platform_driver_unregister(&txx9spi_driver); -} -module_exit(txx9spi_exit); - -MODULE_DESCRIPTION("TXx9 SPI Driver"); -MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c index d9fd862..830adbe 100644 --- a/drivers/spi/spidev.c +++ b/drivers/spi/spidev.c @@ -1,5 +1,5 @@ /* - * spidev.c -- simple synchronous userspace interface to SPI devices + * Simple synchronous userspace interface to SPI devices * * Copyright (C) 2006 SWAPP * Andrea Paterniani diff --git a/drivers/spi/ti-ssp-spi.c b/drivers/spi/ti-ssp-spi.c deleted file mode 100644 index ee22795..0000000 --- a/drivers/spi/ti-ssp-spi.c +++ /dev/null @@ -1,402 +0,0 @@ -/* - * Sequencer Serial Port (SSP) based SPI master driver - * - * Copyright (C) 2010 Texas Instruments Inc - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#include -#include -#include -#include -#include -#include -#include - -#define MODE_BITS (SPI_CPHA | SPI_CPOL | SPI_CS_HIGH) - -struct ti_ssp_spi { - struct spi_master *master; - struct device *dev; - spinlock_t lock; - struct list_head msg_queue; - struct completion complete; - bool shutdown; - struct workqueue_struct *workqueue; - struct work_struct work; - u8 mode, bpw; - int cs_active; - u32 pc_en, pc_dis, pc_wr, pc_rd; - void (*select)(int cs); -}; - -static u32 ti_ssp_spi_rx(struct ti_ssp_spi *hw) -{ - u32 ret; - - ti_ssp_run(hw->dev, hw->pc_rd, 0, &ret); - return ret; -} - -static void ti_ssp_spi_tx(struct ti_ssp_spi *hw, u32 data) -{ - ti_ssp_run(hw->dev, hw->pc_wr, data << (32 - hw->bpw), NULL); -} - -static int ti_ssp_spi_txrx(struct ti_ssp_spi *hw, struct spi_message *msg, - struct spi_transfer *t) -{ - int count; - - if (hw->bpw <= 8) { - u8 *rx = t->rx_buf; - const u8 *tx = t->tx_buf; - - for (count = 0; count < t->len; count += 1) { - if (t->tx_buf) - ti_ssp_spi_tx(hw, *tx++); - if (t->rx_buf) - *rx++ = ti_ssp_spi_rx(hw); - } - } else if (hw->bpw <= 16) { - u16 *rx = t->rx_buf; - const u16 *tx = t->tx_buf; - - for (count = 0; count < t->len; count += 2) { - if (t->tx_buf) - ti_ssp_spi_tx(hw, *tx++); - if (t->rx_buf) - *rx++ = ti_ssp_spi_rx(hw); - } - } else { - u32 *rx = t->rx_buf; - const u32 *tx = t->tx_buf; - - for (count = 0; count < t->len; count += 4) { - if (t->tx_buf) - ti_ssp_spi_tx(hw, *tx++); - if (t->rx_buf) - *rx++ = ti_ssp_spi_rx(hw); - } - } - - msg->actual_length += count; /* bytes transferred */ - - dev_dbg(&msg->spi->dev, "xfer %s%s, %d bytes, %d bpw, count %d%s\n", - t->tx_buf ? "tx" : "", t->rx_buf ? "rx" : "", t->len, - hw->bpw, count, (count < t->len) ? " (under)" : ""); - - return (count < t->len) ? -EIO : 0; /* left over data */ -} - -static void ti_ssp_spi_chip_select(struct ti_ssp_spi *hw, int cs_active) -{ - cs_active = !!cs_active; - if (cs_active == hw->cs_active) - return; - ti_ssp_run(hw->dev, cs_active ? hw->pc_en : hw->pc_dis, 0, NULL); - hw->cs_active = cs_active; -} - -#define __SHIFT_OUT(bits) (SSP_OPCODE_SHIFT | SSP_OUT_MODE | \ - cs_en | clk | SSP_COUNT((bits) * 2 - 1)) -#define __SHIFT_IN(bits) (SSP_OPCODE_SHIFT | SSP_IN_MODE | \ - cs_en | clk | SSP_COUNT((bits) * 2 - 1)) - -static int ti_ssp_spi_setup_transfer(struct ti_ssp_spi *hw, u8 bpw, u8 mode) -{ - int error, idx = 0; - u32 seqram[16]; - u32 cs_en, cs_dis, clk; - u32 topbits, botbits; - - mode &= MODE_BITS; - if (mode == hw->mode && bpw == hw->bpw) - return 0; - - cs_en = (mode & SPI_CS_HIGH) ? SSP_CS_HIGH : SSP_CS_LOW; - cs_dis = (mode & SPI_CS_HIGH) ? SSP_CS_LOW : SSP_CS_HIGH; - clk = (mode & SPI_CPOL) ? SSP_CLK_HIGH : SSP_CLK_LOW; - - /* Construct instructions */ - - /* Disable Chip Select */ - hw->pc_dis = idx; - seqram[idx++] = SSP_OPCODE_DIRECT | SSP_OUT_MODE | cs_dis | clk; - seqram[idx++] = SSP_OPCODE_STOP | SSP_OUT_MODE | cs_dis | clk; - - /* Enable Chip Select */ - hw->pc_en = idx; - seqram[idx++] = SSP_OPCODE_DIRECT | SSP_OUT_MODE | cs_en | clk; - seqram[idx++] = SSP_OPCODE_STOP | SSP_OUT_MODE | cs_en | clk; - - /* Reads and writes need to be split for bpw > 16 */ - topbits = (bpw > 16) ? 
16 : bpw; - botbits = bpw - topbits; - - /* Write */ - hw->pc_wr = idx; - seqram[idx++] = __SHIFT_OUT(topbits) | SSP_ADDR_REG; - if (botbits) - seqram[idx++] = __SHIFT_OUT(botbits) | SSP_DATA_REG; - seqram[idx++] = SSP_OPCODE_STOP | SSP_OUT_MODE | cs_en | clk; - - /* Read */ - hw->pc_rd = idx; - if (botbits) - seqram[idx++] = __SHIFT_IN(botbits) | SSP_ADDR_REG; - seqram[idx++] = __SHIFT_IN(topbits) | SSP_DATA_REG; - seqram[idx++] = SSP_OPCODE_STOP | SSP_OUT_MODE | cs_en | clk; - - error = ti_ssp_load(hw->dev, 0, seqram, idx); - if (error < 0) - return error; - - error = ti_ssp_set_mode(hw->dev, ((mode & SPI_CPHA) ? - 0 : SSP_EARLY_DIN)); - if (error < 0) - return error; - - hw->bpw = bpw; - hw->mode = mode; - - return error; -} - -static void ti_ssp_spi_work(struct work_struct *work) -{ - struct ti_ssp_spi *hw = container_of(work, struct ti_ssp_spi, work); - - spin_lock(&hw->lock); - - while (!list_empty(&hw->msg_queue)) { - struct spi_message *m; - struct spi_device *spi; - struct spi_transfer *t = NULL; - int status = 0; - - m = container_of(hw->msg_queue.next, struct spi_message, - queue); - - list_del_init(&m->queue); - - spin_unlock(&hw->lock); - - spi = m->spi; - - if (hw->select) - hw->select(spi->chip_select); - - list_for_each_entry(t, &m->transfers, transfer_list) { - int bpw = spi->bits_per_word; - int xfer_status; - - if (t->bits_per_word) - bpw = t->bits_per_word; - - if (ti_ssp_spi_setup_transfer(hw, bpw, spi->mode) < 0) - break; - - ti_ssp_spi_chip_select(hw, 1); - - xfer_status = ti_ssp_spi_txrx(hw, m, t); - if (xfer_status < 0) - status = xfer_status; - - if (t->delay_usecs) - udelay(t->delay_usecs); - - if (t->cs_change) - ti_ssp_spi_chip_select(hw, 0); - } - - ti_ssp_spi_chip_select(hw, 0); - m->status = status; - m->complete(m->context); - - spin_lock(&hw->lock); - } - - if (hw->shutdown) - complete(&hw->complete); - - spin_unlock(&hw->lock); -} - -static int ti_ssp_spi_setup(struct spi_device *spi) -{ - if (spi->bits_per_word > 32) - return -EINVAL; - - return 0; -} - -static int ti_ssp_spi_transfer(struct spi_device *spi, struct spi_message *m) -{ - struct ti_ssp_spi *hw; - struct spi_transfer *t; - int error = 0; - - m->actual_length = 0; - m->status = -EINPROGRESS; - - hw = spi_master_get_devdata(spi->master); - - if (list_empty(&m->transfers) || !m->complete) - return -EINVAL; - - list_for_each_entry(t, &m->transfers, transfer_list) { - if (t->len && !(t->rx_buf || t->tx_buf)) { - dev_err(&spi->dev, "invalid xfer, no buffer\n"); - return -EINVAL; - } - - if (t->len && t->rx_buf && t->tx_buf) { - dev_err(&spi->dev, "invalid xfer, full duplex\n"); - return -EINVAL; - } - - if (t->bits_per_word > 32) { - dev_err(&spi->dev, "invalid xfer width %d\n", - t->bits_per_word); - return -EINVAL; - } - } - - spin_lock(&hw->lock); - if (hw->shutdown) { - error = -ESHUTDOWN; - goto error_unlock; - } - list_add_tail(&m->queue, &hw->msg_queue); - queue_work(hw->workqueue, &hw->work); -error_unlock: - spin_unlock(&hw->lock); - return error; -} - -static int __devinit ti_ssp_spi_probe(struct platform_device *pdev) -{ - const struct ti_ssp_spi_data *pdata; - struct ti_ssp_spi *hw; - struct spi_master *master; - struct device *dev = &pdev->dev; - int error = 0; - - pdata = dev->platform_data; - if (!pdata) { - dev_err(dev, "platform data not found\n"); - return -EINVAL; - } - - master = spi_alloc_master(dev, sizeof(struct ti_ssp_spi)); - if (!master) { - dev_err(dev, "cannot allocate SPI master\n"); - return -ENOMEM; - } - - hw = spi_master_get_devdata(master); - 
platform_set_drvdata(pdev, hw); - - hw->master = master; - hw->dev = dev; - hw->select = pdata->select; - - spin_lock_init(&hw->lock); - init_completion(&hw->complete); - INIT_LIST_HEAD(&hw->msg_queue); - INIT_WORK(&hw->work, ti_ssp_spi_work); - - hw->workqueue = create_singlethread_workqueue(dev_name(dev)); - if (!hw->workqueue) { - error = -ENOMEM; - dev_err(dev, "work queue creation failed\n"); - goto error_wq; - } - - error = ti_ssp_set_iosel(hw->dev, pdata->iosel); - if (error < 0) { - dev_err(dev, "io setup failed\n"); - goto error_iosel; - } - - master->bus_num = pdev->id; - master->num_chipselect = pdata->num_cs; - master->mode_bits = MODE_BITS; - master->flags = SPI_MASTER_HALF_DUPLEX; - master->setup = ti_ssp_spi_setup; - master->transfer = ti_ssp_spi_transfer; - - error = spi_register_master(master); - if (error) { - dev_err(dev, "master registration failed\n"); - goto error_reg; - } - - return 0; - -error_reg: -error_iosel: - destroy_workqueue(hw->workqueue); -error_wq: - spi_master_put(master); - return error; -} - -static int __devexit ti_ssp_spi_remove(struct platform_device *pdev) -{ - struct ti_ssp_spi *hw = platform_get_drvdata(pdev); - int error; - - hw->shutdown = 1; - while (!list_empty(&hw->msg_queue)) { - error = wait_for_completion_interruptible(&hw->complete); - if (error < 0) { - hw->shutdown = 0; - return error; - } - } - destroy_workqueue(hw->workqueue); - spi_unregister_master(hw->master); - - return 0; -} - -static struct platform_driver ti_ssp_spi_driver = { - .probe = ti_ssp_spi_probe, - .remove = __devexit_p(ti_ssp_spi_remove), - .driver = { - .name = "ti-ssp-spi", - .owner = THIS_MODULE, - }, -}; - -static int __init ti_ssp_spi_init(void) -{ - return platform_driver_register(&ti_ssp_spi_driver); -} -module_init(ti_ssp_spi_init); - -static void __exit ti_ssp_spi_exit(void) -{ - platform_driver_unregister(&ti_ssp_spi_driver); -} -module_exit(ti_ssp_spi_exit); - -MODULE_DESCRIPTION("SSP SPI Master"); -MODULE_AUTHOR("Cyril Chemparathy"); -MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:ti-ssp-spi"); diff --git a/drivers/spi/tle62x0.c b/drivers/spi/tle62x0.c deleted file mode 100644 index 32a4087..0000000 --- a/drivers/spi/tle62x0.c +++ /dev/null @@ -1,334 +0,0 @@ -/* - * tle62x0.c -- support Infineon TLE62x0 driver chips - * - * Copyright (c) 2007 Simtec Electronics - * Ben Dooks, - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include -#include -#include - -#include -#include - - -#define CMD_READ 0x00 -#define CMD_SET 0xff - -#define DIAG_NORMAL 0x03 -#define DIAG_OVERLOAD 0x02 -#define DIAG_OPEN 0x01 -#define DIAG_SHORTGND 0x00 - -struct tle62x0_state { - struct spi_device *us; - struct mutex lock; - unsigned int nr_gpio; - unsigned int gpio_state; - - unsigned char tx_buff[4]; - unsigned char rx_buff[4]; -}; - -static int to_gpio_num(struct device_attribute *attr); - -static inline int tle62x0_write(struct tle62x0_state *st) -{ - unsigned char *buff = st->tx_buff; - unsigned int gpio_state = st->gpio_state; - - buff[0] = CMD_SET; - - if (st->nr_gpio == 16) { - buff[1] = gpio_state >> 8; - buff[2] = gpio_state; - } else { - buff[1] = gpio_state; - } - - dev_dbg(&st->us->dev, "buff %02x,%02x,%02x\n", - buff[0], buff[1], buff[2]); - - return spi_write(st->us, buff, (st->nr_gpio == 16) ? 
3 : 2); -} - -static inline int tle62x0_read(struct tle62x0_state *st) -{ - unsigned char *txbuff = st->tx_buff; - struct spi_transfer xfer = { - .tx_buf = txbuff, - .rx_buf = st->rx_buff, - .len = (st->nr_gpio * 2) / 8, - }; - struct spi_message msg; - - txbuff[0] = CMD_READ; - txbuff[1] = 0x00; - txbuff[2] = 0x00; - txbuff[3] = 0x00; - - spi_message_init(&msg); - spi_message_add_tail(&xfer, &msg); - - return spi_sync(st->us, &msg); -} - -static unsigned char *decode_fault(unsigned int fault_code) -{ - fault_code &= 3; - - switch (fault_code) { - case DIAG_NORMAL: - return "N"; - case DIAG_OVERLOAD: - return "V"; - case DIAG_OPEN: - return "O"; - case DIAG_SHORTGND: - return "G"; - } - - return "?"; -} - -static ssize_t tle62x0_status_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct tle62x0_state *st = dev_get_drvdata(dev); - char *bp = buf; - unsigned char *buff = st->rx_buff; - unsigned long fault = 0; - int ptr; - int ret; - - mutex_lock(&st->lock); - ret = tle62x0_read(st); - dev_dbg(dev, "tle62x0_read() returned %d\n", ret); - if (ret < 0) { - mutex_unlock(&st->lock); - return ret; - } - - for (ptr = 0; ptr < (st->nr_gpio * 2)/8; ptr += 1) { - fault <<= 8; - fault |= ((unsigned long)buff[ptr]); - - dev_dbg(dev, "byte %d is %02x\n", ptr, buff[ptr]); - } - - for (ptr = 0; ptr < st->nr_gpio; ptr++) { - bp += sprintf(bp, "%s ", decode_fault(fault >> (ptr * 2))); - } - - *bp++ = '\n'; - - mutex_unlock(&st->lock); - return bp - buf; -} - -static DEVICE_ATTR(status_show, S_IRUGO, tle62x0_status_show, NULL); - -static ssize_t tle62x0_gpio_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct tle62x0_state *st = dev_get_drvdata(dev); - int gpio_num = to_gpio_num(attr); - int value; - - mutex_lock(&st->lock); - value = (st->gpio_state >> gpio_num) & 1; - mutex_unlock(&st->lock); - - return snprintf(buf, PAGE_SIZE, "%d", value); -} - -static ssize_t tle62x0_gpio_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t len) -{ - struct tle62x0_state *st = dev_get_drvdata(dev); - int gpio_num = to_gpio_num(attr); - unsigned long val; - char *endp; - - val = simple_strtoul(buf, &endp, 0); - if (buf == endp) - return -EINVAL; - - dev_dbg(dev, "setting gpio %d to %ld\n", gpio_num, val); - - mutex_lock(&st->lock); - - if (val) - st->gpio_state |= 1 << gpio_num; - else - st->gpio_state &= ~(1 << gpio_num); - - tle62x0_write(st); - mutex_unlock(&st->lock); - - return len; -} - -static DEVICE_ATTR(gpio1, S_IWUSR|S_IRUGO, - tle62x0_gpio_show, tle62x0_gpio_store); -static DEVICE_ATTR(gpio2, S_IWUSR|S_IRUGO, - tle62x0_gpio_show, tle62x0_gpio_store); -static DEVICE_ATTR(gpio3, S_IWUSR|S_IRUGO, - tle62x0_gpio_show, tle62x0_gpio_store); -static DEVICE_ATTR(gpio4, S_IWUSR|S_IRUGO, - tle62x0_gpio_show, tle62x0_gpio_store); -static DEVICE_ATTR(gpio5, S_IWUSR|S_IRUGO, - tle62x0_gpio_show, tle62x0_gpio_store); -static DEVICE_ATTR(gpio6, S_IWUSR|S_IRUGO, - tle62x0_gpio_show, tle62x0_gpio_store); -static DEVICE_ATTR(gpio7, S_IWUSR|S_IRUGO, - tle62x0_gpio_show, tle62x0_gpio_store); -static DEVICE_ATTR(gpio8, S_IWUSR|S_IRUGO, - tle62x0_gpio_show, tle62x0_gpio_store); -static DEVICE_ATTR(gpio9, S_IWUSR|S_IRUGO, - tle62x0_gpio_show, tle62x0_gpio_store); -static DEVICE_ATTR(gpio10, S_IWUSR|S_IRUGO, - tle62x0_gpio_show, tle62x0_gpio_store); -static DEVICE_ATTR(gpio11, S_IWUSR|S_IRUGO, - tle62x0_gpio_show, tle62x0_gpio_store); -static DEVICE_ATTR(gpio12, S_IWUSR|S_IRUGO, - tle62x0_gpio_show, tle62x0_gpio_store); -static 
DEVICE_ATTR(gpio13, S_IWUSR|S_IRUGO, - tle62x0_gpio_show, tle62x0_gpio_store); -static DEVICE_ATTR(gpio14, S_IWUSR|S_IRUGO, - tle62x0_gpio_show, tle62x0_gpio_store); -static DEVICE_ATTR(gpio15, S_IWUSR|S_IRUGO, - tle62x0_gpio_show, tle62x0_gpio_store); -static DEVICE_ATTR(gpio16, S_IWUSR|S_IRUGO, - tle62x0_gpio_show, tle62x0_gpio_store); - -static struct device_attribute *gpio_attrs[] = { - [0] = &dev_attr_gpio1, - [1] = &dev_attr_gpio2, - [2] = &dev_attr_gpio3, - [3] = &dev_attr_gpio4, - [4] = &dev_attr_gpio5, - [5] = &dev_attr_gpio6, - [6] = &dev_attr_gpio7, - [7] = &dev_attr_gpio8, - [8] = &dev_attr_gpio9, - [9] = &dev_attr_gpio10, - [10] = &dev_attr_gpio11, - [11] = &dev_attr_gpio12, - [12] = &dev_attr_gpio13, - [13] = &dev_attr_gpio14, - [14] = &dev_attr_gpio15, - [15] = &dev_attr_gpio16 -}; - -static int to_gpio_num(struct device_attribute *attr) -{ - int ptr; - - for (ptr = 0; ptr < ARRAY_SIZE(gpio_attrs); ptr++) { - if (gpio_attrs[ptr] == attr) - return ptr; - } - - return -1; -} - -static int __devinit tle62x0_probe(struct spi_device *spi) -{ - struct tle62x0_state *st; - struct tle62x0_pdata *pdata; - int ptr; - int ret; - - pdata = spi->dev.platform_data; - if (pdata == NULL) { - dev_err(&spi->dev, "no device data specified\n"); - return -EINVAL; - } - - st = kzalloc(sizeof(struct tle62x0_state), GFP_KERNEL); - if (st == NULL) { - dev_err(&spi->dev, "no memory for device state\n"); - return -ENOMEM; - } - - st->us = spi; - st->nr_gpio = pdata->gpio_count; - st->gpio_state = pdata->init_state; - - mutex_init(&st->lock); - - ret = device_create_file(&spi->dev, &dev_attr_status_show); - if (ret) { - dev_err(&spi->dev, "cannot create status attribute\n"); - goto err_status; - } - - for (ptr = 0; ptr < pdata->gpio_count; ptr++) { - ret = device_create_file(&spi->dev, gpio_attrs[ptr]); - if (ret) { - dev_err(&spi->dev, "cannot create gpio attribute\n"); - goto err_gpios; - } - } - - /* tle62x0_write(st); */ - spi_set_drvdata(spi, st); - return 0; - - err_gpios: - while (--ptr >= 0) - device_remove_file(&spi->dev, gpio_attrs[ptr]); - - device_remove_file(&spi->dev, &dev_attr_status_show); - - err_status: - kfree(st); - return ret; -} - -static int __devexit tle62x0_remove(struct spi_device *spi) -{ - struct tle62x0_state *st = spi_get_drvdata(spi); - int ptr; - - for (ptr = 0; ptr < st->nr_gpio; ptr++) - device_remove_file(&spi->dev, gpio_attrs[ptr]); - - device_remove_file(&spi->dev, &dev_attr_status_show); - kfree(st); - return 0; -} - -static struct spi_driver tle62x0_driver = { - .driver = { - .name = "tle62x0", - .owner = THIS_MODULE, - }, - .probe = tle62x0_probe, - .remove = __devexit_p(tle62x0_remove), -}; - -static __init int tle62x0_init(void) -{ - return spi_register_driver(&tle62x0_driver); -} - -static __exit void tle62x0_exit(void) -{ - spi_unregister_driver(&tle62x0_driver); -} - -module_init(tle62x0_init); -module_exit(tle62x0_exit); - -MODULE_AUTHOR("Ben Dooks "); -MODULE_DESCRIPTION("TLE62x0 SPI driver"); -MODULE_LICENSE("GPL v2"); -MODULE_ALIAS("spi:tle62x0"); diff --git a/drivers/spi/xilinx_spi.c b/drivers/spi/xilinx_spi.c deleted file mode 100644 index 4d2c75d..0000000 --- a/drivers/spi/xilinx_spi.c +++ /dev/null @@ -1,556 +0,0 @@ -/* - * Xilinx SPI controller driver (master mode only) - * - * Author: MontaVista Software, Inc. - * source@mvista.com - * - * Copyright (c) 2010 Secret Lab Technologies, Ltd. - * Copyright (c) 2009 Intel Corporation - * 2002-2007 (c) MontaVista Software, Inc. 
- - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define XILINX_SPI_NAME "xilinx_spi" - -/* Register definitions as per "OPB Serial Peripheral Interface (SPI) (v1.00e) - * Product Specification", DS464 - */ -#define XSPI_CR_OFFSET 0x60 /* Control Register */ - -#define XSPI_CR_ENABLE 0x02 -#define XSPI_CR_MASTER_MODE 0x04 -#define XSPI_CR_CPOL 0x08 -#define XSPI_CR_CPHA 0x10 -#define XSPI_CR_MODE_MASK (XSPI_CR_CPHA | XSPI_CR_CPOL) -#define XSPI_CR_TXFIFO_RESET 0x20 -#define XSPI_CR_RXFIFO_RESET 0x40 -#define XSPI_CR_MANUAL_SSELECT 0x80 -#define XSPI_CR_TRANS_INHIBIT 0x100 -#define XSPI_CR_LSB_FIRST 0x200 - -#define XSPI_SR_OFFSET 0x64 /* Status Register */ - -#define XSPI_SR_RX_EMPTY_MASK 0x01 /* Receive FIFO is empty */ -#define XSPI_SR_RX_FULL_MASK 0x02 /* Receive FIFO is full */ -#define XSPI_SR_TX_EMPTY_MASK 0x04 /* Transmit FIFO is empty */ -#define XSPI_SR_TX_FULL_MASK 0x08 /* Transmit FIFO is full */ -#define XSPI_SR_MODE_FAULT_MASK 0x10 /* Mode fault error */ - -#define XSPI_TXD_OFFSET 0x68 /* Data Transmit Register */ -#define XSPI_RXD_OFFSET 0x6c /* Data Receive Register */ - -#define XSPI_SSR_OFFSET 0x70 /* 32-bit Slave Select Register */ - -/* Register definitions as per "OPB IPIF (v3.01c) Product Specification", DS414 - * IPIF registers are 32 bit - */ -#define XIPIF_V123B_DGIER_OFFSET 0x1c /* IPIF global int enable reg */ -#define XIPIF_V123B_GINTR_ENABLE 0x80000000 - -#define XIPIF_V123B_IISR_OFFSET 0x20 /* IPIF interrupt status reg */ -#define XIPIF_V123B_IIER_OFFSET 0x28 /* IPIF interrupt enable reg */ - -#define XSPI_INTR_MODE_FAULT 0x01 /* Mode fault error */ -#define XSPI_INTR_SLAVE_MODE_FAULT 0x02 /* Selected as slave while - * disabled */ -#define XSPI_INTR_TX_EMPTY 0x04 /* TxFIFO is empty */ -#define XSPI_INTR_TX_UNDERRUN 0x08 /* TxFIFO was underrun */ -#define XSPI_INTR_RX_FULL 0x10 /* RxFIFO is full */ -#define XSPI_INTR_RX_OVERRUN 0x20 /* RxFIFO was overrun */ -#define XSPI_INTR_TX_HALF_EMPTY 0x40 /* TxFIFO is half empty */ - -#define XIPIF_V123B_RESETR_OFFSET 0x40 /* IPIF reset register */ -#define XIPIF_V123B_RESET_MASK 0x0a /* the value to write */ - -struct xilinx_spi { - /* bitbang has to be first */ - struct spi_bitbang bitbang; - struct completion done; - struct resource mem; /* phys mem */ - void __iomem *regs; /* virt. 
address of the control registers */ - - u32 irq; - - u8 *rx_ptr; /* pointer in the Tx buffer */ - const u8 *tx_ptr; /* pointer in the Rx buffer */ - int remaining_bytes; /* the number of bytes left to transfer */ - u8 bits_per_word; - unsigned int (*read_fn) (void __iomem *); - void (*write_fn) (u32, void __iomem *); - void (*tx_fn) (struct xilinx_spi *); - void (*rx_fn) (struct xilinx_spi *); -}; - -static void xspi_write32(u32 val, void __iomem *addr) -{ - iowrite32(val, addr); -} - -static unsigned int xspi_read32(void __iomem *addr) -{ - return ioread32(addr); -} - -static void xspi_write32_be(u32 val, void __iomem *addr) -{ - iowrite32be(val, addr); -} - -static unsigned int xspi_read32_be(void __iomem *addr) -{ - return ioread32be(addr); -} - -static void xspi_tx8(struct xilinx_spi *xspi) -{ - xspi->write_fn(*xspi->tx_ptr, xspi->regs + XSPI_TXD_OFFSET); - xspi->tx_ptr++; -} - -static void xspi_tx16(struct xilinx_spi *xspi) -{ - xspi->write_fn(*(u16 *)(xspi->tx_ptr), xspi->regs + XSPI_TXD_OFFSET); - xspi->tx_ptr += 2; -} - -static void xspi_tx32(struct xilinx_spi *xspi) -{ - xspi->write_fn(*(u32 *)(xspi->tx_ptr), xspi->regs + XSPI_TXD_OFFSET); - xspi->tx_ptr += 4; -} - -static void xspi_rx8(struct xilinx_spi *xspi) -{ - u32 data = xspi->read_fn(xspi->regs + XSPI_RXD_OFFSET); - if (xspi->rx_ptr) { - *xspi->rx_ptr = data & 0xff; - xspi->rx_ptr++; - } -} - -static void xspi_rx16(struct xilinx_spi *xspi) -{ - u32 data = xspi->read_fn(xspi->regs + XSPI_RXD_OFFSET); - if (xspi->rx_ptr) { - *(u16 *)(xspi->rx_ptr) = data & 0xffff; - xspi->rx_ptr += 2; - } -} - -static void xspi_rx32(struct xilinx_spi *xspi) -{ - u32 data = xspi->read_fn(xspi->regs + XSPI_RXD_OFFSET); - if (xspi->rx_ptr) { - *(u32 *)(xspi->rx_ptr) = data; - xspi->rx_ptr += 4; - } -} - -static void xspi_init_hw(struct xilinx_spi *xspi) -{ - void __iomem *regs_base = xspi->regs; - - /* Reset the SPI device */ - xspi->write_fn(XIPIF_V123B_RESET_MASK, - regs_base + XIPIF_V123B_RESETR_OFFSET); - /* Disable all the interrupts just in case */ - xspi->write_fn(0, regs_base + XIPIF_V123B_IIER_OFFSET); - /* Enable the global IPIF interrupt */ - xspi->write_fn(XIPIF_V123B_GINTR_ENABLE, - regs_base + XIPIF_V123B_DGIER_OFFSET); - /* Deselect the slave on the SPI bus */ - xspi->write_fn(0xffff, regs_base + XSPI_SSR_OFFSET); - /* Disable the transmitter, enable Manual Slave Select Assertion, - * put SPI controller into master mode, and enable it */ - xspi->write_fn(XSPI_CR_TRANS_INHIBIT | XSPI_CR_MANUAL_SSELECT | - XSPI_CR_MASTER_MODE | XSPI_CR_ENABLE | XSPI_CR_TXFIFO_RESET | - XSPI_CR_RXFIFO_RESET, regs_base + XSPI_CR_OFFSET); -} - -static void xilinx_spi_chipselect(struct spi_device *spi, int is_on) -{ - struct xilinx_spi *xspi = spi_master_get_devdata(spi->master); - - if (is_on == BITBANG_CS_INACTIVE) { - /* Deselect the slave on the SPI bus */ - xspi->write_fn(0xffff, xspi->regs + XSPI_SSR_OFFSET); - } else if (is_on == BITBANG_CS_ACTIVE) { - /* Set the SPI clock phase and polarity */ - u16 cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET) - & ~XSPI_CR_MODE_MASK; - if (spi->mode & SPI_CPHA) - cr |= XSPI_CR_CPHA; - if (spi->mode & SPI_CPOL) - cr |= XSPI_CR_CPOL; - xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET); - - /* We do not check spi->max_speed_hz here as the SPI clock - * frequency is not software programmable (the IP block design - * parameter) - */ - - /* Activate the chip select */ - xspi->write_fn(~(0x0001 << spi->chip_select), - xspi->regs + XSPI_SSR_OFFSET); - } -} - -/* spi_bitbang requires custom setup_transfer() to be 
defined if there is a - * custom txrx_bufs(). We have nothing to setup here as the SPI IP block - * supports 8 or 16 bits per word which cannot be changed in software. - * SPI clock can't be changed in software either. - * Check for correct bits per word. Chip select delay calculations could be - * added here as soon as bitbang_work() can be made aware of the delay value. - */ -static int xilinx_spi_setup_transfer(struct spi_device *spi, - struct spi_transfer *t) -{ - struct xilinx_spi *xspi = spi_master_get_devdata(spi->master); - u8 bits_per_word; - - bits_per_word = (t && t->bits_per_word) - ? t->bits_per_word : spi->bits_per_word; - if (bits_per_word != xspi->bits_per_word) { - dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n", - __func__, bits_per_word); - return -EINVAL; - } - - return 0; -} - -static int xilinx_spi_setup(struct spi_device *spi) -{ - /* always return 0, we can not check the number of bits. - * There are cases when SPI setup is called before any driver is - * there, in that case the SPI core defaults to 8 bits, which we - * do not support in some cases. But if we return an error, the - * SPI device would not be registered and no driver can get hold of it - * When the driver is there, it will call SPI setup again with the - * correct number of bits per transfer. - * If a driver setups with the wrong bit number, it will fail when - * it tries to do a transfer - */ - return 0; -} - -static void xilinx_spi_fill_tx_fifo(struct xilinx_spi *xspi) -{ - u8 sr; - - /* Fill the Tx FIFO with as many bytes as possible */ - sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET); - while ((sr & XSPI_SR_TX_FULL_MASK) == 0 && xspi->remaining_bytes > 0) { - if (xspi->tx_ptr) - xspi->tx_fn(xspi); - else - xspi->write_fn(0, xspi->regs + XSPI_TXD_OFFSET); - xspi->remaining_bytes -= xspi->bits_per_word / 8; - sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET); - } -} - -static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t) -{ - struct xilinx_spi *xspi = spi_master_get_devdata(spi->master); - u32 ipif_ier; - u16 cr; - - /* We get here with transmitter inhibited */ - - xspi->tx_ptr = t->tx_buf; - xspi->rx_ptr = t->rx_buf; - xspi->remaining_bytes = t->len; - INIT_COMPLETION(xspi->done); - - xilinx_spi_fill_tx_fifo(xspi); - - /* Enable the transmit empty interrupt, which we use to determine - * progress on the transmission. - */ - ipif_ier = xspi->read_fn(xspi->regs + XIPIF_V123B_IIER_OFFSET); - xspi->write_fn(ipif_ier | XSPI_INTR_TX_EMPTY, - xspi->regs + XIPIF_V123B_IIER_OFFSET); - - /* Start the transfer by not inhibiting the transmitter any longer */ - cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET) & - ~XSPI_CR_TRANS_INHIBIT; - xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET); - - wait_for_completion(&xspi->done); - - /* Disable the transmit empty interrupt */ - xspi->write_fn(ipif_ier, xspi->regs + XIPIF_V123B_IIER_OFFSET); - - return t->len - xspi->remaining_bytes; -} - - -/* This driver supports single master mode only. Hence Tx FIFO Empty - * is the only interrupt we care about. - * Receive FIFO Overrun, Transmit FIFO Underrun, Mode Fault, and Slave Mode - * Fault are not to happen. 
- */ -static irqreturn_t xilinx_spi_irq(int irq, void *dev_id) -{ - struct xilinx_spi *xspi = dev_id; - u32 ipif_isr; - - /* Get the IPIF interrupts, and clear them immediately */ - ipif_isr = xspi->read_fn(xspi->regs + XIPIF_V123B_IISR_OFFSET); - xspi->write_fn(ipif_isr, xspi->regs + XIPIF_V123B_IISR_OFFSET); - - if (ipif_isr & XSPI_INTR_TX_EMPTY) { /* Transmission completed */ - u16 cr; - u8 sr; - - /* A transmit has just completed. Process received data and - * check for more data to transmit. Always inhibit the - * transmitter while the Isr refills the transmit register/FIFO, - * or make sure it is stopped if we're done. - */ - cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET); - xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT, - xspi->regs + XSPI_CR_OFFSET); - - /* Read out all the data from the Rx FIFO */ - sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET); - while ((sr & XSPI_SR_RX_EMPTY_MASK) == 0) { - xspi->rx_fn(xspi); - sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET); - } - - /* See if there is more data to send */ - if (xspi->remaining_bytes > 0) { - xilinx_spi_fill_tx_fifo(xspi); - /* Start the transfer by not inhibiting the - * transmitter any longer - */ - xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET); - } else { - /* No more data to send. - * Indicate the transfer is completed. - */ - complete(&xspi->done); - } - } - - return IRQ_HANDLED; -} - -static const struct of_device_id xilinx_spi_of_match[] = { - { .compatible = "xlnx,xps-spi-2.00.a", }, - { .compatible = "xlnx,xps-spi-2.00.b", }, - {} -}; -MODULE_DEVICE_TABLE(of, xilinx_spi_of_match); - -struct spi_master *xilinx_spi_init(struct device *dev, struct resource *mem, - u32 irq, s16 bus_num, int num_cs, int little_endian, int bits_per_word) -{ - struct spi_master *master; - struct xilinx_spi *xspi; - int ret; - - master = spi_alloc_master(dev, sizeof(struct xilinx_spi)); - if (!master) - return NULL; - - /* the spi->mode bits understood by this driver: */ - master->mode_bits = SPI_CPOL | SPI_CPHA; - - xspi = spi_master_get_devdata(master); - xspi->bitbang.master = spi_master_get(master); - xspi->bitbang.chipselect = xilinx_spi_chipselect; - xspi->bitbang.setup_transfer = xilinx_spi_setup_transfer; - xspi->bitbang.txrx_bufs = xilinx_spi_txrx_bufs; - xspi->bitbang.master->setup = xilinx_spi_setup; - init_completion(&xspi->done); - - if (!request_mem_region(mem->start, resource_size(mem), - XILINX_SPI_NAME)) - goto put_master; - - xspi->regs = ioremap(mem->start, resource_size(mem)); - if (xspi->regs == NULL) { - dev_warn(dev, "ioremap failure\n"); - goto map_failed; - } - - master->bus_num = bus_num; - master->num_chipselect = num_cs; - master->dev.of_node = dev->of_node; - - xspi->mem = *mem; - xspi->irq = irq; - if (little_endian) { - xspi->read_fn = xspi_read32; - xspi->write_fn = xspi_write32; - } else { - xspi->read_fn = xspi_read32_be; - xspi->write_fn = xspi_write32_be; - } - xspi->bits_per_word = bits_per_word; - if (xspi->bits_per_word == 8) { - xspi->tx_fn = xspi_tx8; - xspi->rx_fn = xspi_rx8; - } else if (xspi->bits_per_word == 16) { - xspi->tx_fn = xspi_tx16; - xspi->rx_fn = xspi_rx16; - } else if (xspi->bits_per_word == 32) { - xspi->tx_fn = xspi_tx32; - xspi->rx_fn = xspi_rx32; - } else - goto unmap_io; - - - /* SPI controller initializations */ - xspi_init_hw(xspi); - - /* Register for SPI Interrupt */ - ret = request_irq(xspi->irq, xilinx_spi_irq, 0, XILINX_SPI_NAME, xspi); - if (ret) - goto unmap_io; - - ret = spi_bitbang_start(&xspi->bitbang); - if (ret) { - dev_err(dev, "spi_bitbang_start FAILED\n"); - goto 
free_irq; - } - - dev_info(dev, "at 0x%08llX mapped to 0x%p, irq=%d\n", - (unsigned long long)mem->start, xspi->regs, xspi->irq); - return master; - -free_irq: - free_irq(xspi->irq, xspi); -unmap_io: - iounmap(xspi->regs); -map_failed: - release_mem_region(mem->start, resource_size(mem)); -put_master: - spi_master_put(master); - return NULL; -} -EXPORT_SYMBOL(xilinx_spi_init); - -void xilinx_spi_deinit(struct spi_master *master) -{ - struct xilinx_spi *xspi; - - xspi = spi_master_get_devdata(master); - - spi_bitbang_stop(&xspi->bitbang); - free_irq(xspi->irq, xspi); - iounmap(xspi->regs); - - release_mem_region(xspi->mem.start, resource_size(&xspi->mem)); - spi_master_put(xspi->bitbang.master); -} -EXPORT_SYMBOL(xilinx_spi_deinit); - -static int __devinit xilinx_spi_probe(struct platform_device *dev) -{ - struct xspi_platform_data *pdata; - struct resource *r; - int irq, num_cs = 0, little_endian = 0, bits_per_word = 8; - struct spi_master *master; - u8 i; - - pdata = dev->dev.platform_data; - if (pdata) { - num_cs = pdata->num_chipselect; - little_endian = pdata->little_endian; - bits_per_word = pdata->bits_per_word; - } - -#ifdef CONFIG_OF - if (dev->dev.of_node) { - const __be32 *prop; - int len; - - /* number of slave select bits is required */ - prop = of_get_property(dev->dev.of_node, "xlnx,num-ss-bits", - &len); - if (prop && len >= sizeof(*prop)) - num_cs = __be32_to_cpup(prop); - } -#endif - - if (!num_cs) { - dev_err(&dev->dev, "Missing slave select configuration data\n"); - return -EINVAL; - } - - - r = platform_get_resource(dev, IORESOURCE_MEM, 0); - if (!r) - return -ENODEV; - - irq = platform_get_irq(dev, 0); - if (irq < 0) - return -ENXIO; - - master = xilinx_spi_init(&dev->dev, r, irq, dev->id, num_cs, - little_endian, bits_per_word); - if (!master) - return -ENODEV; - - if (pdata) { - for (i = 0; i < pdata->num_devices; i++) - spi_new_device(master, pdata->devices + i); - } - - platform_set_drvdata(dev, master); - return 0; -} - -static int __devexit xilinx_spi_remove(struct platform_device *dev) -{ - xilinx_spi_deinit(platform_get_drvdata(dev)); - platform_set_drvdata(dev, 0); - - return 0; -} - -/* work with hotplug and coldplug */ -MODULE_ALIAS("platform:" XILINX_SPI_NAME); - -static struct platform_driver xilinx_spi_driver = { - .probe = xilinx_spi_probe, - .remove = __devexit_p(xilinx_spi_remove), - .driver = { - .name = XILINX_SPI_NAME, - .owner = THIS_MODULE, - .of_match_table = xilinx_spi_of_match, - }, -}; - -static int __init xilinx_spi_pltfm_init(void) -{ - return platform_driver_register(&xilinx_spi_driver); -} -module_init(xilinx_spi_pltfm_init); - -static void __exit xilinx_spi_pltfm_exit(void) -{ - platform_driver_unregister(&xilinx_spi_driver); -} -module_exit(xilinx_spi_pltfm_exit); - -MODULE_AUTHOR("MontaVista Software, Inc. "); -MODULE_DESCRIPTION("Xilinx SPI driver"); -MODULE_LICENSE("GPL"); -- cgit v0.10.2 From 760ee1c4aafac8fcaf3be5ff2b19c5485c5886e1 Mon Sep 17 00:00:00 2001 From: Mika Westerberg Date: Sun, 29 May 2011 13:10:02 +0300 Subject: dmaengine: add ep93xx DMA support The ep93xx DMA controller has 10 independent memory to peripheral (M2P) channels, and 2 dedicated memory to memory (M2M) channels. M2M channels can also be used by SPI and IDE to perform DMA transfers to/from their memory mapped FIFOs. This driver supports both M2P and M2M channels with DMA_SLAVE, DMA_CYCLIC and DMA_MEMCPY (M2M only) capabilities. 
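As a rough illustration only (not part of this patch; the function and names
below are hypothetical), a client such as the EP93xx SPI driver could request
an M2M slave channel by passing a struct ep93xx_dma_data to the standard
dmaengine filter callback, along the lines of:

  #include <linux/dmaengine.h>
  #include <mach/dma.h>

  /* Hypothetical filter: accept only M2M channels and hand the EP93xx
   * specific configuration to the driver via chan->private. */
  static bool ep93xx_ssp_dma_filter(struct dma_chan *chan, void *filter_param)
  {
          if (ep93xx_dma_chan_is_m2p(chan))
                  return false;

          chan->private = filter_param;
          return true;
  }

  static struct dma_chan *ep93xx_ssp_request_tx_chan(void)
  {
          /* Must stay valid while the channel is in use, since the
           * driver keeps referencing it through chan->private. */
          static struct ep93xx_dma_data data = {
                  .port           = EP93XX_DMA_SSP,
                  .direction      = DMA_TO_DEVICE,
                  .name           = "ssp-tx",
          };
          dma_cap_mask_t mask;

          dma_cap_zero(mask);
          dma_cap_set(DMA_SLAVE, mask);

          return dma_request_channel(mask, ep93xx_ssp_dma_filter, &data);
  }

For memcpy use of the M2M channels no ep93xx_dma_data is needed and
chan->private is left NULL.
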
Signed-off-by: Mika Westerberg Signed-off-by: Ryan Mallon Acked-by: H Hartley Sweeten Acked-by: Vinod Koul Signed-off-by: Grant Likely diff --git a/arch/arm/mach-ep93xx/include/mach/dma.h b/arch/arm/mach-ep93xx/include/mach/dma.h index 5e31b2b..6e7049a 100644 --- a/arch/arm/mach-ep93xx/include/mach/dma.h +++ b/arch/arm/mach-ep93xx/include/mach/dma.h @@ -15,6 +15,8 @@ #include #include +#include +#include /** * struct ep93xx_dma_buffer - Information about a buffer to be transferred @@ -146,4 +148,89 @@ void ep93xx_dma_m2p_submit_recursive(struct ep93xx_dma_m2p_client *m2p, */ void ep93xx_dma_m2p_flush(struct ep93xx_dma_m2p_client *m2p); +/* + * M2P channels. + * + * Note that these values are also directly used for setting the PPALLOC + * register. + */ +#define EP93XX_DMA_I2S1 0 +#define EP93XX_DMA_I2S2 1 +#define EP93XX_DMA_AAC1 2 +#define EP93XX_DMA_AAC2 3 +#define EP93XX_DMA_AAC3 4 +#define EP93XX_DMA_I2S3 5 +#define EP93XX_DMA_UART1 6 +#define EP93XX_DMA_UART2 7 +#define EP93XX_DMA_UART3 8 +#define EP93XX_DMA_IRDA 9 +/* M2M channels */ +#define EP93XX_DMA_SSP 10 +#define EP93XX_DMA_IDE 11 + +/** + * struct ep93xx_dma_data - configuration data for the EP93xx dmaengine + * @port: peripheral which is requesting the channel + * @direction: TX/RX channel + * @name: optional name for the channel, this is displayed in /proc/interrupts + * + * This information is passed as private channel parameter in a filter + * function. Note that this is only needed for slave/cyclic channels. For + * memcpy channels %NULL data should be passed. + */ +struct ep93xx_dma_data { + int port; + enum dma_data_direction direction; + const char *name; +}; + +/** + * struct ep93xx_dma_chan_data - platform specific data for a DMA channel + * @name: name of the channel, used for getting the right clock for the channel + * @base: mapped registers + * @irq: interrupt number used by this channel + */ +struct ep93xx_dma_chan_data { + const char *name; + void __iomem *base; + int irq; +}; + +/** + * struct ep93xx_dma_platform_data - platform data for the dmaengine driver + * @channels: array of channels which are passed to the driver + * @num_channels: number of channels in the array + * + * This structure is passed to the DMA engine driver via platform data. For + * M2P channels, contract is that even channels are for TX and odd for RX. + * There is no requirement for the M2M channels. + */ +struct ep93xx_dma_platform_data { + struct ep93xx_dma_chan_data *channels; + size_t num_channels; +}; + +static inline bool ep93xx_dma_chan_is_m2p(struct dma_chan *chan) +{ + return !strcmp(dev_name(chan->device->dev), "ep93xx-dma-m2p"); +} + +/** + * ep93xx_dma_chan_direction - returns direction the channel can be used + * @chan: channel + * + * This function can be used in filter functions to find out whether the + * channel supports given DMA direction. Only M2P channels have such + * limitation, for M2M channels the direction is configurable. + */ +static inline enum dma_data_direction +ep93xx_dma_chan_direction(struct dma_chan *chan) +{ + if (!ep93xx_dma_chan_is_m2p(chan)) + return DMA_NONE; + + /* even channels are for TX, odd for RX */ + return (chan->chan_id % 2 == 0) ? DMA_TO_DEVICE : DMA_FROM_DEVICE; +} + #endif /* __ASM_ARCH_DMA_H */ diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 25cf327..2e3b3d3 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -237,6 +237,13 @@ config MXS_DMA Support the MXS DMA engine. This engine including APBH-DMA and APBX-DMA is integrated into Freescale i.MX23/28 chips. 
+config EP93XX_DMA + bool "Cirrus Logic EP93xx DMA support" + depends on ARCH_EP93XX + select DMA_ENGINE + help + Enable support for the Cirrus Logic EP93xx M2P/M2M DMA controller. + config DMA_ENGINE bool diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index 836095a..30cf3b1 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -25,3 +25,4 @@ obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o obj-$(CONFIG_PL330_DMA) += pl330.o obj-$(CONFIG_PCH_DMA) += pch_dma.o obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o +obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c new file mode 100644 index 0000000..0766c1e --- /dev/null +++ b/drivers/dma/ep93xx_dma.c @@ -0,0 +1,1355 @@ +/* + * Driver for the Cirrus Logic EP93xx DMA Controller + * + * Copyright (C) 2011 Mika Westerberg + * + * DMA M2P implementation is based on the original + * arch/arm/mach-ep93xx/dma-m2p.c which has following copyrights: + * + * Copyright (C) 2006 Lennert Buytenhek + * Copyright (C) 2006 Applied Data Systems + * Copyright (C) 2009 Ryan Mallon + * + * This driver is based on dw_dmac and amba-pl08x drivers. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include + +#include + +/* M2P registers */ +#define M2P_CONTROL 0x0000 +#define M2P_CONTROL_STALLINT BIT(0) +#define M2P_CONTROL_NFBINT BIT(1) +#define M2P_CONTROL_CH_ERROR_INT BIT(3) +#define M2P_CONTROL_ENABLE BIT(4) +#define M2P_CONTROL_ICE BIT(6) + +#define M2P_INTERRUPT 0x0004 +#define M2P_INTERRUPT_STALL BIT(0) +#define M2P_INTERRUPT_NFB BIT(1) +#define M2P_INTERRUPT_ERROR BIT(3) + +#define M2P_PPALLOC 0x0008 +#define M2P_STATUS 0x000c + +#define M2P_MAXCNT0 0x0020 +#define M2P_BASE0 0x0024 +#define M2P_MAXCNT1 0x0030 +#define M2P_BASE1 0x0034 + +#define M2P_STATE_IDLE 0 +#define M2P_STATE_STALL 1 +#define M2P_STATE_ON 2 +#define M2P_STATE_NEXT 3 + +/* M2M registers */ +#define M2M_CONTROL 0x0000 +#define M2M_CONTROL_DONEINT BIT(2) +#define M2M_CONTROL_ENABLE BIT(3) +#define M2M_CONTROL_START BIT(4) +#define M2M_CONTROL_DAH BIT(11) +#define M2M_CONTROL_SAH BIT(12) +#define M2M_CONTROL_PW_SHIFT 9 +#define M2M_CONTROL_PW_8 (0 << M2M_CONTROL_PW_SHIFT) +#define M2M_CONTROL_PW_16 (1 << M2M_CONTROL_PW_SHIFT) +#define M2M_CONTROL_PW_32 (2 << M2M_CONTROL_PW_SHIFT) +#define M2M_CONTROL_PW_MASK (3 << M2M_CONTROL_PW_SHIFT) +#define M2M_CONTROL_TM_SHIFT 13 +#define M2M_CONTROL_TM_TX (1 << M2M_CONTROL_TM_SHIFT) +#define M2M_CONTROL_TM_RX (2 << M2M_CONTROL_TM_SHIFT) +#define M2M_CONTROL_RSS_SHIFT 22 +#define M2M_CONTROL_RSS_SSPRX (1 << M2M_CONTROL_RSS_SHIFT) +#define M2M_CONTROL_RSS_SSPTX (2 << M2M_CONTROL_RSS_SHIFT) +#define M2M_CONTROL_RSS_IDE (3 << M2M_CONTROL_RSS_SHIFT) +#define M2M_CONTROL_NO_HDSK BIT(24) +#define M2M_CONTROL_PWSC_SHIFT 25 + +#define M2M_INTERRUPT 0x0004 +#define M2M_INTERRUPT_DONEINT BIT(1) + +#define M2M_BCR0 0x0010 +#define M2M_BCR1 0x0014 +#define M2M_SAR_BASE0 0x0018 +#define M2M_SAR_BASE1 0x001c +#define M2M_DAR_BASE0 0x002c +#define M2M_DAR_BASE1 0x0030 + +#define DMA_MAX_CHAN_BYTES 0xffff +#define DMA_MAX_CHAN_DESCRIPTORS 32 + +struct ep93xx_dma_engine; + +/** + * struct ep93xx_dma_desc - EP93xx specific transaction descriptor + * @src_addr: source address of the transaction + * @dst_addr: destination address of the 
transaction + * @size: size of the transaction (in bytes) + * @complete: this descriptor is completed + * @txd: dmaengine API descriptor + * @tx_list: list of linked descriptors + * @node: link used for putting this into a channel queue + */ +struct ep93xx_dma_desc { + u32 src_addr; + u32 dst_addr; + size_t size; + bool complete; + struct dma_async_tx_descriptor txd; + struct list_head tx_list; + struct list_head node; +}; + +/** + * struct ep93xx_dma_chan - an EP93xx DMA M2P/M2M channel + * @chan: dmaengine API channel + * @edma: pointer to to the engine device + * @regs: memory mapped registers + * @irq: interrupt number of the channel + * @clk: clock used by this channel + * @tasklet: channel specific tasklet used for callbacks + * @lock: lock protecting the fields following + * @flags: flags for the channel + * @buffer: which buffer to use next (0/1) + * @last_completed: last completed cookie value + * @active: flattened chain of descriptors currently being processed + * @queue: pending descriptors which are handled next + * @free_list: list of free descriptors which can be used + * @runtime_addr: physical address currently used as dest/src (M2M only). This + * is set via %DMA_SLAVE_CONFIG before slave operation is + * prepared + * @runtime_ctrl: M2M runtime values for the control register. + * + * As EP93xx DMA controller doesn't support real chained DMA descriptors we + * will have slightly different scheme here: @active points to a head of + * flattened DMA descriptor chain. + * + * @queue holds pending transactions. These are linked through the first + * descriptor in the chain. When a descriptor is moved to the @active queue, + * the first and chained descriptors are flattened into a single list. + * + * @chan.private holds pointer to &struct ep93xx_dma_data which contains + * necessary channel configuration information. For memcpy channels this must + * be %NULL. + */ +struct ep93xx_dma_chan { + struct dma_chan chan; + const struct ep93xx_dma_engine *edma; + void __iomem *regs; + int irq; + struct clk *clk; + struct tasklet_struct tasklet; + /* protects the fields following */ + spinlock_t lock; + unsigned long flags; +/* Channel is configured for cyclic transfers */ +#define EP93XX_DMA_IS_CYCLIC 0 + + int buffer; + dma_cookie_t last_completed; + struct list_head active; + struct list_head queue; + struct list_head free_list; + u32 runtime_addr; + u32 runtime_ctrl; +}; + +/** + * struct ep93xx_dma_engine - the EP93xx DMA engine instance + * @dma_dev: holds the dmaengine device + * @m2m: is this an M2M or M2P device + * @hw_setup: method which sets the channel up for operation + * @hw_shutdown: shuts the channel down and flushes whatever is left + * @hw_submit: pushes active descriptor(s) to the hardware + * @hw_interrupt: handle the interrupt + * @num_channels: number of channels for this instance + * @channels: array of channels + * + * There is one instance of this struct for the M2P channels and one for the + * M2M channels. hw_xxx() methods are used to perform operations which are + * different on M2M and M2P channels. These methods are called with channel + * lock held and interrupts disabled so they cannot sleep. 
+ */ +struct ep93xx_dma_engine { + struct dma_device dma_dev; + bool m2m; + int (*hw_setup)(struct ep93xx_dma_chan *); + void (*hw_shutdown)(struct ep93xx_dma_chan *); + void (*hw_submit)(struct ep93xx_dma_chan *); + int (*hw_interrupt)(struct ep93xx_dma_chan *); +#define INTERRUPT_UNKNOWN 0 +#define INTERRUPT_DONE 1 +#define INTERRUPT_NEXT_BUFFER 2 + + size_t num_channels; + struct ep93xx_dma_chan channels[]; +}; + +static inline struct device *chan2dev(struct ep93xx_dma_chan *edmac) +{ + return &edmac->chan.dev->device; +} + +static struct ep93xx_dma_chan *to_ep93xx_dma_chan(struct dma_chan *chan) +{ + return container_of(chan, struct ep93xx_dma_chan, chan); +} + +/** + * ep93xx_dma_set_active - set new active descriptor chain + * @edmac: channel + * @desc: head of the new active descriptor chain + * + * Sets @desc to be the head of the new active descriptor chain. This is the + * chain which is processed next. The active list must be empty before calling + * this function. + * + * Called with @edmac->lock held and interrupts disabled. + */ +static void ep93xx_dma_set_active(struct ep93xx_dma_chan *edmac, + struct ep93xx_dma_desc *desc) +{ + BUG_ON(!list_empty(&edmac->active)); + + list_add_tail(&desc->node, &edmac->active); + + /* Flatten the @desc->tx_list chain into @edmac->active list */ + while (!list_empty(&desc->tx_list)) { + struct ep93xx_dma_desc *d = list_first_entry(&desc->tx_list, + struct ep93xx_dma_desc, node); + + /* + * We copy the callback parameters from the first descriptor + * to all the chained descriptors. This way we can call the + * callback without having to find out the first descriptor in + * the chain. Useful for cyclic transfers. + */ + d->txd.callback = desc->txd.callback; + d->txd.callback_param = desc->txd.callback_param; + + list_move_tail(&d->node, &edmac->active); + } +} + +/* Called with @edmac->lock held and interrupts disabled */ +static struct ep93xx_dma_desc * +ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac) +{ + return list_first_entry(&edmac->active, struct ep93xx_dma_desc, node); +} + +/** + * ep93xx_dma_advance_active - advances to the next active descriptor + * @edmac: channel + * + * Function advances active descriptor to the next in the @edmac->active and + * returns %true if we still have descriptors in the chain to process. + * Otherwise returns %false. + * + * When the channel is in cyclic mode always returns %true. + * + * Called with @edmac->lock held and interrupts disabled. + */ +static bool ep93xx_dma_advance_active(struct ep93xx_dma_chan *edmac) +{ + list_rotate_left(&edmac->active); + + if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) + return true; + + /* + * If txd.cookie is set it means that we are back in the first + * descriptor in the chain and hence done with it. + */ + return !ep93xx_dma_get_active(edmac)->txd.cookie; +} + +/* + * M2P DMA implementation + */ + +static void m2p_set_control(struct ep93xx_dma_chan *edmac, u32 control) +{ + writel(control, edmac->regs + M2P_CONTROL); + /* + * EP93xx User's Guide states that we must perform a dummy read after + * write to the control register. 
+ */ + readl(edmac->regs + M2P_CONTROL); +} + +static int m2p_hw_setup(struct ep93xx_dma_chan *edmac) +{ + struct ep93xx_dma_data *data = edmac->chan.private; + u32 control; + + writel(data->port & 0xf, edmac->regs + M2P_PPALLOC); + + control = M2P_CONTROL_CH_ERROR_INT | M2P_CONTROL_ICE + | M2P_CONTROL_ENABLE; + m2p_set_control(edmac, control); + + return 0; +} + +static inline u32 m2p_channel_state(struct ep93xx_dma_chan *edmac) +{ + return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3; +} + +static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac) +{ + u32 control; + + control = readl(edmac->regs + M2P_CONTROL); + control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT); + m2p_set_control(edmac, control); + + while (m2p_channel_state(edmac) >= M2P_STATE_ON) + cpu_relax(); + + m2p_set_control(edmac, 0); + + while (m2p_channel_state(edmac) == M2P_STATE_STALL) + cpu_relax(); +} + +static void m2p_fill_desc(struct ep93xx_dma_chan *edmac) +{ + struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac); + u32 bus_addr; + + if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_TO_DEVICE) + bus_addr = desc->src_addr; + else + bus_addr = desc->dst_addr; + + if (edmac->buffer == 0) { + writel(desc->size, edmac->regs + M2P_MAXCNT0); + writel(bus_addr, edmac->regs + M2P_BASE0); + } else { + writel(desc->size, edmac->regs + M2P_MAXCNT1); + writel(bus_addr, edmac->regs + M2P_BASE1); + } + + edmac->buffer ^= 1; +} + +static void m2p_hw_submit(struct ep93xx_dma_chan *edmac) +{ + u32 control = readl(edmac->regs + M2P_CONTROL); + + m2p_fill_desc(edmac); + control |= M2P_CONTROL_STALLINT; + + if (ep93xx_dma_advance_active(edmac)) { + m2p_fill_desc(edmac); + control |= M2P_CONTROL_NFBINT; + } + + m2p_set_control(edmac, control); +} + +static int m2p_hw_interrupt(struct ep93xx_dma_chan *edmac) +{ + u32 irq_status = readl(edmac->regs + M2P_INTERRUPT); + u32 control; + + if (irq_status & M2P_INTERRUPT_ERROR) { + struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac); + + /* Clear the error interrupt */ + writel(1, edmac->regs + M2P_INTERRUPT); + + /* + * It seems that there is no easy way of reporting errors back + * to client so we just report the error here and continue as + * usual. + * + * Revisit this when there is a mechanism to report back the + * errors. + */ + dev_err(chan2dev(edmac), + "DMA transfer failed! Details:\n" + "\tcookie : %d\n" + "\tsrc_addr : 0x%08x\n" + "\tdst_addr : 0x%08x\n" + "\tsize : %zu\n", + desc->txd.cookie, desc->src_addr, desc->dst_addr, + desc->size); + } + + switch (irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)) { + case M2P_INTERRUPT_STALL: + /* Disable interrupts */ + control = readl(edmac->regs + M2P_CONTROL); + control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT); + m2p_set_control(edmac, control); + + return INTERRUPT_DONE; + + case M2P_INTERRUPT_NFB: + if (ep93xx_dma_advance_active(edmac)) + m2p_fill_desc(edmac); + + return INTERRUPT_NEXT_BUFFER; + } + + return INTERRUPT_UNKNOWN; +} + +/* + * M2M DMA implementation + * + * For the M2M transfers we don't use NFB at all. This is because it simply + * doesn't work well with memcpy transfers. When you submit both buffers it is + * extremely unlikely that you get an NFB interrupt, but it instead reports + * DONE interrupt and both buffers are already transferred which means that we + * weren't able to update the next buffer. + * + * So for now we "simulate" NFB by just submitting buffer after buffer + * without double buffering. 
+ */ + +static int m2m_hw_setup(struct ep93xx_dma_chan *edmac) +{ + const struct ep93xx_dma_data *data = edmac->chan.private; + u32 control = 0; + + if (!data) { + /* This is memcpy channel, nothing to configure */ + writel(control, edmac->regs + M2M_CONTROL); + return 0; + } + + switch (data->port) { + case EP93XX_DMA_SSP: + /* + * This was found via experimenting - anything less than 5 + * causes the channel to perform only a partial transfer which + * leads to problems since we don't get DONE interrupt then. + */ + control = (5 << M2M_CONTROL_PWSC_SHIFT); + control |= M2M_CONTROL_NO_HDSK; + + if (data->direction == DMA_TO_DEVICE) { + control |= M2M_CONTROL_DAH; + control |= M2M_CONTROL_TM_TX; + control |= M2M_CONTROL_RSS_SSPTX; + } else { + control |= M2M_CONTROL_SAH; + control |= M2M_CONTROL_TM_RX; + control |= M2M_CONTROL_RSS_SSPRX; + } + break; + + case EP93XX_DMA_IDE: + /* + * This IDE part is totally untested. Values below are taken + * from the EP93xx Users's Guide and might not be correct. + */ + control |= M2M_CONTROL_NO_HDSK; + control |= M2M_CONTROL_RSS_IDE; + control |= M2M_CONTROL_PW_16; + + if (data->direction == DMA_TO_DEVICE) { + /* Worst case from the UG */ + control = (3 << M2M_CONTROL_PWSC_SHIFT); + control |= M2M_CONTROL_DAH; + control |= M2M_CONTROL_TM_TX; + } else { + control = (2 << M2M_CONTROL_PWSC_SHIFT); + control |= M2M_CONTROL_SAH; + control |= M2M_CONTROL_TM_RX; + } + break; + + default: + return -EINVAL; + } + + writel(control, edmac->regs + M2M_CONTROL); + return 0; +} + +static void m2m_hw_shutdown(struct ep93xx_dma_chan *edmac) +{ + /* Just disable the channel */ + writel(0, edmac->regs + M2M_CONTROL); +} + +static void m2m_fill_desc(struct ep93xx_dma_chan *edmac) +{ + struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac); + + if (edmac->buffer == 0) { + writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0); + writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE0); + writel(desc->size, edmac->regs + M2M_BCR0); + } else { + writel(desc->src_addr, edmac->regs + M2M_SAR_BASE1); + writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE1); + writel(desc->size, edmac->regs + M2M_BCR1); + } + + edmac->buffer ^= 1; +} + +static void m2m_hw_submit(struct ep93xx_dma_chan *edmac) +{ + struct ep93xx_dma_data *data = edmac->chan.private; + u32 control = readl(edmac->regs + M2M_CONTROL); + + /* + * Since we allow clients to configure PW (peripheral width) we always + * clear PW bits here and then set them according what is given in + * the runtime configuration. + */ + control &= ~M2M_CONTROL_PW_MASK; + control |= edmac->runtime_ctrl; + + m2m_fill_desc(edmac); + control |= M2M_CONTROL_DONEINT; + + /* + * Now we can finally enable the channel. For M2M channel this must be + * done _after_ the BCRx registers are programmed. + */ + control |= M2M_CONTROL_ENABLE; + writel(control, edmac->regs + M2M_CONTROL); + + if (!data) { + /* + * For memcpy channels the software trigger must be asserted + * in order to start the memcpy operation. 
+ */ + control |= M2M_CONTROL_START; + writel(control, edmac->regs + M2M_CONTROL); + } +} + +static int m2m_hw_interrupt(struct ep93xx_dma_chan *edmac) +{ + u32 control; + + if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_DONEINT)) + return INTERRUPT_UNKNOWN; + + /* Clear the DONE bit */ + writel(0, edmac->regs + M2M_INTERRUPT); + + /* Disable interrupts and the channel */ + control = readl(edmac->regs + M2M_CONTROL); + control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_ENABLE); + writel(control, edmac->regs + M2M_CONTROL); + + /* + * Since we only get DONE interrupt we have to find out ourselves + * whether there still is something to process. So we try to advance + * the chain an see whether it succeeds. + */ + if (ep93xx_dma_advance_active(edmac)) { + edmac->edma->hw_submit(edmac); + return INTERRUPT_NEXT_BUFFER; + } + + return INTERRUPT_DONE; +} + +/* + * DMA engine API implementation + */ + +static struct ep93xx_dma_desc * +ep93xx_dma_desc_get(struct ep93xx_dma_chan *edmac) +{ + struct ep93xx_dma_desc *desc, *_desc; + struct ep93xx_dma_desc *ret = NULL; + unsigned long flags; + + spin_lock_irqsave(&edmac->lock, flags); + list_for_each_entry_safe(desc, _desc, &edmac->free_list, node) { + if (async_tx_test_ack(&desc->txd)) { + list_del_init(&desc->node); + + /* Re-initialize the descriptor */ + desc->src_addr = 0; + desc->dst_addr = 0; + desc->size = 0; + desc->complete = false; + desc->txd.cookie = 0; + desc->txd.callback = NULL; + desc->txd.callback_param = NULL; + + ret = desc; + break; + } + } + spin_unlock_irqrestore(&edmac->lock, flags); + return ret; +} + +static void ep93xx_dma_desc_put(struct ep93xx_dma_chan *edmac, + struct ep93xx_dma_desc *desc) +{ + if (desc) { + unsigned long flags; + + spin_lock_irqsave(&edmac->lock, flags); + list_splice_init(&desc->tx_list, &edmac->free_list); + list_add(&desc->node, &edmac->free_list); + spin_unlock_irqrestore(&edmac->lock, flags); + } +} + +/** + * ep93xx_dma_advance_work - start processing the next pending transaction + * @edmac: channel + * + * If we have pending transactions queued and we are currently idling, this + * function takes the next queued transaction from the @edmac->queue and + * pushes it to the hardware for execution. 
+ */ +static void ep93xx_dma_advance_work(struct ep93xx_dma_chan *edmac) +{ + struct ep93xx_dma_desc *new; + unsigned long flags; + + spin_lock_irqsave(&edmac->lock, flags); + if (!list_empty(&edmac->active) || list_empty(&edmac->queue)) { + spin_unlock_irqrestore(&edmac->lock, flags); + return; + } + + /* Take the next descriptor from the pending queue */ + new = list_first_entry(&edmac->queue, struct ep93xx_dma_desc, node); + list_del_init(&new->node); + + ep93xx_dma_set_active(edmac, new); + + /* Push it to the hardware */ + edmac->edma->hw_submit(edmac); + spin_unlock_irqrestore(&edmac->lock, flags); +} + +static void ep93xx_dma_unmap_buffers(struct ep93xx_dma_desc *desc) +{ + struct device *dev = desc->txd.chan->device->dev; + + if (!(desc->txd.flags & DMA_COMPL_SKIP_SRC_UNMAP)) { + if (desc->txd.flags & DMA_COMPL_SRC_UNMAP_SINGLE) + dma_unmap_single(dev, desc->src_addr, desc->size, + DMA_TO_DEVICE); + else + dma_unmap_page(dev, desc->src_addr, desc->size, + DMA_TO_DEVICE); + } + if (!(desc->txd.flags & DMA_COMPL_SKIP_DEST_UNMAP)) { + if (desc->txd.flags & DMA_COMPL_DEST_UNMAP_SINGLE) + dma_unmap_single(dev, desc->dst_addr, desc->size, + DMA_FROM_DEVICE); + else + dma_unmap_page(dev, desc->dst_addr, desc->size, + DMA_FROM_DEVICE); + } +} + +static void ep93xx_dma_tasklet(unsigned long data) +{ + struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data; + struct ep93xx_dma_desc *desc, *d; + dma_async_tx_callback callback; + void *callback_param; + LIST_HEAD(list); + + spin_lock_irq(&edmac->lock); + desc = ep93xx_dma_get_active(edmac); + if (desc->complete) { + edmac->last_completed = desc->txd.cookie; + list_splice_init(&edmac->active, &list); + } + spin_unlock_irq(&edmac->lock); + + /* Pick up the next descriptor from the queue */ + ep93xx_dma_advance_work(edmac); + + callback = desc->txd.callback; + callback_param = desc->txd.callback_param; + + /* Now we can release all the chained descriptors */ + list_for_each_entry_safe(desc, d, &list, node) { + /* + * For the memcpy channels the API requires us to unmap the + * buffers unless requested otherwise. + */ + if (!edmac->chan.private) + ep93xx_dma_unmap_buffers(desc); + + ep93xx_dma_desc_put(edmac, desc); + } + + if (callback) + callback(callback_param); +} + +static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id) +{ + struct ep93xx_dma_chan *edmac = dev_id; + irqreturn_t ret = IRQ_HANDLED; + + spin_lock(&edmac->lock); + + switch (edmac->edma->hw_interrupt(edmac)) { + case INTERRUPT_DONE: + ep93xx_dma_get_active(edmac)->complete = true; + tasklet_schedule(&edmac->tasklet); + break; + + case INTERRUPT_NEXT_BUFFER: + if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) + tasklet_schedule(&edmac->tasklet); + break; + + default: + dev_warn(chan2dev(edmac), "unknown interrupt!\n"); + ret = IRQ_NONE; + break; + } + + spin_unlock(&edmac->lock); + return ret; +} + +/** + * ep93xx_dma_tx_submit - set the prepared descriptor(s) to be executed + * @tx: descriptor to be executed + * + * Function will execute given descriptor on the hardware or if the hardware + * is busy, queue the descriptor to be executed later on. Returns cookie which + * can be used to poll the status of the descriptor. 
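The cookie returned by ep93xx_dma_tx_submit() can be polled through the standard dmaengine helpers. A minimal sketch, assuming the caller already holds a prepared descriptor (a real client would normally use a completion callback rather than busy-waiting):

static void example_submit_and_poll(struct dma_chan *chan,
				    struct dma_async_tx_descriptor *txd)
{
	dma_cookie_t cookie;

	cookie = dmaengine_submit(txd);
	dma_async_issue_pending(chan);

	/* Busy-wait purely for illustration */
	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) ==
	       DMA_IN_PROGRESS)
		cpu_relax();
}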
+ */ +static dma_cookie_t ep93xx_dma_tx_submit(struct dma_async_tx_descriptor *tx) +{ + struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(tx->chan); + struct ep93xx_dma_desc *desc; + dma_cookie_t cookie; + unsigned long flags; + + spin_lock_irqsave(&edmac->lock, flags); + + cookie = edmac->chan.cookie; + + if (++cookie < 0) + cookie = 1; + + desc = container_of(tx, struct ep93xx_dma_desc, txd); + + edmac->chan.cookie = cookie; + desc->txd.cookie = cookie; + + /* + * If nothing is currently prosessed, we push this descriptor + * directly to the hardware. Otherwise we put the descriptor + * to the pending queue. + */ + if (list_empty(&edmac->active)) { + ep93xx_dma_set_active(edmac, desc); + edmac->edma->hw_submit(edmac); + } else { + list_add_tail(&desc->node, &edmac->queue); + } + + spin_unlock_irqrestore(&edmac->lock, flags); + return cookie; +} + +/** + * ep93xx_dma_alloc_chan_resources - allocate resources for the channel + * @chan: channel to allocate resources + * + * Function allocates necessary resources for the given DMA channel and + * returns number of allocated descriptors for the channel. Negative errno + * is returned in case of failure. + */ +static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan) +{ + struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); + struct ep93xx_dma_data *data = chan->private; + const char *name = dma_chan_name(chan); + int ret, i; + + /* Sanity check the channel parameters */ + if (!edmac->edma->m2m) { + if (!data) + return -EINVAL; + if (data->port < EP93XX_DMA_I2S1 || + data->port > EP93XX_DMA_IRDA) + return -EINVAL; + if (data->direction != ep93xx_dma_chan_direction(chan)) + return -EINVAL; + } else { + if (data) { + switch (data->port) { + case EP93XX_DMA_SSP: + case EP93XX_DMA_IDE: + if (data->direction != DMA_TO_DEVICE && + data->direction != DMA_FROM_DEVICE) + return -EINVAL; + break; + default: + return -EINVAL; + } + } + } + + if (data && data->name) + name = data->name; + + ret = clk_enable(edmac->clk); + if (ret) + return ret; + + ret = request_irq(edmac->irq, ep93xx_dma_interrupt, 0, name, edmac); + if (ret) + goto fail_clk_disable; + + spin_lock_irq(&edmac->lock); + edmac->last_completed = 1; + edmac->chan.cookie = 1; + ret = edmac->edma->hw_setup(edmac); + spin_unlock_irq(&edmac->lock); + + if (ret) + goto fail_free_irq; + + for (i = 0; i < DMA_MAX_CHAN_DESCRIPTORS; i++) { + struct ep93xx_dma_desc *desc; + + desc = kzalloc(sizeof(*desc), GFP_KERNEL); + if (!desc) { + dev_warn(chan2dev(edmac), "not enough descriptors\n"); + break; + } + + INIT_LIST_HEAD(&desc->tx_list); + + dma_async_tx_descriptor_init(&desc->txd, chan); + desc->txd.flags = DMA_CTRL_ACK; + desc->txd.tx_submit = ep93xx_dma_tx_submit; + + ep93xx_dma_desc_put(edmac, desc); + } + + return i; + +fail_free_irq: + free_irq(edmac->irq, edmac); +fail_clk_disable: + clk_disable(edmac->clk); + + return ret; +} + +/** + * ep93xx_dma_free_chan_resources - release resources for the channel + * @chan: channel + * + * Function releases all the resources allocated for the given channel. + * The channel must be idle when this is called. 
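Taken together with ep93xx_dma_free_chan_resources() below, the usual client lifecycle looks roughly like this (a sketch; the filter is the hypothetical one shown earlier, and the later patches in this series follow the same pattern):

static struct dma_chan *example_get_channel(struct ep93xx_dma_data *data)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* The filter stores @data in chan->private so that
	 * ep93xx_dma_alloc_chan_resources() can validate it. */
	return dma_request_channel(mask, example_m2m_filter, data);
}

/* ... and dma_release_channel(chan) once the client is done. */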
+ */ +static void ep93xx_dma_free_chan_resources(struct dma_chan *chan) +{ + struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); + struct ep93xx_dma_desc *desc, *d; + unsigned long flags; + LIST_HEAD(list); + + BUG_ON(!list_empty(&edmac->active)); + BUG_ON(!list_empty(&edmac->queue)); + + spin_lock_irqsave(&edmac->lock, flags); + edmac->edma->hw_shutdown(edmac); + edmac->runtime_addr = 0; + edmac->runtime_ctrl = 0; + edmac->buffer = 0; + list_splice_init(&edmac->free_list, &list); + spin_unlock_irqrestore(&edmac->lock, flags); + + list_for_each_entry_safe(desc, d, &list, node) + kfree(desc); + + clk_disable(edmac->clk); + free_irq(edmac->irq, edmac); +} + +/** + * ep93xx_dma_prep_dma_memcpy - prepare a memcpy DMA operation + * @chan: channel + * @dest: destination bus address + * @src: source bus address + * @len: size of the transaction + * @flags: flags for the descriptor + * + * Returns a valid DMA descriptor or %NULL in case of failure. + */ +struct dma_async_tx_descriptor * +ep93xx_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, + dma_addr_t src, size_t len, unsigned long flags) +{ + struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); + struct ep93xx_dma_desc *desc, *first; + size_t bytes, offset; + + first = NULL; + for (offset = 0; offset < len; offset += bytes) { + desc = ep93xx_dma_desc_get(edmac); + if (!desc) { + dev_warn(chan2dev(edmac), "couln't get descriptor\n"); + goto fail; + } + + bytes = min_t(size_t, len - offset, DMA_MAX_CHAN_BYTES); + + desc->src_addr = src + offset; + desc->dst_addr = dest + offset; + desc->size = bytes; + + if (!first) + first = desc; + else + list_add_tail(&desc->node, &first->tx_list); + } + + first->txd.cookie = -EBUSY; + first->txd.flags = flags; + + return &first->txd; +fail: + ep93xx_dma_desc_put(edmac, first); + return NULL; +} + +/** + * ep93xx_dma_prep_slave_sg - prepare a slave DMA operation + * @chan: channel + * @sgl: list of buffers to transfer + * @sg_len: number of entries in @sgl + * @dir: direction of tha DMA transfer + * @flags: flags for the descriptor + * + * Returns a valid DMA descriptor or %NULL in case of failure. 
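A correspondingly small sketch of how a memcpy client might drive ep93xx_dma_prep_dma_memcpy() above; the driver itself takes care of splitting transfers larger than DMA_MAX_CHAN_BYTES into a descriptor chain:

static int example_memcpy(struct dma_chan *chan, dma_addr_t dst,
			  dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *txd;

	txd = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
						   DMA_CTRL_ACK);
	if (!txd)
		return -ENOMEM;

	dmaengine_submit(txd);
	dma_async_issue_pending(chan);
	return 0;
}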
+ */ +static struct dma_async_tx_descriptor * +ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, + unsigned int sg_len, enum dma_data_direction dir, + unsigned long flags) +{ + struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); + struct ep93xx_dma_desc *desc, *first; + struct scatterlist *sg; + int i; + + if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) { + dev_warn(chan2dev(edmac), + "channel was configured with different direction\n"); + return NULL; + } + + if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) { + dev_warn(chan2dev(edmac), + "channel is already used for cyclic transfers\n"); + return NULL; + } + + first = NULL; + for_each_sg(sgl, sg, sg_len, i) { + size_t sg_len = sg_dma_len(sg); + + if (sg_len > DMA_MAX_CHAN_BYTES) { + dev_warn(chan2dev(edmac), "too big transfer size %d\n", + sg_len); + goto fail; + } + + desc = ep93xx_dma_desc_get(edmac); + if (!desc) { + dev_warn(chan2dev(edmac), "couln't get descriptor\n"); + goto fail; + } + + if (dir == DMA_TO_DEVICE) { + desc->src_addr = sg_dma_address(sg); + desc->dst_addr = edmac->runtime_addr; + } else { + desc->src_addr = edmac->runtime_addr; + desc->dst_addr = sg_dma_address(sg); + } + desc->size = sg_len; + + if (!first) + first = desc; + else + list_add_tail(&desc->node, &first->tx_list); + } + + first->txd.cookie = -EBUSY; + first->txd.flags = flags; + + return &first->txd; + +fail: + ep93xx_dma_desc_put(edmac, first); + return NULL; +} + +/** + * ep93xx_dma_prep_dma_cyclic - prepare a cyclic DMA operation + * @chan: channel + * @dma_addr: DMA mapped address of the buffer + * @buf_len: length of the buffer (in bytes) + * @period_len: lenght of a single period + * @dir: direction of the operation + * + * Prepares a descriptor for cyclic DMA operation. This means that once the + * descriptor is submitted, we will be submitting in a @period_len sized + * buffers and calling callback once the period has been elapsed. Transfer + * terminates only when client calls dmaengine_terminate_all() for this + * channel. + * + * Returns a valid DMA descriptor or %NULL in case of failure. 
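The cyclic path is what the ASoC conversion later in this series relies on. A condensed sketch of a client starting and stopping a cyclic transfer (error handling trimmed, direction assumed to be memory-to-device):

static int example_start_cyclic(struct dma_chan *chan, dma_addr_t buf,
				size_t buf_len, size_t period_len,
				dma_async_tx_callback cb, void *cb_param)
{
	struct dma_async_tx_descriptor *txd;

	txd = chan->device->device_prep_dma_cyclic(chan, buf, buf_len,
						   period_len, DMA_TO_DEVICE);
	if (!txd)
		return -EINVAL;

	txd->callback = cb;		/* invoked once per elapsed period */
	txd->callback_param = cb_param;

	dmaengine_submit(txd);
	dma_async_issue_pending(chan);
	return 0;
}

/* The transfer keeps running until dmaengine_terminate_all(chan). */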
+ */ +static struct dma_async_tx_descriptor * +ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr, + size_t buf_len, size_t period_len, + enum dma_data_direction dir) +{ + struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); + struct ep93xx_dma_desc *desc, *first; + size_t offset = 0; + + if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) { + dev_warn(chan2dev(edmac), + "channel was configured with different direction\n"); + return NULL; + } + + if (test_and_set_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) { + dev_warn(chan2dev(edmac), + "channel is already used for cyclic transfers\n"); + return NULL; + } + + if (period_len > DMA_MAX_CHAN_BYTES) { + dev_warn(chan2dev(edmac), "too big period length %d\n", + period_len); + return NULL; + } + + /* Split the buffer into period size chunks */ + first = NULL; + for (offset = 0; offset < buf_len; offset += period_len) { + desc = ep93xx_dma_desc_get(edmac); + if (!desc) { + dev_warn(chan2dev(edmac), "couln't get descriptor\n"); + goto fail; + } + + if (dir == DMA_TO_DEVICE) { + desc->src_addr = dma_addr + offset; + desc->dst_addr = edmac->runtime_addr; + } else { + desc->src_addr = edmac->runtime_addr; + desc->dst_addr = dma_addr + offset; + } + + desc->size = period_len; + + if (!first) + first = desc; + else + list_add_tail(&desc->node, &first->tx_list); + } + + first->txd.cookie = -EBUSY; + + return &first->txd; + +fail: + ep93xx_dma_desc_put(edmac, first); + return NULL; +} + +/** + * ep93xx_dma_terminate_all - terminate all transactions + * @edmac: channel + * + * Stops all DMA transactions. All descriptors are put back to the + * @edmac->free_list and callbacks are _not_ called. + */ +static int ep93xx_dma_terminate_all(struct ep93xx_dma_chan *edmac) +{ + struct ep93xx_dma_desc *desc, *_d; + unsigned long flags; + LIST_HEAD(list); + + spin_lock_irqsave(&edmac->lock, flags); + /* First we disable and flush the DMA channel */ + edmac->edma->hw_shutdown(edmac); + clear_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags); + list_splice_init(&edmac->active, &list); + list_splice_init(&edmac->queue, &list); + /* + * We then re-enable the channel. This way we can continue submitting + * the descriptors by just calling ->hw_submit() again. + */ + edmac->edma->hw_setup(edmac); + spin_unlock_irqrestore(&edmac->lock, flags); + + list_for_each_entry_safe(desc, _d, &list, node) + ep93xx_dma_desc_put(edmac, desc); + + return 0; +} + +static int ep93xx_dma_slave_config(struct ep93xx_dma_chan *edmac, + struct dma_slave_config *config) +{ + enum dma_slave_buswidth width; + unsigned long flags; + u32 addr, ctrl; + + if (!edmac->edma->m2m) + return -EINVAL; + + switch (config->direction) { + case DMA_FROM_DEVICE: + width = config->src_addr_width; + addr = config->src_addr; + break; + + case DMA_TO_DEVICE: + width = config->dst_addr_width; + addr = config->dst_addr; + break; + + default: + return -EINVAL; + } + + switch (width) { + case DMA_SLAVE_BUSWIDTH_1_BYTE: + ctrl = 0; + break; + case DMA_SLAVE_BUSWIDTH_2_BYTES: + ctrl = M2M_CONTROL_PW_16; + break; + case DMA_SLAVE_BUSWIDTH_4_BYTES: + ctrl = M2M_CONTROL_PW_32; + break; + default: + return -EINVAL; + } + + spin_lock_irqsave(&edmac->lock, flags); + edmac->runtime_addr = addr; + edmac->runtime_ctrl = ctrl; + spin_unlock_irqrestore(&edmac->lock, flags); + + return 0; +} + +/** + * ep93xx_dma_control - manipulate all pending operations on a channel + * @chan: channel + * @cmd: control command to perform + * @arg: optional argument + * + * Controls the channel. 
Function returns %0 in case of success or negative + * error in case of failure. + */ +static int ep93xx_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, + unsigned long arg) +{ + struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); + struct dma_slave_config *config; + + switch (cmd) { + case DMA_TERMINATE_ALL: + return ep93xx_dma_terminate_all(edmac); + + case DMA_SLAVE_CONFIG: + config = (struct dma_slave_config *)arg; + return ep93xx_dma_slave_config(edmac, config); + + default: + break; + } + + return -ENOSYS; +} + +/** + * ep93xx_dma_tx_status - check if a transaction is completed + * @chan: channel + * @cookie: transaction specific cookie + * @state: state of the transaction is stored here if given + * + * This function can be used to query state of a given transaction. + */ +static enum dma_status ep93xx_dma_tx_status(struct dma_chan *chan, + dma_cookie_t cookie, + struct dma_tx_state *state) +{ + struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); + dma_cookie_t last_used, last_completed; + enum dma_status ret; + unsigned long flags; + + spin_lock_irqsave(&edmac->lock, flags); + last_used = chan->cookie; + last_completed = edmac->last_completed; + spin_unlock_irqrestore(&edmac->lock, flags); + + ret = dma_async_is_complete(cookie, last_completed, last_used); + dma_set_tx_state(state, last_completed, last_used, 0); + + return ret; +} + +/** + * ep93xx_dma_issue_pending - push pending transactions to the hardware + * @chan: channel + * + * When this function is called, all pending transactions are pushed to the + * hardware and executed. + */ +static void ep93xx_dma_issue_pending(struct dma_chan *chan) +{ + ep93xx_dma_advance_work(to_ep93xx_dma_chan(chan)); +} + +static int __init ep93xx_dma_probe(struct platform_device *pdev) +{ + struct ep93xx_dma_platform_data *pdata = dev_get_platdata(&pdev->dev); + struct ep93xx_dma_engine *edma; + struct dma_device *dma_dev; + size_t edma_size; + int ret, i; + + edma_size = pdata->num_channels * sizeof(struct ep93xx_dma_chan); + edma = kzalloc(sizeof(*edma) + edma_size, GFP_KERNEL); + if (!edma) + return -ENOMEM; + + dma_dev = &edma->dma_dev; + edma->m2m = platform_get_device_id(pdev)->driver_data; + edma->num_channels = pdata->num_channels; + + INIT_LIST_HEAD(&dma_dev->channels); + for (i = 0; i < pdata->num_channels; i++) { + const struct ep93xx_dma_chan_data *cdata = &pdata->channels[i]; + struct ep93xx_dma_chan *edmac = &edma->channels[i]; + + edmac->chan.device = dma_dev; + edmac->regs = cdata->base; + edmac->irq = cdata->irq; + edmac->edma = edma; + + edmac->clk = clk_get(NULL, cdata->name); + if (IS_ERR(edmac->clk)) { + dev_warn(&pdev->dev, "failed to get clock for %s\n", + cdata->name); + continue; + } + + spin_lock_init(&edmac->lock); + INIT_LIST_HEAD(&edmac->active); + INIT_LIST_HEAD(&edmac->queue); + INIT_LIST_HEAD(&edmac->free_list); + tasklet_init(&edmac->tasklet, ep93xx_dma_tasklet, + (unsigned long)edmac); + + list_add_tail(&edmac->chan.device_node, + &dma_dev->channels); + } + + dma_cap_zero(dma_dev->cap_mask); + dma_cap_set(DMA_SLAVE, dma_dev->cap_mask); + dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask); + + dma_dev->dev = &pdev->dev; + dma_dev->device_alloc_chan_resources = ep93xx_dma_alloc_chan_resources; + dma_dev->device_free_chan_resources = ep93xx_dma_free_chan_resources; + dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg; + dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic; + dma_dev->device_control = ep93xx_dma_control; + dma_dev->device_issue_pending = ep93xx_dma_issue_pending; + 
dma_dev->device_tx_status = ep93xx_dma_tx_status; + + dma_set_max_seg_size(dma_dev->dev, DMA_MAX_CHAN_BYTES); + + if (edma->m2m) { + dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask); + dma_dev->device_prep_dma_memcpy = ep93xx_dma_prep_dma_memcpy; + + edma->hw_setup = m2m_hw_setup; + edma->hw_shutdown = m2m_hw_shutdown; + edma->hw_submit = m2m_hw_submit; + edma->hw_interrupt = m2m_hw_interrupt; + } else { + dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask); + + edma->hw_setup = m2p_hw_setup; + edma->hw_shutdown = m2p_hw_shutdown; + edma->hw_submit = m2p_hw_submit; + edma->hw_interrupt = m2p_hw_interrupt; + } + + ret = dma_async_device_register(dma_dev); + if (unlikely(ret)) { + for (i = 0; i < edma->num_channels; i++) { + struct ep93xx_dma_chan *edmac = &edma->channels[i]; + if (!IS_ERR_OR_NULL(edmac->clk)) + clk_put(edmac->clk); + } + kfree(edma); + } else { + dev_info(dma_dev->dev, "EP93xx M2%s DMA ready\n", + edma->m2m ? "M" : "P"); + } + + return ret; +} + +static struct platform_device_id ep93xx_dma_driver_ids[] = { + { "ep93xx-dma-m2p", 0 }, + { "ep93xx-dma-m2m", 1 }, + { }, +}; + +static struct platform_driver ep93xx_dma_driver = { + .driver = { + .name = "ep93xx-dma", + }, + .id_table = ep93xx_dma_driver_ids, +}; + +static int __init ep93xx_dma_module_init(void) +{ + return platform_driver_probe(&ep93xx_dma_driver, ep93xx_dma_probe); +} +subsys_initcall(ep93xx_dma_module_init); + +MODULE_AUTHOR("Mika Westerberg "); +MODULE_DESCRIPTION("EP93xx DMA driver"); +MODULE_LICENSE("GPL"); -- cgit v0.10.2 From e791e3455f1b7e92f0b66f460ade9c7a2299ac7e Mon Sep 17 00:00:00 2001 From: Mika Westerberg Date: Sun, 29 May 2011 13:10:03 +0300 Subject: ep93xx: add dmaengine platform code Add platform support code for the new EP93xx dmaengine driver. Signed-off-by: Mika Westerberg Signed-off-by: Ryan Mallon Acked-by: H Hartley Sweeten Acked-by: Vinod Koul Signed-off-by: Grant Likely diff --git a/arch/arm/mach-ep93xx/Makefile b/arch/arm/mach-ep93xx/Makefile index 33ee2c8..4920f7a 100644 --- a/arch/arm/mach-ep93xx/Makefile +++ b/arch/arm/mach-ep93xx/Makefile @@ -6,6 +6,8 @@ obj-m := obj-n := obj- := +obj-$(CONFIG_EP93XX_DMA) += dma.o + obj-$(CONFIG_MACH_ADSSPHERE) += adssphere.o obj-$(CONFIG_MACH_EDB93XX) += edb93xx.o obj-$(CONFIG_MACH_GESBC9312) += gesbc9312.o diff --git a/arch/arm/mach-ep93xx/dma.c b/arch/arm/mach-ep93xx/dma.c new file mode 100644 index 0000000..5a25708 --- /dev/null +++ b/arch/arm/mach-ep93xx/dma.c @@ -0,0 +1,108 @@ +/* + * arch/arm/mach-ep93xx/dma.c + * + * Platform support code for the EP93xx dmaengine driver. + * + * Copyright (C) 2011 Mika Westerberg + * + * This work is based on the original dma-m2p implementation with + * following copyrights: + * + * Copyright (C) 2006 Lennert Buytenhek + * Copyright (C) 2006 Applied Data Systems + * Copyright (C) 2009 Ryan Mallon + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + */ + +#include +#include +#include +#include +#include +#include + +#include +#include + +#define DMA_CHANNEL(_name, _base, _irq) \ + { .name = (_name), .base = (_base), .irq = (_irq) } + +/* + * DMA M2P channels. + * + * On the EP93xx chip the following peripherals my be allocated to the 10 + * Memory to Internal Peripheral (M2P) channels (5 transmit + 5 receive). 
+ * + * I2S contains 3 Tx and 3 Rx DMA Channels + * AAC contains 3 Tx and 3 Rx DMA Channels + * UART1 contains 1 Tx and 1 Rx DMA Channels + * UART2 contains 1 Tx and 1 Rx DMA Channels + * UART3 contains 1 Tx and 1 Rx DMA Channels + * IrDA contains 1 Tx and 1 Rx DMA Channels + * + * Registers are mapped statically in ep93xx_map_io(). + */ +static struct ep93xx_dma_chan_data ep93xx_dma_m2p_channels[] = { + DMA_CHANNEL("m2p0", EP93XX_DMA_BASE + 0x0000, IRQ_EP93XX_DMAM2P0), + DMA_CHANNEL("m2p1", EP93XX_DMA_BASE + 0x0040, IRQ_EP93XX_DMAM2P1), + DMA_CHANNEL("m2p2", EP93XX_DMA_BASE + 0x0080, IRQ_EP93XX_DMAM2P2), + DMA_CHANNEL("m2p3", EP93XX_DMA_BASE + 0x00c0, IRQ_EP93XX_DMAM2P3), + DMA_CHANNEL("m2p4", EP93XX_DMA_BASE + 0x0240, IRQ_EP93XX_DMAM2P4), + DMA_CHANNEL("m2p5", EP93XX_DMA_BASE + 0x0200, IRQ_EP93XX_DMAM2P5), + DMA_CHANNEL("m2p6", EP93XX_DMA_BASE + 0x02c0, IRQ_EP93XX_DMAM2P6), + DMA_CHANNEL("m2p7", EP93XX_DMA_BASE + 0x0280, IRQ_EP93XX_DMAM2P7), + DMA_CHANNEL("m2p8", EP93XX_DMA_BASE + 0x0340, IRQ_EP93XX_DMAM2P8), + DMA_CHANNEL("m2p9", EP93XX_DMA_BASE + 0x0300, IRQ_EP93XX_DMAM2P9), +}; + +static struct ep93xx_dma_platform_data ep93xx_dma_m2p_data = { + .channels = ep93xx_dma_m2p_channels, + .num_channels = ARRAY_SIZE(ep93xx_dma_m2p_channels), +}; + +static struct platform_device ep93xx_dma_m2p_device = { + .name = "ep93xx-dma-m2p", + .id = -1, + .dev = { + .platform_data = &ep93xx_dma_m2p_data, + }, +}; + +/* + * DMA M2M channels. + * + * There are 2 M2M channels which support memcpy/memset and in addition simple + * hardware requests from/to SSP and IDE. We do not implement an external + * hardware requests. + * + * Registers are mapped statically in ep93xx_map_io(). + */ +static struct ep93xx_dma_chan_data ep93xx_dma_m2m_channels[] = { + DMA_CHANNEL("m2m0", EP93XX_DMA_BASE + 0x0100, IRQ_EP93XX_DMAM2M0), + DMA_CHANNEL("m2m1", EP93XX_DMA_BASE + 0x0140, IRQ_EP93XX_DMAM2M1), +}; + +static struct ep93xx_dma_platform_data ep93xx_dma_m2m_data = { + .channels = ep93xx_dma_m2m_channels, + .num_channels = ARRAY_SIZE(ep93xx_dma_m2m_channels), +}; + +static struct platform_device ep93xx_dma_m2m_device = { + .name = "ep93xx-dma-m2m", + .id = -1, + .dev = { + .platform_data = &ep93xx_dma_m2m_data, + }, +}; + +static int __init ep93xx_dma_init(void) +{ + platform_device_register(&ep93xx_dma_m2p_device); + platform_device_register(&ep93xx_dma_m2m_device); + return 0; +} +arch_initcall(ep93xx_dma_init); -- cgit v0.10.2 From 51e2cc0c51298a89fc2f583d7c0a2660f7a16f37 Mon Sep 17 00:00:00 2001 From: Mika Westerberg Date: Sun, 29 May 2011 13:10:04 +0300 Subject: ASoC: ep93xx: convert to use the DMA engine API Now that we have the EP93xx DMA engine driver in place, we convert the ASoC drivers (I2S, AC97 and PCM) to take advantage of this new API. There are no functional changes. 
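In practice the conversion boils down to requesting a dmaengine channel with a filter that matches the fixed direction of an M2P channel and passes the ep93xx_dma_data along, roughly like this (a sketch of the pattern; the real filter is in the ep93xx-pcm.c hunk below):

static bool example_pcm_filter(struct dma_chan *chan, void *filter_param)
{
	struct ep93xx_dma_data *data = filter_param;

	/* M2P channels have a fixed direction, so only accept a
	 * channel that transfers the way this stream needs. */
	if (data->direction != ep93xx_dma_chan_direction(chan))
		return false;

	chan->private = data;
	return true;
}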
Signed-off-by: Mika Westerberg Acked-by: H Hartley Sweeten Acked-by: Liam Girdwood Acked-by: Mark Brown Acked-by: Vinod Koul Signed-off-by: Grant Likely diff --git a/sound/soc/ep93xx/ep93xx-ac97.c b/sound/soc/ep93xx/ep93xx-ac97.c index 104e95c..c7417c7 100644 --- a/sound/soc/ep93xx/ep93xx-ac97.c +++ b/sound/soc/ep93xx/ep93xx-ac97.c @@ -106,12 +106,12 @@ static struct ep93xx_ac97_info *ep93xx_ac97_info; static struct ep93xx_pcm_dma_params ep93xx_ac97_pcm_out = { .name = "ac97-pcm-out", - .dma_port = EP93XX_DMA_M2P_PORT_AAC1, + .dma_port = EP93XX_DMA_AAC1, }; static struct ep93xx_pcm_dma_params ep93xx_ac97_pcm_in = { .name = "ac97-pcm-in", - .dma_port = EP93XX_DMA_M2P_PORT_AAC1, + .dma_port = EP93XX_DMA_AAC1, }; static inline unsigned ep93xx_ac97_read_reg(struct ep93xx_ac97_info *info, diff --git a/sound/soc/ep93xx/ep93xx-i2s.c b/sound/soc/ep93xx/ep93xx-i2s.c index 042f4e9..30df425 100644 --- a/sound/soc/ep93xx/ep93xx-i2s.c +++ b/sound/soc/ep93xx/ep93xx-i2s.c @@ -70,11 +70,11 @@ struct ep93xx_i2s_info { struct ep93xx_pcm_dma_params ep93xx_i2s_dma_params[] = { [SNDRV_PCM_STREAM_PLAYBACK] = { .name = "i2s-pcm-out", - .dma_port = EP93XX_DMA_M2P_PORT_I2S1, + .dma_port = EP93XX_DMA_I2S1, }, [SNDRV_PCM_STREAM_CAPTURE] = { .name = "i2s-pcm-in", - .dma_port = EP93XX_DMA_M2P_PORT_I2S1, + .dma_port = EP93XX_DMA_I2S1, }, }; diff --git a/sound/soc/ep93xx/ep93xx-pcm.c b/sound/soc/ep93xx/ep93xx-pcm.c index a456e49..a07f99c 100644 --- a/sound/soc/ep93xx/ep93xx-pcm.c +++ b/sound/soc/ep93xx/ep93xx-pcm.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include @@ -53,43 +54,34 @@ static const struct snd_pcm_hardware ep93xx_pcm_hardware = { struct ep93xx_runtime_data { - struct ep93xx_dma_m2p_client cl; - struct ep93xx_pcm_dma_params *params; int pointer_bytes; - struct tasklet_struct period_tasklet; int periods; - struct ep93xx_dma_buffer buf[32]; + int period_bytes; + struct dma_chan *dma_chan; + struct ep93xx_dma_data dma_data; }; -static void ep93xx_pcm_period_elapsed(unsigned long data) +static void ep93xx_pcm_dma_callback(void *data) { - struct snd_pcm_substream *substream = (struct snd_pcm_substream *)data; - snd_pcm_period_elapsed(substream); -} + struct snd_pcm_substream *substream = data; + struct ep93xx_runtime_data *rtd = substream->runtime->private_data; -static void ep93xx_pcm_buffer_started(void *cookie, - struct ep93xx_dma_buffer *buf) -{ + rtd->pointer_bytes += rtd->period_bytes; + rtd->pointer_bytes %= rtd->period_bytes * rtd->periods; + + snd_pcm_period_elapsed(substream); } -static void ep93xx_pcm_buffer_finished(void *cookie, - struct ep93xx_dma_buffer *buf, - int bytes, int error) +static bool ep93xx_pcm_dma_filter(struct dma_chan *chan, void *filter_param) { - struct snd_pcm_substream *substream = cookie; - struct ep93xx_runtime_data *rtd = substream->runtime->private_data; - - if (buf == rtd->buf + rtd->periods - 1) - rtd->pointer_bytes = 0; - else - rtd->pointer_bytes += buf->size; + struct ep93xx_dma_data *data = filter_param; - if (!error) { - ep93xx_dma_m2p_submit_recursive(&rtd->cl, buf); - tasklet_schedule(&rtd->period_tasklet); - } else { - snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN); + if (data->direction == ep93xx_dma_chan_direction(chan)) { + chan->private = data; + return true; } + + return false; } static int ep93xx_pcm_open(struct snd_pcm_substream *substream) @@ -98,30 +90,38 @@ static int ep93xx_pcm_open(struct snd_pcm_substream *substream) struct snd_soc_dai *cpu_dai = soc_rtd->cpu_dai; struct ep93xx_pcm_dma_params *dma_params; struct 
ep93xx_runtime_data *rtd; + dma_cap_mask_t mask; int ret; - dma_params = snd_soc_dai_get_dma_data(cpu_dai, substream); + ret = snd_pcm_hw_constraint_integer(substream->runtime, + SNDRV_PCM_HW_PARAM_PERIODS); + if (ret < 0) + return ret; + snd_soc_set_runtime_hwparams(substream, &ep93xx_pcm_hardware); rtd = kmalloc(sizeof(*rtd), GFP_KERNEL); if (!rtd) return -ENOMEM; - memset(&rtd->period_tasklet, 0, sizeof(rtd->period_tasklet)); - rtd->period_tasklet.func = ep93xx_pcm_period_elapsed; - rtd->period_tasklet.data = (unsigned long)substream; - - rtd->cl.name = dma_params->name; - rtd->cl.flags = dma_params->dma_port | EP93XX_DMA_M2P_IGNORE_ERROR | - ((substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ? - EP93XX_DMA_M2P_TX : EP93XX_DMA_M2P_RX); - rtd->cl.cookie = substream; - rtd->cl.buffer_started = ep93xx_pcm_buffer_started; - rtd->cl.buffer_finished = ep93xx_pcm_buffer_finished; - ret = ep93xx_dma_m2p_client_register(&rtd->cl); - if (ret < 0) { + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + dma_cap_set(DMA_CYCLIC, mask); + + dma_params = snd_soc_dai_get_dma_data(cpu_dai, substream); + rtd->dma_data.port = dma_params->dma_port; + rtd->dma_data.name = dma_params->name; + + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) + rtd->dma_data.direction = DMA_TO_DEVICE; + else + rtd->dma_data.direction = DMA_FROM_DEVICE; + + rtd->dma_chan = dma_request_channel(mask, ep93xx_pcm_dma_filter, + &rtd->dma_data); + if (!rtd->dma_chan) { kfree(rtd); - return ret; + return -EINVAL; } substream->runtime->private_data = rtd; @@ -132,31 +132,52 @@ static int ep93xx_pcm_close(struct snd_pcm_substream *substream) { struct ep93xx_runtime_data *rtd = substream->runtime->private_data; - ep93xx_dma_m2p_client_unregister(&rtd->cl); + dma_release_channel(rtd->dma_chan); kfree(rtd); return 0; } +static int ep93xx_pcm_dma_submit(struct snd_pcm_substream *substream) +{ + struct snd_pcm_runtime *runtime = substream->runtime; + struct ep93xx_runtime_data *rtd = runtime->private_data; + struct dma_chan *chan = rtd->dma_chan; + struct dma_device *dma_dev = chan->device; + struct dma_async_tx_descriptor *desc; + + rtd->pointer_bytes = 0; + desc = dma_dev->device_prep_dma_cyclic(chan, runtime->dma_addr, + rtd->period_bytes * rtd->periods, + rtd->period_bytes, + rtd->dma_data.direction); + if (!desc) + return -EINVAL; + + desc->callback = ep93xx_pcm_dma_callback; + desc->callback_param = substream; + + dmaengine_submit(desc); + return 0; +} + +static void ep93xx_pcm_dma_flush(struct snd_pcm_substream *substream) +{ + struct snd_pcm_runtime *runtime = substream->runtime; + struct ep93xx_runtime_data *rtd = runtime->private_data; + + dmaengine_terminate_all(rtd->dma_chan); +} + static int ep93xx_pcm_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct snd_pcm_runtime *runtime = substream->runtime; struct ep93xx_runtime_data *rtd = runtime->private_data; - size_t totsize = params_buffer_bytes(params); - size_t period = params_period_bytes(params); - int i; snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer); - runtime->dma_bytes = totsize; - - rtd->periods = (totsize + period - 1) / period; - for (i = 0; i < rtd->periods; i++) { - rtd->buf[i].bus_addr = runtime->dma_addr + (i * period); - rtd->buf[i].size = period; - if ((i + 1) * period > totsize) - rtd->buf[i].size = totsize - (i * period); - } + rtd->periods = params_periods(params); + rtd->period_bytes = params_period_bytes(params); return 0; } @@ -168,24 +189,20 @@ static int ep93xx_pcm_hw_free(struct snd_pcm_substream 
*substream) static int ep93xx_pcm_trigger(struct snd_pcm_substream *substream, int cmd) { - struct ep93xx_runtime_data *rtd = substream->runtime->private_data; int ret; - int i; ret = 0; switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: - rtd->pointer_bytes = 0; - for (i = 0; i < rtd->periods; i++) - ep93xx_dma_m2p_submit(&rtd->cl, rtd->buf + i); + ret = ep93xx_pcm_dma_submit(substream); break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: case SNDRV_PCM_TRIGGER_PAUSE_PUSH: - ep93xx_dma_m2p_flush(&rtd->cl); + ep93xx_pcm_dma_flush(substream); break; default: -- cgit v0.10.2 From 71cebd700257a4f644066631bf02187d8f67817e Mon Sep 17 00:00:00 2001 From: Mika Westerberg Date: Sun, 29 May 2011 13:10:05 +0300 Subject: ep93xx: remove the old M2P DMA code Since we have converted all existing users of the old DMA API to use the DMA engine API the old code can be dropped. Signed-off-by: Mika Westerberg Acked-by: Ryan Mallon Acked-by: H Hartley Sweeten Acked-by: Vinod Koul Signed-off-by: Grant Likely diff --git a/arch/arm/mach-ep93xx/Makefile b/arch/arm/mach-ep93xx/Makefile index 4920f7a..21e721a 100644 --- a/arch/arm/mach-ep93xx/Makefile +++ b/arch/arm/mach-ep93xx/Makefile @@ -1,7 +1,7 @@ # # Makefile for the linux kernel. # -obj-y := core.o clock.o dma-m2p.o gpio.o +obj-y := core.o clock.o gpio.o obj-m := obj-n := obj- := diff --git a/arch/arm/mach-ep93xx/dma-m2p.c b/arch/arm/mach-ep93xx/dma-m2p.c deleted file mode 100644 index a696d35..0000000 --- a/arch/arm/mach-ep93xx/dma-m2p.c +++ /dev/null @@ -1,411 +0,0 @@ -/* - * arch/arm/mach-ep93xx/dma-m2p.c - * M2P DMA handling for Cirrus EP93xx chips. - * - * Copyright (C) 2006 Lennert Buytenhek - * Copyright (C) 2006 Applied Data Systems - * - * Copyright (C) 2009 Ryan Mallon - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or (at - * your option) any later version. - */ - -/* - * On the EP93xx chip the following peripherals my be allocated to the 10 - * Memory to Internal Peripheral (M2P) channels (5 transmit + 5 receive). - * - * I2S contains 3 Tx and 3 Rx DMA Channels - * AAC contains 3 Tx and 3 Rx DMA Channels - * UART1 contains 1 Tx and 1 Rx DMA Channels - * UART2 contains 1 Tx and 1 Rx DMA Channels - * UART3 contains 1 Tx and 1 Rx DMA Channels - * IrDA contains 1 Tx and 1 Rx DMA Channels - * - * SSP and IDE use the Memory to Memory (M2M) channels and are not covered - * with this implementation. - */ - -#define pr_fmt(fmt) "ep93xx " KBUILD_MODNAME ": " fmt - -#include -#include -#include -#include -#include -#include - -#include -#include - -#define M2P_CONTROL 0x00 -#define M2P_CONTROL_STALL_IRQ_EN (1 << 0) -#define M2P_CONTROL_NFB_IRQ_EN (1 << 1) -#define M2P_CONTROL_ERROR_IRQ_EN (1 << 3) -#define M2P_CONTROL_ENABLE (1 << 4) -#define M2P_INTERRUPT 0x04 -#define M2P_INTERRUPT_STALL (1 << 0) -#define M2P_INTERRUPT_NFB (1 << 1) -#define M2P_INTERRUPT_ERROR (1 << 3) -#define M2P_PPALLOC 0x08 -#define M2P_STATUS 0x0c -#define M2P_REMAIN 0x14 -#define M2P_MAXCNT0 0x20 -#define M2P_BASE0 0x24 -#define M2P_MAXCNT1 0x30 -#define M2P_BASE1 0x34 - -#define STATE_IDLE 0 /* Channel is inactive. */ -#define STATE_STALL 1 /* Channel is active, no buffers pending. */ -#define STATE_ON 2 /* Channel is active, one buffer pending. */ -#define STATE_NEXT 3 /* Channel is active, two buffers pending. 
*/ - -struct m2p_channel { - char *name; - void __iomem *base; - int irq; - - struct clk *clk; - spinlock_t lock; - - void *client; - unsigned next_slot:1; - struct ep93xx_dma_buffer *buffer_xfer; - struct ep93xx_dma_buffer *buffer_next; - struct list_head buffers_pending; -}; - -static struct m2p_channel m2p_rx[] = { - {"m2p1", EP93XX_DMA_BASE + 0x0040, IRQ_EP93XX_DMAM2P1}, - {"m2p3", EP93XX_DMA_BASE + 0x00c0, IRQ_EP93XX_DMAM2P3}, - {"m2p5", EP93XX_DMA_BASE + 0x0200, IRQ_EP93XX_DMAM2P5}, - {"m2p7", EP93XX_DMA_BASE + 0x0280, IRQ_EP93XX_DMAM2P7}, - {"m2p9", EP93XX_DMA_BASE + 0x0300, IRQ_EP93XX_DMAM2P9}, - {NULL}, -}; - -static struct m2p_channel m2p_tx[] = { - {"m2p0", EP93XX_DMA_BASE + 0x0000, IRQ_EP93XX_DMAM2P0}, - {"m2p2", EP93XX_DMA_BASE + 0x0080, IRQ_EP93XX_DMAM2P2}, - {"m2p4", EP93XX_DMA_BASE + 0x0240, IRQ_EP93XX_DMAM2P4}, - {"m2p6", EP93XX_DMA_BASE + 0x02c0, IRQ_EP93XX_DMAM2P6}, - {"m2p8", EP93XX_DMA_BASE + 0x0340, IRQ_EP93XX_DMAM2P8}, - {NULL}, -}; - -static void feed_buf(struct m2p_channel *ch, struct ep93xx_dma_buffer *buf) -{ - if (ch->next_slot == 0) { - writel(buf->size, ch->base + M2P_MAXCNT0); - writel(buf->bus_addr, ch->base + M2P_BASE0); - } else { - writel(buf->size, ch->base + M2P_MAXCNT1); - writel(buf->bus_addr, ch->base + M2P_BASE1); - } - ch->next_slot ^= 1; -} - -static void choose_buffer_xfer(struct m2p_channel *ch) -{ - struct ep93xx_dma_buffer *buf; - - ch->buffer_xfer = NULL; - if (!list_empty(&ch->buffers_pending)) { - buf = list_entry(ch->buffers_pending.next, - struct ep93xx_dma_buffer, list); - list_del(&buf->list); - feed_buf(ch, buf); - ch->buffer_xfer = buf; - } -} - -static void choose_buffer_next(struct m2p_channel *ch) -{ - struct ep93xx_dma_buffer *buf; - - ch->buffer_next = NULL; - if (!list_empty(&ch->buffers_pending)) { - buf = list_entry(ch->buffers_pending.next, - struct ep93xx_dma_buffer, list); - list_del(&buf->list); - feed_buf(ch, buf); - ch->buffer_next = buf; - } -} - -static inline void m2p_set_control(struct m2p_channel *ch, u32 v) -{ - /* - * The control register must be read immediately after being written so - * that the internal state machine is correctly updated. See the ep93xx - * users' guide for details. 
- */ - writel(v, ch->base + M2P_CONTROL); - readl(ch->base + M2P_CONTROL); -} - -static inline int m2p_channel_state(struct m2p_channel *ch) -{ - return (readl(ch->base + M2P_STATUS) >> 4) & 0x3; -} - -static irqreturn_t m2p_irq(int irq, void *dev_id) -{ - struct m2p_channel *ch = dev_id; - struct ep93xx_dma_m2p_client *cl; - u32 irq_status, v; - int error = 0; - - cl = ch->client; - - spin_lock(&ch->lock); - irq_status = readl(ch->base + M2P_INTERRUPT); - - if (irq_status & M2P_INTERRUPT_ERROR) { - writel(M2P_INTERRUPT_ERROR, ch->base + M2P_INTERRUPT); - error = 1; - } - - if ((irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)) == 0) { - spin_unlock(&ch->lock); - return IRQ_NONE; - } - - switch (m2p_channel_state(ch)) { - case STATE_IDLE: - pr_crit("dma interrupt without a dma buffer\n"); - BUG(); - break; - - case STATE_STALL: - cl->buffer_finished(cl->cookie, ch->buffer_xfer, 0, error); - if (ch->buffer_next != NULL) { - cl->buffer_finished(cl->cookie, ch->buffer_next, - 0, error); - } - choose_buffer_xfer(ch); - choose_buffer_next(ch); - if (ch->buffer_xfer != NULL) - cl->buffer_started(cl->cookie, ch->buffer_xfer); - break; - - case STATE_ON: - cl->buffer_finished(cl->cookie, ch->buffer_xfer, 0, error); - ch->buffer_xfer = ch->buffer_next; - choose_buffer_next(ch); - cl->buffer_started(cl->cookie, ch->buffer_xfer); - break; - - case STATE_NEXT: - pr_crit("dma interrupt while next\n"); - BUG(); - break; - } - - v = readl(ch->base + M2P_CONTROL) & ~(M2P_CONTROL_STALL_IRQ_EN | - M2P_CONTROL_NFB_IRQ_EN); - if (ch->buffer_xfer != NULL) - v |= M2P_CONTROL_STALL_IRQ_EN; - if (ch->buffer_next != NULL) - v |= M2P_CONTROL_NFB_IRQ_EN; - m2p_set_control(ch, v); - - spin_unlock(&ch->lock); - return IRQ_HANDLED; -} - -static struct m2p_channel *find_free_channel(struct ep93xx_dma_m2p_client *cl) -{ - struct m2p_channel *ch; - int i; - - if (cl->flags & EP93XX_DMA_M2P_RX) - ch = m2p_rx; - else - ch = m2p_tx; - - for (i = 0; ch[i].base; i++) { - struct ep93xx_dma_m2p_client *client; - - client = ch[i].client; - if (client != NULL) { - int port; - - port = cl->flags & EP93XX_DMA_M2P_PORT_MASK; - if (port == (client->flags & - EP93XX_DMA_M2P_PORT_MASK)) { - pr_warning("DMA channel already used by %s\n", - cl->name ? : "unknown client"); - return ERR_PTR(-EBUSY); - } - } - } - - for (i = 0; ch[i].base; i++) { - if (ch[i].client == NULL) - return ch + i; - } - - pr_warning("No free DMA channel for %s\n", - cl->name ? : "unknown client"); - return ERR_PTR(-ENODEV); -} - -static void channel_enable(struct m2p_channel *ch) -{ - struct ep93xx_dma_m2p_client *cl = ch->client; - u32 v; - - clk_enable(ch->clk); - - v = cl->flags & EP93XX_DMA_M2P_PORT_MASK; - writel(v, ch->base + M2P_PPALLOC); - - v = cl->flags & EP93XX_DMA_M2P_ERROR_MASK; - v |= M2P_CONTROL_ENABLE | M2P_CONTROL_ERROR_IRQ_EN; - m2p_set_control(ch, v); -} - -static void channel_disable(struct m2p_channel *ch) -{ - u32 v; - - v = readl(ch->base + M2P_CONTROL); - v &= ~(M2P_CONTROL_STALL_IRQ_EN | M2P_CONTROL_NFB_IRQ_EN); - m2p_set_control(ch, v); - - while (m2p_channel_state(ch) >= STATE_ON) - cpu_relax(); - - m2p_set_control(ch, 0x0); - - while (m2p_channel_state(ch) == STATE_STALL) - cpu_relax(); - - clk_disable(ch->clk); -} - -int ep93xx_dma_m2p_client_register(struct ep93xx_dma_m2p_client *cl) -{ - struct m2p_channel *ch; - int err; - - ch = find_free_channel(cl); - if (IS_ERR(ch)) - return PTR_ERR(ch); - - err = request_irq(ch->irq, m2p_irq, 0, cl->name ? 
: "dma-m2p", ch); - if (err) - return err; - - ch->client = cl; - ch->next_slot = 0; - ch->buffer_xfer = NULL; - ch->buffer_next = NULL; - INIT_LIST_HEAD(&ch->buffers_pending); - - cl->channel = ch; - - channel_enable(ch); - - return 0; -} -EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_client_register); - -void ep93xx_dma_m2p_client_unregister(struct ep93xx_dma_m2p_client *cl) -{ - struct m2p_channel *ch = cl->channel; - - channel_disable(ch); - free_irq(ch->irq, ch); - ch->client = NULL; -} -EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_client_unregister); - -void ep93xx_dma_m2p_submit(struct ep93xx_dma_m2p_client *cl, - struct ep93xx_dma_buffer *buf) -{ - struct m2p_channel *ch = cl->channel; - unsigned long flags; - u32 v; - - spin_lock_irqsave(&ch->lock, flags); - v = readl(ch->base + M2P_CONTROL); - if (ch->buffer_xfer == NULL) { - ch->buffer_xfer = buf; - feed_buf(ch, buf); - cl->buffer_started(cl->cookie, buf); - - v |= M2P_CONTROL_STALL_IRQ_EN; - m2p_set_control(ch, v); - - } else if (ch->buffer_next == NULL) { - ch->buffer_next = buf; - feed_buf(ch, buf); - - v |= M2P_CONTROL_NFB_IRQ_EN; - m2p_set_control(ch, v); - } else { - list_add_tail(&buf->list, &ch->buffers_pending); - } - spin_unlock_irqrestore(&ch->lock, flags); -} -EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_submit); - -void ep93xx_dma_m2p_submit_recursive(struct ep93xx_dma_m2p_client *cl, - struct ep93xx_dma_buffer *buf) -{ - struct m2p_channel *ch = cl->channel; - - list_add_tail(&buf->list, &ch->buffers_pending); -} -EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_submit_recursive); - -void ep93xx_dma_m2p_flush(struct ep93xx_dma_m2p_client *cl) -{ - struct m2p_channel *ch = cl->channel; - - channel_disable(ch); - ch->next_slot = 0; - ch->buffer_xfer = NULL; - ch->buffer_next = NULL; - INIT_LIST_HEAD(&ch->buffers_pending); - channel_enable(ch); -} -EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_flush); - -static int init_channel(struct m2p_channel *ch) -{ - ch->clk = clk_get(NULL, ch->name); - if (IS_ERR(ch->clk)) - return PTR_ERR(ch->clk); - - spin_lock_init(&ch->lock); - ch->client = NULL; - - return 0; -} - -static int __init ep93xx_dma_m2p_init(void) -{ - int i; - int ret; - - for (i = 0; m2p_rx[i].base; i++) { - ret = init_channel(m2p_rx + i); - if (ret) - return ret; - } - - for (i = 0; m2p_tx[i].base; i++) { - ret = init_channel(m2p_tx + i); - if (ret) - return ret; - } - - pr_info("M2P DMA subsystem initialized\n"); - return 0; -} -arch_initcall(ep93xx_dma_m2p_init); diff --git a/arch/arm/mach-ep93xx/include/mach/dma.h b/arch/arm/mach-ep93xx/include/mach/dma.h index 6e7049a..46d4d87 100644 --- a/arch/arm/mach-ep93xx/include/mach/dma.h +++ b/arch/arm/mach-ep93xx/include/mach/dma.h @@ -1,153 +1,10 @@ -/** - * DOC: EP93xx DMA M2P memory to peripheral and peripheral to memory engine - * - * The EP93xx DMA M2P subsystem handles DMA transfers between memory and - * peripherals. DMA M2P channels are available for audio, UARTs and IrDA. - * See chapter 10 of the EP93xx users guide for full details on the DMA M2P - * engine. - * - * See sound/soc/ep93xx/ep93xx-pcm.c for an example use of the DMA M2P code. 
- * - */ - #ifndef __ASM_ARCH_DMA_H #define __ASM_ARCH_DMA_H -#include #include #include #include -/** - * struct ep93xx_dma_buffer - Information about a buffer to be transferred - * using the DMA M2P engine - * - * @list: Entry in DMA buffer list - * @bus_addr: Physical address of the buffer - * @size: Size of the buffer in bytes - */ -struct ep93xx_dma_buffer { - struct list_head list; - u32 bus_addr; - u16 size; -}; - -/** - * struct ep93xx_dma_m2p_client - Information about a DMA M2P client - * - * @name: Unique name for this client - * @flags: Client flags - * @cookie: User data to pass to callback functions - * @buffer_started: Non NULL function to call when a transfer is started. - * The arguments are the user data cookie and the DMA - * buffer which is starting. - * @buffer_finished: Non NULL function to call when a transfer is completed. - * The arguments are the user data cookie, the DMA buffer - * which has completed, and a boolean flag indicating if - * the transfer had an error. - */ -struct ep93xx_dma_m2p_client { - char *name; - u8 flags; - void *cookie; - void (*buffer_started)(void *cookie, - struct ep93xx_dma_buffer *buf); - void (*buffer_finished)(void *cookie, - struct ep93xx_dma_buffer *buf, - int bytes, int error); - - /* private: Internal use only */ - void *channel; -}; - -/* DMA M2P ports */ -#define EP93XX_DMA_M2P_PORT_I2S1 0x00 -#define EP93XX_DMA_M2P_PORT_I2S2 0x01 -#define EP93XX_DMA_M2P_PORT_AAC1 0x02 -#define EP93XX_DMA_M2P_PORT_AAC2 0x03 -#define EP93XX_DMA_M2P_PORT_AAC3 0x04 -#define EP93XX_DMA_M2P_PORT_I2S3 0x05 -#define EP93XX_DMA_M2P_PORT_UART1 0x06 -#define EP93XX_DMA_M2P_PORT_UART2 0x07 -#define EP93XX_DMA_M2P_PORT_UART3 0x08 -#define EP93XX_DMA_M2P_PORT_IRDA 0x09 -#define EP93XX_DMA_M2P_PORT_MASK 0x0f - -/* DMA M2P client flags */ -#define EP93XX_DMA_M2P_TX 0x00 /* Memory to peripheral */ -#define EP93XX_DMA_M2P_RX 0x10 /* Peripheral to memory */ - -/* - * DMA M2P client error handling flags. See the EP93xx users guide - * documentation on the DMA M2P CONTROL register for more details - */ -#define EP93XX_DMA_M2P_ABORT_ON_ERROR 0x20 /* Abort on peripheral error */ -#define EP93XX_DMA_M2P_IGNORE_ERROR 0x40 /* Ignore peripheral errors */ -#define EP93XX_DMA_M2P_ERROR_MASK 0x60 /* Mask of error bits */ - -/** - * ep93xx_dma_m2p_client_register - Register a client with the DMA M2P - * subsystem - * - * @m2p: Client information to register - * returns 0 on success - * - * The DMA M2P subsystem allocates a channel and an interrupt line for the DMA - * client - */ -int ep93xx_dma_m2p_client_register(struct ep93xx_dma_m2p_client *m2p); - -/** - * ep93xx_dma_m2p_client_unregister - Unregister a client from the DMA M2P - * subsystem - * - * @m2p: Client to unregister - * - * Any transfers currently in progress will be completed in hardware, but - * ignored in software. - */ -void ep93xx_dma_m2p_client_unregister(struct ep93xx_dma_m2p_client *m2p); - -/** - * ep93xx_dma_m2p_submit - Submit a DMA M2P transfer - * - * @m2p: DMA Client to submit the transfer on - * @buf: DMA Buffer to submit - * - * If the current or next transfer positions are free on the M2P client then - * the transfer is started immediately. If not, the transfer is added to the - * list of pending transfers. This function must not be called from the - * buffer_finished callback for an M2P channel. 
- * - */ -void ep93xx_dma_m2p_submit(struct ep93xx_dma_m2p_client *m2p, - struct ep93xx_dma_buffer *buf); - -/** - * ep93xx_dma_m2p_submit_recursive - Put a DMA transfer on the pending list - * for an M2P channel - * - * @m2p: DMA Client to submit the transfer on - * @buf: DMA Buffer to submit - * - * This function must only be called from the buffer_finished callback for an - * M2P channel. It is commonly used to add the next transfer in a chained list - * of DMA transfers. - */ -void ep93xx_dma_m2p_submit_recursive(struct ep93xx_dma_m2p_client *m2p, - struct ep93xx_dma_buffer *buf); - -/** - * ep93xx_dma_m2p_flush - Flush all pending transfers on a DMA M2P client - * - * @m2p: DMA client to flush transfers on - * - * Any transfers currently in progress will be completed in hardware, but - * ignored in software. - * - */ -void ep93xx_dma_m2p_flush(struct ep93xx_dma_m2p_client *m2p); - /* * M2P channels. * -- cgit v0.10.2 From 626a96db11698119a67eeda130488e869aa6f14e Mon Sep 17 00:00:00 2001 From: Mika Westerberg Date: Sun, 29 May 2011 13:10:06 +0300 Subject: spi/ep93xx: add DMA support This patch adds DMA support for the EP93xx SPI driver. By default the DMA is not enabled but it can be enabled by setting ep93xx_spi_info.use_dma to true in board configuration file. Note that the SPI driver still uses PIO for small transfers (<= 8 bytes) for performance reasons. Signed-off-by: Mika Westerberg Acked-by: H Hartley Sweeten Acked-by: Vinod Koul Signed-off-by: Grant Likely diff --git a/Documentation/spi/ep93xx_spi b/Documentation/spi/ep93xx_spi index 6325f5b..d8eb01c 100644 --- a/Documentation/spi/ep93xx_spi +++ b/Documentation/spi/ep93xx_spi @@ -88,6 +88,16 @@ static void __init ts72xx_init_machine(void) ARRAY_SIZE(ts72xx_spi_devices)); } +The driver can use DMA for the transfers also. In this case ts72xx_spi_info +becomes: + +static struct ep93xx_spi_info ts72xx_spi_info = { + .num_chipselect = ARRAY_SIZE(ts72xx_spi_devices), + .use_dma = true; +}; + +Note that CONFIG_EP93XX_DMA should be enabled as well. + Thanks to ========= Martin Guy, H. Hartley Sweeten and others who helped me during development of diff --git a/arch/arm/mach-ep93xx/core.c b/arch/arm/mach-ep93xx/core.c index 8207954..cc9f1d4 100644 --- a/arch/arm/mach-ep93xx/core.c +++ b/arch/arm/mach-ep93xx/core.c @@ -488,11 +488,15 @@ static struct resource ep93xx_spi_resources[] = { }, }; +static u64 ep93xx_spi_dma_mask = DMA_BIT_MASK(32); + static struct platform_device ep93xx_spi_device = { .name = "ep93xx-spi", .id = 0, .dev = { - .platform_data = &ep93xx_spi_master_data, + .platform_data = &ep93xx_spi_master_data, + .coherent_dma_mask = DMA_BIT_MASK(32), + .dma_mask = &ep93xx_spi_dma_mask, }, .num_resources = ARRAY_SIZE(ep93xx_spi_resources), .resource = ep93xx_spi_resources, diff --git a/arch/arm/mach-ep93xx/include/mach/ep93xx_spi.h b/arch/arm/mach-ep93xx/include/mach/ep93xx_spi.h index 0a37961..9bb63ac 100644 --- a/arch/arm/mach-ep93xx/include/mach/ep93xx_spi.h +++ b/arch/arm/mach-ep93xx/include/mach/ep93xx_spi.h @@ -7,9 +7,11 @@ struct spi_device; * struct ep93xx_spi_info - EP93xx specific SPI descriptor * @num_chipselect: number of chip selects on this board, must be * at least one + * @use_dma: use DMA for the transfers */ struct ep93xx_spi_info { int num_chipselect; + bool use_dma; }; /** diff --git a/drivers/spi/ep93xx_spi.c b/drivers/spi/ep93xx_spi.c index d357007..1cf6454 100644 --- a/drivers/spi/ep93xx_spi.c +++ b/drivers/spi/ep93xx_spi.c @@ -1,7 +1,7 @@ /* * Driver for Cirrus Logic EP93xx SPI controller. 
* - * Copyright (c) 2010 Mika Westerberg + * Copyright (C) 2010-2011 Mika Westerberg * * Explicit FIFO handling code was inspired by amba-pl022 driver. * @@ -21,13 +21,16 @@ #include #include #include +#include #include #include #include #include #include +#include #include +#include #include #define SSPCR0 0x0000 @@ -71,6 +74,7 @@ * @pdev: pointer to platform device * @clk: clock for the controller * @regs_base: pointer to ioremap()'d registers + * @sspdr_phys: physical address of the SSPDR register * @irq: IRQ number used by the driver * @min_rate: minimum clock rate (in Hz) supported by the controller * @max_rate: maximum clock rate (in Hz) supported by the controller @@ -84,6 +88,14 @@ * @rx: current byte in transfer to receive * @fifo_level: how full is FIFO (%0..%SPI_FIFO_SIZE - %1). Receiving one * frame decreases this level and sending one frame increases it. + * @dma_rx: RX DMA channel + * @dma_tx: TX DMA channel + * @dma_rx_data: RX parameters passed to the DMA engine + * @dma_tx_data: TX parameters passed to the DMA engine + * @rx_sgt: sg table for RX transfers + * @tx_sgt: sg table for TX transfers + * @zeropage: dummy page used as RX buffer when only TX buffer is passed in by + * the client * * This structure holds EP93xx SPI controller specific information. When * @running is %true, driver accepts transfer requests from protocol drivers. @@ -100,6 +112,7 @@ struct ep93xx_spi { const struct platform_device *pdev; struct clk *clk; void __iomem *regs_base; + unsigned long sspdr_phys; int irq; unsigned long min_rate; unsigned long max_rate; @@ -112,6 +125,13 @@ struct ep93xx_spi { size_t tx; size_t rx; size_t fifo_level; + struct dma_chan *dma_rx; + struct dma_chan *dma_tx; + struct ep93xx_dma_data dma_rx_data; + struct ep93xx_dma_data dma_tx_data; + struct sg_table rx_sgt; + struct sg_table tx_sgt; + void *zeropage; }; /** @@ -496,14 +516,195 @@ static int ep93xx_spi_read_write(struct ep93xx_spi *espi) espi->fifo_level++; } - if (espi->rx == t->len) { - msg->actual_length += t->len; + if (espi->rx == t->len) return 0; - } return -EINPROGRESS; } +static void ep93xx_spi_pio_transfer(struct ep93xx_spi *espi) +{ + /* + * Now everything is set up for the current transfer. We prime the TX + * FIFO, enable interrupts, and wait for the transfer to complete. + */ + if (ep93xx_spi_read_write(espi)) { + ep93xx_spi_enable_interrupts(espi); + wait_for_completion(&espi->wait); + } +} + +/** + * ep93xx_spi_dma_prepare() - prepares a DMA transfer + * @espi: ep93xx SPI controller struct + * @dir: DMA transfer direction + * + * Function configures the DMA, maps the buffer and prepares the DMA + * descriptor. Returns a valid DMA descriptor in case of success and ERR_PTR + * in case of failure. 
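The configuration step referred to here uses the generic dmaengine slave parameters. A minimal sketch of the receive-direction case, assuming a byte-wide FIFO (the real code below derives the width from bits_per_word and handles both directions):

static int example_config_rx(struct dma_chan *chan, dma_addr_t fifo_phys)
{
	struct dma_slave_config conf;

	memset(&conf, 0, sizeof(conf));
	conf.direction      = DMA_FROM_DEVICE;
	conf.src_addr       = fifo_phys;	/* peripheral data register */
	conf.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;

	return dmaengine_slave_config(chan, &conf);
}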
+ */ +static struct dma_async_tx_descriptor * +ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir) +{ + struct spi_transfer *t = espi->current_msg->state; + struct dma_async_tx_descriptor *txd; + enum dma_slave_buswidth buswidth; + struct dma_slave_config conf; + struct scatterlist *sg; + struct sg_table *sgt; + struct dma_chan *chan; + const void *buf, *pbuf; + size_t len = t->len; + int i, ret, nents; + + if (bits_per_word(espi) > 8) + buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES; + else + buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE; + + memset(&conf, 0, sizeof(conf)); + conf.direction = dir; + + if (dir == DMA_FROM_DEVICE) { + chan = espi->dma_rx; + buf = t->rx_buf; + sgt = &espi->rx_sgt; + + conf.src_addr = espi->sspdr_phys; + conf.src_addr_width = buswidth; + } else { + chan = espi->dma_tx; + buf = t->tx_buf; + sgt = &espi->tx_sgt; + + conf.dst_addr = espi->sspdr_phys; + conf.dst_addr_width = buswidth; + } + + ret = dmaengine_slave_config(chan, &conf); + if (ret) + return ERR_PTR(ret); + + /* + * We need to split the transfer into PAGE_SIZE'd chunks. This is + * because we are using @espi->zeropage to provide a zero RX buffer + * for the TX transfers and we have only allocated one page for that. + * + * For performance reasons we allocate a new sg_table only when + * needed. Otherwise we will re-use the current one. Eventually the + * last sg_table is released in ep93xx_spi_release_dma(). + */ + + nents = DIV_ROUND_UP(len, PAGE_SIZE); + if (nents != sgt->nents) { + sg_free_table(sgt); + + ret = sg_alloc_table(sgt, nents, GFP_KERNEL); + if (ret) + return ERR_PTR(ret); + } + + pbuf = buf; + for_each_sg(sgt->sgl, sg, sgt->nents, i) { + size_t bytes = min_t(size_t, len, PAGE_SIZE); + + if (buf) { + sg_set_page(sg, virt_to_page(pbuf), bytes, + offset_in_page(pbuf)); + } else { + sg_set_page(sg, virt_to_page(espi->zeropage), + bytes, 0); + } + + pbuf += bytes; + len -= bytes; + } + + if (WARN_ON(len)) { + dev_warn(&espi->pdev->dev, "len = %d expected 0!", len); + return ERR_PTR(-EINVAL); + } + + nents = dma_map_sg(chan->device->dev, sgt->sgl, sgt->nents, dir); + if (!nents) + return ERR_PTR(-ENOMEM); + + txd = chan->device->device_prep_slave_sg(chan, sgt->sgl, nents, + dir, DMA_CTRL_ACK); + if (!txd) { + dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir); + return ERR_PTR(-ENOMEM); + } + return txd; +} + +/** + * ep93xx_spi_dma_finish() - finishes with a DMA transfer + * @espi: ep93xx SPI controller struct + * @dir: DMA transfer direction + * + * Function finishes with the DMA transfer. After this, the DMA buffer is + * unmapped. 
+ */ +static void ep93xx_spi_dma_finish(struct ep93xx_spi *espi, + enum dma_data_direction dir) +{ + struct dma_chan *chan; + struct sg_table *sgt; + + if (dir == DMA_FROM_DEVICE) { + chan = espi->dma_rx; + sgt = &espi->rx_sgt; + } else { + chan = espi->dma_tx; + sgt = &espi->tx_sgt; + } + + dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir); +} + +static void ep93xx_spi_dma_callback(void *callback_param) +{ + complete(callback_param); +} + +static void ep93xx_spi_dma_transfer(struct ep93xx_spi *espi) +{ + struct spi_message *msg = espi->current_msg; + struct dma_async_tx_descriptor *rxd, *txd; + + rxd = ep93xx_spi_dma_prepare(espi, DMA_FROM_DEVICE); + if (IS_ERR(rxd)) { + dev_err(&espi->pdev->dev, "DMA RX failed: %ld\n", PTR_ERR(rxd)); + msg->status = PTR_ERR(rxd); + return; + } + + txd = ep93xx_spi_dma_prepare(espi, DMA_TO_DEVICE); + if (IS_ERR(txd)) { + ep93xx_spi_dma_finish(espi, DMA_FROM_DEVICE); + dev_err(&espi->pdev->dev, "DMA TX failed: %ld\n", PTR_ERR(rxd)); + msg->status = PTR_ERR(txd); + return; + } + + /* We are ready when RX is done */ + rxd->callback = ep93xx_spi_dma_callback; + rxd->callback_param = &espi->wait; + + /* Now submit both descriptors and wait while they finish */ + dmaengine_submit(rxd); + dmaengine_submit(txd); + + dma_async_issue_pending(espi->dma_rx); + dma_async_issue_pending(espi->dma_tx); + + wait_for_completion(&espi->wait); + + ep93xx_spi_dma_finish(espi, DMA_TO_DEVICE); + ep93xx_spi_dma_finish(espi, DMA_FROM_DEVICE); +} + /** * ep93xx_spi_process_transfer() - processes one SPI transfer * @espi: ep93xx SPI controller struct @@ -556,13 +757,14 @@ static void ep93xx_spi_process_transfer(struct ep93xx_spi *espi, espi->tx = 0; /* - * Now everything is set up for the current transfer. We prime the TX - * FIFO, enable interrupts, and wait for the transfer to complete. + * There is no point of setting up DMA for the transfers which will + * fit into the FIFO and can be transferred with a single interrupt. + * So in these cases we will be using PIO and don't bother for DMA. */ - if (ep93xx_spi_read_write(espi)) { - ep93xx_spi_enable_interrupts(espi); - wait_for_completion(&espi->wait); - } + if (espi->dma_rx && t->len > SPI_FIFO_SIZE) + ep93xx_spi_dma_transfer(espi); + else + ep93xx_spi_pio_transfer(espi); /* * In case of error during transmit, we bail out from processing @@ -571,6 +773,8 @@ static void ep93xx_spi_process_transfer(struct ep93xx_spi *espi, if (msg->status) return; + msg->actual_length += t->len; + /* * After this transfer is finished, perform any possible * post-transfer actions requested by the protocol driver. 
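Note that the DMA path above is only taken when the board opted in through platform data (and the transfer does not fit the FIFO). A sketch of that opt-in, following the Documentation/spi/ep93xx_spi example added earlier in this patch:

static struct ep93xx_spi_info example_spi_info = {
	.num_chipselect	= 1,		/* board-specific */
	.use_dma	= true,
};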
@@ -752,6 +956,75 @@ static irqreturn_t ep93xx_spi_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } +static bool ep93xx_spi_dma_filter(struct dma_chan *chan, void *filter_param) +{ + if (ep93xx_dma_chan_is_m2p(chan)) + return false; + + chan->private = filter_param; + return true; +} + +static int ep93xx_spi_setup_dma(struct ep93xx_spi *espi) +{ + dma_cap_mask_t mask; + int ret; + + espi->zeropage = (void *)get_zeroed_page(GFP_KERNEL); + if (!espi->zeropage) + return -ENOMEM; + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + + espi->dma_rx_data.port = EP93XX_DMA_SSP; + espi->dma_rx_data.direction = DMA_FROM_DEVICE; + espi->dma_rx_data.name = "ep93xx-spi-rx"; + + espi->dma_rx = dma_request_channel(mask, ep93xx_spi_dma_filter, + &espi->dma_rx_data); + if (!espi->dma_rx) { + ret = -ENODEV; + goto fail_free_page; + } + + espi->dma_tx_data.port = EP93XX_DMA_SSP; + espi->dma_tx_data.direction = DMA_TO_DEVICE; + espi->dma_tx_data.name = "ep93xx-spi-tx"; + + espi->dma_tx = dma_request_channel(mask, ep93xx_spi_dma_filter, + &espi->dma_tx_data); + if (!espi->dma_tx) { + ret = -ENODEV; + goto fail_release_rx; + } + + return 0; + +fail_release_rx: + dma_release_channel(espi->dma_rx); + espi->dma_rx = NULL; +fail_free_page: + free_page((unsigned long)espi->zeropage); + + return ret; +} + +static void ep93xx_spi_release_dma(struct ep93xx_spi *espi) +{ + if (espi->dma_rx) { + dma_release_channel(espi->dma_rx); + sg_free_table(&espi->rx_sgt); + } + if (espi->dma_tx) { + dma_release_channel(espi->dma_tx); + sg_free_table(&espi->tx_sgt); + } + + if (espi->zeropage) + free_page((unsigned long)espi->zeropage); +} + static int __init ep93xx_spi_probe(struct platform_device *pdev) { struct spi_master *master; @@ -818,6 +1091,7 @@ static int __init ep93xx_spi_probe(struct platform_device *pdev) goto fail_put_clock; } + espi->sspdr_phys = res->start + SSPDR; espi->regs_base = ioremap(res->start, resource_size(res)); if (!espi->regs_base) { dev_err(&pdev->dev, "failed to map resources\n"); @@ -832,10 +1106,13 @@ static int __init ep93xx_spi_probe(struct platform_device *pdev) goto fail_unmap_regs; } + if (info->use_dma && ep93xx_spi_setup_dma(espi)) + dev_warn(&pdev->dev, "DMA setup failed. Falling back to PIO\n"); + espi->wq = create_singlethread_workqueue("ep93xx_spid"); if (!espi->wq) { dev_err(&pdev->dev, "unable to create workqueue\n"); - goto fail_free_irq; + goto fail_free_dma; } INIT_WORK(&espi->msg_work, ep93xx_spi_work); INIT_LIST_HEAD(&espi->msg_queue); @@ -857,7 +1134,8 @@ static int __init ep93xx_spi_probe(struct platform_device *pdev) fail_free_queue: destroy_workqueue(espi->wq); -fail_free_irq: +fail_free_dma: + ep93xx_spi_release_dma(espi); free_irq(espi->irq, espi); fail_unmap_regs: iounmap(espi->regs_base); @@ -901,6 +1179,7 @@ static int __exit ep93xx_spi_remove(struct platform_device *pdev) } spin_unlock_irq(&espi->lock); + ep93xx_spi_release_dma(espi); free_irq(espi->irq, espi); iounmap(espi->regs_base); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); -- cgit v0.10.2 From f016aeb655350ef935ddf336e22cb00452a1c41e Mon Sep 17 00:00:00 2001 From: Tomoya MORINAGA Date: Tue, 7 Jun 2011 14:50:10 +0900 Subject: spi/topcliff_pch: support new device ML7213 IOH Support ML7213 device of OKI SEMICONDUCTOR. ML7213 is companion chip of Intel Atom E6xx series for IVI(In-Vehicle Infotainment). ML7213 is compatible for Intel EG20T PCH. 
v4: - Delete unrelated whitespace - Prevent device driver from accessing platform data - Add __devinit and __devexit - Save pdev->dev to pd_dev->dev.parent - Have own suspend/resume processing in platform_driver. - Care returned value in pch_spi_init - Change unregister order Signed-off-by: Tomoya MORINAGA Signed-off-by: Grant Likely diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 9578a84..c013481 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -371,12 +371,15 @@ config SPI_TI_SSP serial port. config SPI_TOPCLIFF_PCH - tristate "Topcliff PCH SPI Controller" + tristate "Intel EG20T PCH/OKI SEMICONDUCTOR ML7213 IOH SPI controller" depends on PCI help SPI driver for the Topcliff PCH (Platform Controller Hub) SPI bus used in some x86 embedded processors. + This driver also supports the ML7213, a companion chip for the + Atom E6xx series and compatible with the Intel EG20T PCH. + config SPI_TXX9 tristate "Toshiba TXx9 SPI controller" depends on GENERIC_GPIO && CPU_TX49XX diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c index 79e48d4..be84e3a 100644 --- a/drivers/spi/spi-topcliff-pch.c +++ b/drivers/spi/spi-topcliff-pch.c @@ -26,6 +26,7 @@ #include #include #include +#include /* Register offsets */ #define PCH_SPCR 0x00 /* SPI control register */ @@ -35,6 +36,7 @@ #define PCH_SPDRR 0x10 /* SPI read data register */ #define PCH_SSNXCR 0x18 /* SSN Expand Control Register */ #define PCH_SRST 0x1C /* SPI reset register */ +#define PCH_SPI_ADDRESS_SIZE 0x20 #define PCH_SPSR_TFD 0x000007C0 #define PCH_SPSR_RFD 0x0000F800 @@ -75,7 +77,8 @@ #define SPSR_FI_BIT (1 << 2) #define SPBRR_SIZE_BIT (1 << 10) -#define PCH_ALL (SPCR_TFIE_BIT|SPCR_RFIE_BIT|SPCR_FIE_BIT|SPCR_ORIE_BIT|SPCR_MDFIE_BIT) +#define PCH_ALL (SPCR_TFIE_BIT|SPCR_RFIE_BIT|SPCR_FIE_BIT|\ + SPCR_ORIE_BIT|SPCR_MDFIE_BIT) #define SPCR_RFIC_FIELD 20 #define SPCR_TFIC_FIELD 16 @@ -88,6 +91,16 @@ #define PCH_CLOCK_HZ 50000000 #define PCH_MAX_SPBR 1023 +/* Definition for ML7213 by OKI SEMICONDUCTOR */ +#define PCI_VENDOR_ID_ROHM 0x10DB +#define PCI_DEVICE_ID_ML7213_SPI 0x802c + +/* + * Set the number of SPI instance max + * Intel EG20T PCH : 1ch + * OKI SEMICONDUCTOR ML7213 IOH : 2ch +*/ +#define PCH_SPI_MAX_DEV 2 /** * struct pch_spi_data - Holds the SPI channel specific details @@ -121,6 +134,9 @@ * @cur_trans: The current transfer that this SPI driver is * handling * @board_dat: Reference to the SPI device data structure + * @plat_dev: platform_device structure + * @ch: SPI channel number + * @irq_reg_sts: Status of IRQ registration */ struct pch_spi_data { void __iomem *io_remap_addr; @@ -144,27 +160,33 @@ struct pch_spi_data { struct spi_message *current_msg; struct spi_transfer *cur_trans; struct pch_spi_board_data *board_dat; + struct platform_device *plat_dev; + int ch; + u8 irq_reg_sts; }; /** * struct pch_spi_board_data - Holds the SPI device specific details * @pdev: Pointer to the PCI device - * @irq_reg_sts: Status of IRQ registration - * @pci_req_sts: Status of pci_request_regions * @suspend_sts: Status of suspend - * @data: Pointer to SPI channel data structure + * @num: The number of SPI device instance */ struct pch_spi_board_data { struct pci_dev *pdev; - u8 irq_reg_sts; - u8 pci_req_sts; u8 suspend_sts; - struct pch_spi_data *data; + int num; +}; + +struct pch_pd_dev_save { + int num; + struct platform_device *pd_save[PCH_SPI_MAX_DEV]; + struct pch_spi_board_data *board_dat; }; static struct pci_device_id pch_spi_pcidev_id[] = { - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 
PCI_DEVICE_ID_GE_SPI)}, - {0,} + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_GE_SPI), 1, }, + { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_SPI), 2, }, + { } }; /** @@ -283,11 +305,11 @@ static void pch_spi_handler_sub(struct pch_spi_data *data, u32 reg_spsr_val, static irqreturn_t pch_spi_handler(int irq, void *dev_id) { u32 reg_spsr_val; - struct pch_spi_data *data; void __iomem *spsr; void __iomem *io_remap_addr; irqreturn_t ret = IRQ_NONE; - struct pch_spi_board_data *board_dat = dev_id; + struct pch_spi_data *data = dev_id; + struct pch_spi_board_data *board_dat = data->board_dat; if (board_dat->suspend_sts) { dev_dbg(&board_dat->pdev->dev, @@ -295,7 +317,6 @@ static irqreturn_t pch_spi_handler(int irq, void *dev_id) return IRQ_NONE; } - data = board_dat->data; io_remap_addr = data->io_remap_addr; spsr = io_remap_addr + PCH_SPSR; @@ -868,117 +889,49 @@ static void pch_spi_process_messages(struct work_struct *pwork) } while (data->cur_trans != NULL); } -static void pch_spi_free_resources(struct pch_spi_board_data *board_dat) +static void pch_spi_free_resources(struct pch_spi_board_data *board_dat, + struct pch_spi_data *data) { dev_dbg(&board_dat->pdev->dev, "%s ENTRY\n", __func__); /* free workqueue */ - if (board_dat->data->wk != NULL) { - destroy_workqueue(board_dat->data->wk); - board_dat->data->wk = NULL; + if (data->wk != NULL) { + destroy_workqueue(data->wk); + data->wk = NULL; dev_dbg(&board_dat->pdev->dev, "%s destroy_workqueue invoked successfully\n", __func__); } - - /* disable interrupts & free IRQ */ - if (board_dat->irq_reg_sts) { - /* disable interrupts */ - pch_spi_setclr_reg(board_dat->data->master, PCH_SPCR, 0, - PCH_ALL); - - /* free IRQ */ - free_irq(board_dat->pdev->irq, board_dat); - - dev_dbg(&board_dat->pdev->dev, - "%s free_irq invoked successfully\n", __func__); - - board_dat->irq_reg_sts = false; - } - - /* unmap PCI base address */ - if (board_dat->data->io_remap_addr != 0) { - pci_iounmap(board_dat->pdev, board_dat->data->io_remap_addr); - - board_dat->data->io_remap_addr = 0; - - dev_dbg(&board_dat->pdev->dev, - "%s pci_iounmap invoked successfully\n", __func__); - } - - /* release PCI region */ - if (board_dat->pci_req_sts) { - pci_release_regions(board_dat->pdev); - dev_dbg(&board_dat->pdev->dev, - "%s pci_release_regions invoked successfully\n", - __func__); - board_dat->pci_req_sts = false; - } } -static int pch_spi_get_resources(struct pch_spi_board_data *board_dat) +static int pch_spi_get_resources(struct pch_spi_board_data *board_dat, + struct pch_spi_data *data) { - void __iomem *io_remap_addr; - int retval; + int retval = 0; + dev_dbg(&board_dat->pdev->dev, "%s ENTRY\n", __func__); /* create workqueue */ - board_dat->data->wk = create_singlethread_workqueue(KBUILD_MODNAME); - if (!board_dat->data->wk) { + data->wk = create_singlethread_workqueue(KBUILD_MODNAME); + if (!data->wk) { dev_err(&board_dat->pdev->dev, "%s create_singlet hread_workqueue failed\n", __func__); retval = -EBUSY; goto err_return; } - dev_dbg(&board_dat->pdev->dev, - "%s create_singlethread_workqueue success\n", __func__); - - retval = pci_request_regions(board_dat->pdev, KBUILD_MODNAME); - if (retval != 0) { - dev_err(&board_dat->pdev->dev, - "%s request_region failed\n", __func__); - goto err_return; - } - - board_dat->pci_req_sts = true; - - io_remap_addr = pci_iomap(board_dat->pdev, 1, 0); - if (io_remap_addr == 0) { - dev_err(&board_dat->pdev->dev, - "%s pci_iomap failed\n", __func__); - retval = -ENOMEM; - goto err_return; - } - - /* calculate base address for all channels */ - 
board_dat->data->io_remap_addr = io_remap_addr; - /* reset PCH SPI h/w */ - pch_spi_reset(board_dat->data->master); + pch_spi_reset(data->master); dev_dbg(&board_dat->pdev->dev, "%s pch_spi_reset invoked successfully\n", __func__); - /* register IRQ */ - retval = request_irq(board_dat->pdev->irq, pch_spi_handler, - IRQF_SHARED, KBUILD_MODNAME, board_dat); - if (retval != 0) { - dev_err(&board_dat->pdev->dev, - "%s request_irq failed\n", __func__); - goto err_return; - } - - dev_dbg(&board_dat->pdev->dev, "%s request_irq returned=%d\n", - __func__, retval); - - board_dat->irq_reg_sts = true; dev_dbg(&board_dat->pdev->dev, "%s data->irq_reg_sts=true\n", __func__); err_return: if (retval != 0) { dev_err(&board_dat->pdev->dev, "%s FAIL:invoking pch_spi_free_resources\n", __func__); - pch_spi_free_resources(board_dat); + pch_spi_free_resources(board_dat, data); } dev_dbg(&board_dat->pdev->dev, "%s Return=%d\n", __func__, retval); @@ -986,255 +939,343 @@ err_return: return retval; } -static int pch_spi_probe(struct pci_dev *pdev, const struct pci_device_id *id) +static int __devinit pch_spi_pd_probe(struct platform_device *plat_dev) { - + int ret; struct spi_master *master; + struct pch_spi_board_data *board_dat = dev_get_platdata(&plat_dev->dev); + struct pch_spi_data *data; - struct pch_spi_board_data *board_dat; - int retval; - - dev_dbg(&pdev->dev, "%s ENTRY\n", __func__); - - /* allocate memory for private data */ - board_dat = kzalloc(sizeof(struct pch_spi_board_data), GFP_KERNEL); - if (board_dat == NULL) { - dev_err(&pdev->dev, - " %s memory allocation for private data failed\n", - __func__); - retval = -ENOMEM; - goto err_kmalloc; - } - - dev_dbg(&pdev->dev, - "%s memory allocation for private data success\n", __func__); - - /* enable PCI device */ - retval = pci_enable_device(pdev); - if (retval != 0) { - dev_err(&pdev->dev, "%s pci_enable_device FAILED\n", __func__); - - goto err_pci_en_device; + master = spi_alloc_master(&board_dat->pdev->dev, + sizeof(struct pch_spi_data)); + if (!master) { + dev_err(&plat_dev->dev, "spi_alloc_master[%d] failed.\n", + plat_dev->id); + return -ENOMEM; } - dev_dbg(&pdev->dev, "%s pci_enable_device returned=%d\n", - __func__, retval); + data = spi_master_get_devdata(master); + data->master = master; - board_dat->pdev = pdev; + platform_set_drvdata(plat_dev, data); - /* alllocate memory for SPI master */ - master = spi_alloc_master(&pdev->dev, sizeof(struct pch_spi_data)); - if (master == NULL) { - retval = -ENOMEM; - dev_err(&pdev->dev, "%s Fail.\n", __func__); - goto err_spi_alloc_master; + /* baseaddress + 0x20(offset) */ + data->io_remap_addr = pci_iomap(board_dat->pdev, 1, 0) + + 0x20 * plat_dev->id; + if (!data->io_remap_addr) { + dev_err(&plat_dev->dev, "%s pci_iomap failed\n", __func__); + ret = -ENOMEM; + goto err_pci_iomap; } - dev_dbg(&pdev->dev, - "%s spi_alloc_master returned non NULL\n", __func__); + dev_dbg(&plat_dev->dev, "[ch%d] remap_addr=%p\n", + plat_dev->id, data->io_remap_addr); /* initialize members of SPI master */ master->bus_num = -1; master->num_chipselect = PCH_MAX_CS; master->setup = pch_spi_setup; master->transfer = pch_spi_transfer; - dev_dbg(&pdev->dev, - "%s transfer member of SPI master initialized\n", __func__); - board_dat->data = spi_master_get_devdata(master); + data->board_dat = board_dat; + data->plat_dev = plat_dev; + data->n_curnt_chip = 255; + data->status = STATUS_RUNNING; + data->ch = plat_dev->id; - board_dat->data->master = master; - board_dat->data->n_curnt_chip = 255; - board_dat->data->board_dat = 
board_dat; - board_dat->data->status = STATUS_RUNNING; + INIT_LIST_HEAD(&data->queue); + spin_lock_init(&data->lock); + INIT_WORK(&data->work, pch_spi_process_messages); + init_waitqueue_head(&data->wait); - INIT_LIST_HEAD(&board_dat->data->queue); - spin_lock_init(&board_dat->data->lock); - INIT_WORK(&board_dat->data->work, pch_spi_process_messages); - init_waitqueue_head(&board_dat->data->wait); - - /* allocate resources for PCH SPI */ - retval = pch_spi_get_resources(board_dat); - if (retval) { - dev_err(&pdev->dev, "%s fail(retval=%d)\n", __func__, retval); + ret = pch_spi_get_resources(board_dat, data); + if (ret) { + dev_err(&plat_dev->dev, "%s fail(retval=%d)\n", __func__, ret); goto err_spi_get_resources; } - dev_dbg(&pdev->dev, "%s pch_spi_get_resources returned=%d\n", - __func__, retval); - - /* save private data in dev */ - pci_set_drvdata(pdev, board_dat); - dev_dbg(&pdev->dev, "%s invoked pci_set_drvdata\n", __func__); + ret = request_irq(board_dat->pdev->irq, pch_spi_handler, + IRQF_SHARED, KBUILD_MODNAME, data); + if (ret) { + dev_err(&plat_dev->dev, + "%s request_irq failed\n", __func__); + goto err_request_irq; + } + data->irq_reg_sts = true; - /* set master mode */ pch_spi_set_master_mode(master); - dev_dbg(&pdev->dev, - "%s invoked pch_spi_set_master_mode\n", __func__); - /* Register the controller with the SPI core. */ - retval = spi_register_master(master); - if (retval != 0) { - dev_err(&pdev->dev, + ret = spi_register_master(master); + if (ret != 0) { + dev_err(&plat_dev->dev, "%s spi_register_master FAILED\n", __func__); - goto err_spi_reg_master; + goto err_spi_register_master; } - dev_dbg(&pdev->dev, "%s spi_register_master returned=%d\n", - __func__, retval); - - return 0; -err_spi_reg_master: - spi_unregister_master(master); +err_spi_register_master: + free_irq(board_dat->pdev->irq, board_dat); +err_request_irq: + pch_spi_free_resources(board_dat, data); err_spi_get_resources: -err_spi_alloc_master: + pci_iounmap(board_dat->pdev, data->io_remap_addr); +err_pci_iomap: spi_master_put(master); - pci_disable_device(pdev); -err_pci_en_device: - kfree(board_dat); -err_kmalloc: - return retval; + + return ret; } -static void pch_spi_remove(struct pci_dev *pdev) +static int __devexit pch_spi_pd_remove(struct platform_device *plat_dev) { - struct pch_spi_board_data *board_dat = pci_get_drvdata(pdev); + struct pch_spi_board_data *board_dat = dev_get_platdata(&plat_dev->dev); + struct pch_spi_data *data = platform_get_drvdata(plat_dev); int count; - dev_dbg(&pdev->dev, "%s ENTRY\n", __func__); - - if (!board_dat) { - dev_err(&pdev->dev, - "%s pci_get_drvdata returned NULL\n", __func__); - return; - } - + dev_dbg(&plat_dev->dev, "%s:[ch%d] irq=%d\n", + __func__, plat_dev->id, board_dat->pdev->irq); /* check for any pending messages; no action is taken if the queue * is still full; but at least we tried. 
Unload anyway */ count = 500; - spin_lock(&board_dat->data->lock); - board_dat->data->status = STATUS_EXITING; - while ((list_empty(&board_dat->data->queue) == 0) && --count) { + spin_lock(&data->lock); + data->status = STATUS_EXITING; + while ((list_empty(&data->queue) == 0) && --count) { dev_dbg(&board_dat->pdev->dev, "%s :queue not empty\n", __func__); - spin_unlock(&board_dat->data->lock); + spin_unlock(&data->lock); msleep(PCH_SLEEP_TIME); - spin_lock(&board_dat->data->lock); + spin_lock(&data->lock); } - spin_unlock(&board_dat->data->lock); - - /* Free resources allocated for PCH SPI */ - pch_spi_free_resources(board_dat); - - spi_unregister_master(board_dat->data->master); - - /* free memory for private data */ - kfree(board_dat); + spin_unlock(&data->lock); - pci_set_drvdata(pdev, NULL); + pch_spi_free_resources(board_dat, data); + /* disable interrupts & free IRQ */ + if (data->irq_reg_sts) { + /* disable interrupts */ + pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL); + data->irq_reg_sts = false; + free_irq(board_dat->pdev->irq, data); + } - /* disable PCI device */ - pci_disable_device(pdev); + pci_iounmap(board_dat->pdev, data->io_remap_addr); + spi_unregister_master(data->master); + spi_master_put(data->master); + platform_set_drvdata(plat_dev, NULL); - dev_dbg(&pdev->dev, "%s invoked pci_disable_device\n", __func__); + return 0; } - #ifdef CONFIG_PM -static int pch_spi_suspend(struct pci_dev *pdev, pm_message_t state) +static int pch_spi_pd_suspend(struct platform_device *pd_dev, + pm_message_t state) { u8 count; - int retval; + struct pch_spi_board_data *board_dat = dev_get_platdata(&pd_dev->dev); + struct pch_spi_data *data = platform_get_drvdata(pd_dev); - struct pch_spi_board_data *board_dat = pci_get_drvdata(pdev); - - dev_dbg(&pdev->dev, "%s ENTRY\n", __func__); + dev_dbg(&pd_dev->dev, "%s ENTRY\n", __func__); if (!board_dat) { - dev_err(&pdev->dev, + dev_err(&pd_dev->dev, "%s pci_get_drvdata returned NULL\n", __func__); return -EFAULT; } - retval = 0; - board_dat->suspend_sts = true; - /* check if the current message is processed: Only after thats done the transfer will be suspended */ count = 255; - while ((--count) > 0) { - if (!(board_dat->data->bcurrent_msg_processing)) { - dev_dbg(&pdev->dev, "%s board_dat->data->bCurrent_" - "msg_processing = false\n", __func__); + while ((--count) > 0) + if (!(data->bcurrent_msg_processing)) { break; - } else { - dev_dbg(&pdev->dev, "%s board_dat->data->bCurrent_msg_" - "processing = true\n", __func__); - } msleep(PCH_SLEEP_TIME); } /* Free IRQ */ - if (board_dat->irq_reg_sts) { + if (data->irq_reg_sts) { /* disable all interrupts */ - pch_spi_setclr_reg(board_dat->data->master, PCH_SPCR, 0, - PCH_ALL); - pch_spi_reset(board_dat->data->master); - - free_irq(board_dat->pdev->irq, board_dat); + pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL); + pch_spi_reset(data->master); + free_irq(board_dat->pdev->irq, data); - board_dat->irq_reg_sts = false; - dev_dbg(&pdev->dev, + data->irq_reg_sts = false; + dev_dbg(&pd_dev->dev, "%s free_irq invoked successfully.\n", __func__); } + return 0; +} + +static int pch_spi_pd_resume(struct platform_device *pd_dev) +{ + struct pch_spi_board_data *board_dat = dev_get_platdata(&pd_dev->dev); + struct pch_spi_data *data = platform_get_drvdata(pd_dev); + int retval; + + if (!board_dat) { + dev_err(&pd_dev->dev, + "%s pci_get_drvdata returned NULL\n", __func__); + return -EFAULT; + } + + if (!data->irq_reg_sts) { + /* register IRQ */ + retval = request_irq(board_dat->pdev->irq, 
pch_spi_handler, + IRQF_SHARED, KBUILD_MODNAME, data); + if (retval < 0) { + dev_err(&pd_dev->dev, + "%s request_irq failed\n", __func__); + return retval; + } + + /* reset PCH SPI h/w */ + pch_spi_reset(data->master); + pch_spi_set_master_mode(data->master); + data->irq_reg_sts = true; + } + return 0; +} +#else +#define pch_spi_pd_suspend NULL +#define pch_spi_pd_resume NULL +#endif + +static struct platform_driver pch_spi_pd_driver = { + .driver = { + .name = "pch-spi", + .owner = THIS_MODULE, + }, + .probe = pch_spi_pd_probe, + .remove = __devexit_p(pch_spi_pd_remove), + .suspend = pch_spi_pd_suspend, + .resume = pch_spi_pd_resume +}; + +static int __devinit pch_spi_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct pch_spi_board_data *board_dat; + struct platform_device *pd_dev = NULL; + int retval; + int i; + struct pch_pd_dev_save *pd_dev_save; + + pd_dev_save = kzalloc(sizeof(struct pch_pd_dev_save), GFP_KERNEL); + if (!pd_dev_save) { + dev_err(&pdev->dev, "%s Can't allocate pd_dev_sav\n", __func__); + return -ENOMEM; + } + + board_dat = kzalloc(sizeof(struct pch_spi_board_data), GFP_KERNEL); + if (!board_dat) { + dev_err(&pdev->dev, "%s Can't allocate board_dat\n", __func__); + retval = -ENOMEM; + goto err_no_mem; + } + + retval = pci_request_regions(pdev, KBUILD_MODNAME); + if (retval) { + dev_err(&pdev->dev, "%s request_region failed\n", __func__); + goto pci_request_regions; + } + + board_dat->pdev = pdev; + board_dat->num = id->driver_data; + pd_dev_save->num = id->driver_data; + pd_dev_save->board_dat = board_dat; + + retval = pci_enable_device(pdev); + if (retval) { + dev_err(&pdev->dev, "%s pci_enable_device failed\n", __func__); + goto pci_enable_device; + } + + for (i = 0; i < board_dat->num; i++) { + pd_dev = platform_device_alloc("pch-spi", i); + if (!pd_dev) { + dev_err(&pdev->dev, "platform_device_alloc failed\n"); + goto err_platform_device; + } + pd_dev_save->pd_save[i] = pd_dev; + pd_dev->dev.parent = &pdev->dev; + + retval = platform_device_add_data(pd_dev, board_dat, + sizeof(*board_dat)); + if (retval) { + dev_err(&pdev->dev, + "platform_device_add_data failed\n"); + platform_device_put(pd_dev); + goto err_platform_device; + } + + retval = platform_device_add(pd_dev); + if (retval) { + dev_err(&pdev->dev, "platform_device_add failed\n"); + platform_device_put(pd_dev); + goto err_platform_device; + } + } + + pci_set_drvdata(pdev, pd_dev_save); + + return 0; + +err_platform_device: + pci_disable_device(pdev); +pci_enable_device: + pci_release_regions(pdev); +pci_request_regions: + kfree(board_dat); +err_no_mem: + kfree(pd_dev_save); + + return retval; +} + +static void __devexit pch_spi_remove(struct pci_dev *pdev) +{ + int i; + struct pch_pd_dev_save *pd_dev_save = pci_get_drvdata(pdev); + + dev_dbg(&pdev->dev, "%s ENTRY:pdev=%p\n", __func__, pdev); + + for (i = 0; i < pd_dev_save->num; i++) + platform_device_unregister(pd_dev_save->pd_save[i]); + + pci_disable_device(pdev); + pci_release_regions(pdev); + kfree(pd_dev_save->board_dat); + kfree(pd_dev_save); +} + +#ifdef CONFIG_PM +static int pch_spi_suspend(struct pci_dev *pdev, pm_message_t state) +{ + int retval; + struct pch_pd_dev_save *pd_dev_save = pci_get_drvdata(pdev); + + dev_dbg(&pdev->dev, "%s ENTRY\n", __func__); + + pd_dev_save->board_dat->suspend_sts = true; + /* save config space */ retval = pci_save_state(pdev); - if (retval == 0) { - dev_dbg(&pdev->dev, "%s pci_save_state returned=%d\n", - __func__, retval); - /* disable PM notifications */ pci_enable_wake(pdev, PCI_D3hot, 
0); - dev_dbg(&pdev->dev, - "%s pci_enable_wake invoked successfully\n", __func__); - /* disable PCI device */ pci_disable_device(pdev); - dev_dbg(&pdev->dev, - "%s pci_disable_device invoked successfully\n", - __func__); - /* move device to D3hot state */ pci_set_power_state(pdev, PCI_D3hot); - dev_dbg(&pdev->dev, - "%s pci_set_power_state invoked successfully\n", - __func__); } else { dev_err(&pdev->dev, "%s pci_save_state failed\n", __func__); } - dev_dbg(&pdev->dev, "%s return=%d\n", __func__, retval); - return retval; } static int pch_spi_resume(struct pci_dev *pdev) { int retval; - - struct pch_spi_board_data *board = pci_get_drvdata(pdev); + struct pch_pd_dev_save *pd_dev_save = pci_get_drvdata(pdev); dev_dbg(&pdev->dev, "%s ENTRY\n", __func__); - if (!board) { - dev_err(&pdev->dev, - "%s pci_get_drvdata returned NULL\n", __func__); - return -EFAULT; - } - - /* move device to DO power state */ pci_set_power_state(pdev, PCI_D0); - - /* restore state */ pci_restore_state(pdev); retval = pci_enable_device(pdev); @@ -1242,34 +1283,12 @@ static int pch_spi_resume(struct pci_dev *pdev) dev_err(&pdev->dev, "%s pci_enable_device failed\n", __func__); } else { - /* disable PM notifications */ pci_enable_wake(pdev, PCI_D3hot, 0); - /* register IRQ handler */ - if (!board->irq_reg_sts) { - /* register IRQ */ - retval = request_irq(board->pdev->irq, pch_spi_handler, - IRQF_SHARED, KBUILD_MODNAME, - board); - if (retval < 0) { - dev_err(&pdev->dev, - "%s request_irq failed\n", __func__); - return retval; - } - board->irq_reg_sts = true; - - /* reset PCH SPI h/w */ - pch_spi_reset(board->data->master); - pch_spi_set_master_mode(board->data->master); - - /* set suspend status to false */ - board->suspend_sts = false; - - } + /* set suspend status to false */ + pd_dev_save->board_dat->suspend_sts = false; } - dev_dbg(&pdev->dev, "%s returning=%d\n", __func__, retval); - return retval; } #else @@ -1289,15 +1308,25 @@ static struct pci_driver pch_spi_pcidev = { static int __init pch_spi_init(void) { - return pci_register_driver(&pch_spi_pcidev); + int ret; + ret = platform_driver_register(&pch_spi_pd_driver); + if (ret) + return ret; + + ret = pci_register_driver(&pch_spi_pcidev); + if (ret) + return ret; + + return 0; } module_init(pch_spi_init); static void __exit pch_spi_exit(void) { pci_unregister_driver(&pch_spi_pcidev); + platform_driver_unregister(&pch_spi_pd_driver); } module_exit(pch_spi_exit); MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("Topcliff PCH SPI PCI Driver"); +MODULE_DESCRIPTION("Intel EG20T PCH/OKI SEMICONDUCTOR ML7213 IOH SPI Driver"); -- cgit v0.10.2 From c37f3c2749b53225d36faa5c583203c5f12ae15b Mon Sep 17 00:00:00 2001 From: Tomoya MORINAGA Date: Tue, 7 Jun 2011 14:50:11 +0900 Subject: spi/topcliff_pch: DMA support This patch enables this SPI driver works with DMA mode. 
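The hunks below also add a use_dma module parameter (default 1, declared with module_param() at the end of this diff); loading the module with use_dma=0 bypasses the new DMA paths and keeps the existing interrupt-driven PIO transfers.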
Signed-off-by: Tomoya MORINAGA Signed-off-by: Grant Likely diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c index be84e3a..1b8030b 100644 --- a/drivers/spi/spi-topcliff-pch.c +++ b/drivers/spi/spi-topcliff-pch.c @@ -28,6 +28,9 @@ #include #include +#include +#include + /* Register offsets */ #define PCH_SPCR 0x00 /* SPI control register */ #define PCH_SPBRR 0x04 /* SPI baud rate register */ @@ -36,7 +39,7 @@ #define PCH_SPDRR 0x10 /* SPI read data register */ #define PCH_SSNXCR 0x18 /* SSN Expand Control Register */ #define PCH_SRST 0x1C /* SPI reset register */ -#define PCH_SPI_ADDRESS_SIZE 0x20 +#define PCH_ADDRESS_SIZE 0x20 #define PCH_SPSR_TFD 0x000007C0 #define PCH_SPSR_RFD 0x0000F800 @@ -54,8 +57,6 @@ #define STATUS_EXITING 2 #define PCH_SLEEP_TIME 10 -#define PCH_ADDRESS_SIZE 0x20 - #define SSN_LOW 0x02U #define SSN_NO_CONTROL 0x00U #define PCH_MAX_CS 0xFF @@ -75,6 +76,7 @@ #define SPSR_TFI_BIT (1 << 0) #define SPSR_RFI_BIT (1 << 1) #define SPSR_FI_BIT (1 << 2) +#define SPSR_ORF_BIT (1 << 3) #define SPBRR_SIZE_BIT (1 << 10) #define PCH_ALL (SPCR_TFIE_BIT|SPCR_RFIE_BIT|SPCR_FIE_BIT|\ @@ -83,10 +85,9 @@ #define SPCR_RFIC_FIELD 20 #define SPCR_TFIC_FIELD 16 -#define SPSR_INT_BITS 0x1F -#define MASK_SPBRR_SPBR_BITS (~((1 << 10) - 1)) -#define MASK_RFIC_SPCR_BITS (~(0xf << 20)) -#define MASK_TFIC_SPCR_BITS (~(0xf000f << 12)) +#define MASK_SPBRR_SPBR_BITS ((1 << 10) - 1) +#define MASK_RFIC_SPCR_BITS (0xf << SPCR_RFIC_FIELD) +#define MASK_TFIC_SPCR_BITS (0xf << SPCR_TFIC_FIELD) #define PCH_CLOCK_HZ 50000000 #define PCH_MAX_SPBR 1023 @@ -102,6 +103,28 @@ */ #define PCH_SPI_MAX_DEV 2 +#define PCH_BUF_SIZE 4096 +#define PCH_DMA_TRANS_SIZE 12 + +static int use_dma = 1; + +struct pch_spi_dma_ctrl { + struct dma_async_tx_descriptor *desc_tx; + struct dma_async_tx_descriptor *desc_rx; + struct pch_dma_slave param_tx; + struct pch_dma_slave param_rx; + struct dma_chan *chan_tx; + struct dma_chan *chan_rx; + struct scatterlist *sg_tx_p; + struct scatterlist *sg_rx_p; + struct scatterlist sg_tx; + struct scatterlist sg_rx; + int nent; + void *tx_buf_virt; + void *rx_buf_virt; + dma_addr_t tx_buf_dma; + dma_addr_t rx_buf_dma; +}; /** * struct pch_spi_data - Holds the SPI channel specific details * @io_remap_addr: The remapped PCI base address @@ -140,6 +163,7 @@ */ struct pch_spi_data { void __iomem *io_remap_addr; + unsigned long io_base_addr; struct spi_master *master; struct work_struct work; struct workqueue_struct *wk; @@ -162,6 +186,8 @@ struct pch_spi_data { struct pch_spi_board_data *board_dat; struct platform_device *plat_dev; int ch; + struct pch_spi_dma_ctrl dma; + int use_dma; u8 irq_reg_sts; }; @@ -273,10 +299,10 @@ static void pch_spi_handler_sub(struct pch_spi_data *data, u32 reg_spsr_val, reg_spcr_val &= ~SPCR_RFIE_BIT; /* disable RFI */ /* reset rx threshold */ - reg_spcr_val &= MASK_RFIC_SPCR_BITS; + reg_spcr_val &= ~MASK_RFIC_SPCR_BITS; reg_spcr_val |= (PCH_RX_THOLD_MAX << SPCR_RFIC_FIELD); - iowrite32(((reg_spcr_val) &= (~(SPCR_RFIE_BIT))), - (io_remap_addr + PCH_SPCR)); + + iowrite32(reg_spcr_val, (io_remap_addr + PCH_SPCR)); } /* update counts */ @@ -287,12 +313,15 @@ static void pch_spi_handler_sub(struct pch_spi_data *data, u32 reg_spsr_val, /* if transfer complete interrupt */ if (reg_spsr_val & SPSR_FI_BIT) { - /* disable FI & RFI interrupts */ - pch_spi_setclr_reg(data->master, PCH_SPCR, 0, - SPCR_FIE_BIT | SPCR_RFIE_BIT); + if (tx_index < bpw_len) + dev_err(&data->master->dev, + "%s : Transfer is not completed", __func__); + /* disable 
interrupts */ + pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL); /* transfer is completed;inform pch_spi_process_messages */ data->transfer_complete = true; + data->transfer_active = false; wake_up(&data->wait); } } @@ -316,12 +345,17 @@ static irqreturn_t pch_spi_handler(int irq, void *dev_id) "%s returning due to suspend\n", __func__); return IRQ_NONE; } + if (data->use_dma) + return IRQ_NONE; io_remap_addr = data->io_remap_addr; spsr = io_remap_addr + PCH_SPSR; reg_spsr_val = ioread32(spsr); + if (reg_spsr_val & SPSR_ORF_BIT) + dev_err(&board_dat->pdev->dev, "%s Over run error", __func__); + /* Check if the interrupt is for SPI device */ if (reg_spsr_val & (SPSR_FI_BIT | SPSR_RFI_BIT)) { pch_spi_handler_sub(data, reg_spsr_val, io_remap_addr); @@ -347,7 +381,7 @@ static void pch_spi_set_baud_rate(struct spi_master *master, u32 speed_hz) if (n_spbr > PCH_MAX_SPBR) n_spbr = PCH_MAX_SPBR; - pch_spi_setclr_reg(master, PCH_SPBRR, n_spbr, ~MASK_SPBRR_SPBR_BITS); + pch_spi_setclr_reg(master, PCH_SPBRR, n_spbr, MASK_SPBRR_SPBR_BITS); } /** @@ -456,26 +490,27 @@ static int pch_spi_transfer(struct spi_device *pspi, struct spi_message *pmsg) dev_dbg(&pspi->dev, "%s Transfer List not empty. " "Transfer Speed is set.\n", __func__); + spin_lock_irqsave(&data->lock, flags); /* validate Tx/Rx buffers and Transfer length */ list_for_each_entry(transfer, &pmsg->transfers, transfer_list) { if (!transfer->tx_buf && !transfer->rx_buf) { dev_err(&pspi->dev, "%s Tx and Rx buffer NULL\n", __func__); retval = -EINVAL; - goto err_out; + goto err_return_spinlock; } if (!transfer->len) { dev_err(&pspi->dev, "%s Transfer length invalid\n", __func__); retval = -EINVAL; - goto err_out; + goto err_return_spinlock; } dev_dbg(&pspi->dev, "%s Tx/Rx buffer valid. Transfer length" " valid\n", __func__); - /* if baud rate hs been specified validate the same */ + /* if baud rate has been specified validate the same */ if (transfer->speed_hz > PCH_MAX_BAUDRATE) transfer->speed_hz = PCH_MAX_BAUDRATE; @@ -486,25 +521,24 @@ static int pch_spi_transfer(struct spi_device *pspi, struct spi_message *pmsg) retval = -EINVAL; dev_err(&pspi->dev, "%s Invalid bits per word\n", __func__); - goto err_out; + goto err_return_spinlock; } } } - - spin_lock_irqsave(&data->lock, flags); + spin_unlock_irqrestore(&data->lock, flags); /* We won't process any messages if we have been asked to terminate */ if (data->status == STATUS_EXITING) { dev_err(&pspi->dev, "%s status = STATUS_EXITING.\n", __func__); retval = -ESHUTDOWN; - goto err_return_spinlock; + goto err_out; } /* If suspended ,return -EINVAL */ if (data->board_dat->suspend_sts) { dev_err(&pspi->dev, "%s suspend; returning EINVAL\n", __func__); retval = -EINVAL; - goto err_return_spinlock; + goto err_out; } /* set status of message */ @@ -512,9 +546,11 @@ static int pch_spi_transfer(struct spi_device *pspi, struct spi_message *pmsg) dev_dbg(&pspi->dev, "%s - pmsg->status =%d\n", __func__, pmsg->status); pmsg->status = -EINPROGRESS; - + spin_lock_irqsave(&data->lock, flags); /* add message to queue */ list_add_tail(&pmsg->queue, &data->queue); + spin_unlock_irqrestore(&data->lock, flags); + dev_dbg(&pspi->dev, "%s - Invoked list_add_tail\n", __func__); /* schedule work queue to run */ @@ -523,11 +559,13 @@ static int pch_spi_transfer(struct spi_device *pspi, struct spi_message *pmsg) retval = 0; -err_return_spinlock: - spin_unlock_irqrestore(&data->lock, flags); err_out: dev_dbg(&pspi->dev, "%s RETURN=%d\n", __func__, retval); return retval; +err_return_spinlock: + dev_dbg(&pspi->dev, 
"%s RETURN=%d\n", __func__, retval); + spin_unlock_irqrestore(&data->lock, flags); + return retval; } static inline void pch_spi_select_chip(struct pch_spi_data *data, @@ -548,8 +586,7 @@ static inline void pch_spi_select_chip(struct pch_spi_data *data, pch_spi_setup_transfer(pspi); } -static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw, - struct spi_message **ppmsg) +static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw) { int size; u32 n_writes; @@ -558,8 +595,6 @@ static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw, const u8 *tx_buf; const u16 *tx_sbuf; - pmsg = *ppmsg; - /* set baud rate if needed */ if (data->cur_trans->speed_hz) { dev_dbg(&data->master->dev, "%s:setting baud rate\n", __func__); @@ -642,10 +677,9 @@ static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw, data->transfer_active = true; } - -static void pch_spi_nomore_transfer(struct pch_spi_data *data, - struct spi_message *pmsg) +static void pch_spi_nomore_transfer(struct pch_spi_data *data) { + struct spi_message *pmsg; dev_dbg(&data->master->dev, "%s called\n", __func__); /* Invoke complete callback * [To the spi core..indicating end of transfer] */ @@ -696,29 +730,21 @@ static void pch_spi_nomore_transfer(struct pch_spi_data *data, static void pch_spi_set_ir(struct pch_spi_data *data) { - /* enable interrupts */ - if ((data->bpw_len) > PCH_MAX_FIFO_DEPTH) { + /* enable interrupts, set threshold, enable SPI */ + if ((data->bpw_len) > PCH_MAX_FIFO_DEPTH) /* set receive threshold to PCH_RX_THOLD */ pch_spi_setclr_reg(data->master, PCH_SPCR, - PCH_RX_THOLD << SPCR_RFIC_FIELD, - ~MASK_RFIC_SPCR_BITS); - /* enable FI and RFI interrupts */ - pch_spi_setclr_reg(data->master, PCH_SPCR, - SPCR_RFIE_BIT | SPCR_FIE_BIT, 0); - } else { + PCH_RX_THOLD << SPCR_RFIC_FIELD | + SPCR_FIE_BIT | SPCR_RFIE_BIT | + SPCR_ORIE_BIT | SPCR_SPE_BIT, + MASK_RFIC_SPCR_BITS | PCH_ALL); + else /* set receive threshold to maximum */ pch_spi_setclr_reg(data->master, PCH_SPCR, - PCH_RX_THOLD_MAX << SPCR_TFIC_FIELD, - ~MASK_TFIC_SPCR_BITS); - /* enable FI interrupt */ - pch_spi_setclr_reg(data->master, PCH_SPCR, SPCR_FIE_BIT, 0); - } - - dev_dbg(&data->master->dev, - "%s:invoking pch_spi_set_enable to enable SPI\n", __func__); - - /* SPI set enable */ - pch_spi_setclr_reg(data->current_chip->master, PCH_SPCR, SPCR_SPE_BIT, 0); + PCH_RX_THOLD_MAX << SPCR_RFIC_FIELD | + SPCR_FIE_BIT | SPCR_ORIE_BIT | + SPCR_SPE_BIT, + MASK_RFIC_SPCR_BITS | PCH_ALL); /* Wait until the transfer completes; go to sleep after initiating the transfer. 
*/ @@ -731,15 +757,13 @@ static void pch_spi_set_ir(struct pch_spi_data *data) dev_dbg(&data->master->dev, "%s:no more control over SSN-writing 0 to SSNXCR.", __func__); - data->transfer_active = false; - dev_dbg(&data->master->dev, - "%s set data->transfer_active = false\n", __func__); - /* clear all interrupts */ pch_spi_writereg(data->master, PCH_SPSR, pch_spi_readreg(data->master, PCH_SPSR)); - /* disable interrupts */ - pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL); + /* Disable interrupts and SPI transfer */ + pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL | SPCR_SPE_BIT); + /* clear FIFO */ + pch_spi_clear_fifo(data->master); } static void pch_spi_copy_rx_data(struct pch_spi_data *data, int bpw) @@ -763,6 +787,327 @@ static void pch_spi_copy_rx_data(struct pch_spi_data *data, int bpw) } } +static void pch_spi_copy_rx_data_for_dma(struct pch_spi_data *data, int bpw) +{ + int j; + u8 *rx_buf; + u16 *rx_sbuf; + const u8 *rx_dma_buf; + const u16 *rx_dma_sbuf; + + /* copy Rx Data */ + if (!data->cur_trans->rx_buf) + return; + + if (bpw == 8) { + rx_buf = data->cur_trans->rx_buf; + rx_dma_buf = data->dma.rx_buf_virt; + for (j = 0; j < data->bpw_len; j++) + *rx_buf++ = *rx_dma_buf++ & 0xFF; + } else { + rx_sbuf = data->cur_trans->rx_buf; + rx_dma_sbuf = data->dma.rx_buf_virt; + for (j = 0; j < data->bpw_len; j++) + *rx_sbuf++ = *rx_dma_sbuf++; + } +} + +static void pch_spi_start_transfer(struct pch_spi_data *data) +{ + struct pch_spi_dma_ctrl *dma; + unsigned long flags; + + dma = &data->dma; + + spin_lock_irqsave(&data->lock, flags); + + /* disable interrupts, SPI set enable */ + pch_spi_setclr_reg(data->master, PCH_SPCR, SPCR_SPE_BIT, PCH_ALL); + + spin_unlock_irqrestore(&data->lock, flags); + + /* Wait until the transfer completes; go to sleep after + initiating the transfer. 
*/ + dev_dbg(&data->master->dev, + "%s:waiting for transfer to get over\n", __func__); + wait_event_interruptible(data->wait, data->transfer_complete); + + dma_sync_sg_for_cpu(&data->master->dev, dma->sg_rx_p, dma->nent, + DMA_FROM_DEVICE); + async_tx_ack(dma->desc_rx); + async_tx_ack(dma->desc_tx); + kfree(dma->sg_tx_p); + kfree(dma->sg_rx_p); + + spin_lock_irqsave(&data->lock, flags); + pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL); + dev_dbg(&data->master->dev, + "%s:no more control over SSN-writing 0 to SSNXCR.", __func__); + + /* clear fifo threshold, disable interrupts, disable SPI transfer */ + pch_spi_setclr_reg(data->master, PCH_SPCR, 0, + MASK_RFIC_SPCR_BITS | MASK_TFIC_SPCR_BITS | PCH_ALL | + SPCR_SPE_BIT); + /* clear all interrupts */ + pch_spi_writereg(data->master, PCH_SPSR, + pch_spi_readreg(data->master, PCH_SPSR)); + /* clear FIFO */ + pch_spi_clear_fifo(data->master); + + spin_unlock_irqrestore(&data->lock, flags); +} + +static void pch_dma_rx_complete(void *arg) +{ + struct pch_spi_data *data = arg; + + /* transfer is completed;inform pch_spi_process_messages_dma */ + data->transfer_complete = true; + wake_up_interruptible(&data->wait); +} + +static bool pch_spi_filter(struct dma_chan *chan, void *slave) +{ + struct pch_dma_slave *param = slave; + + if ((chan->chan_id == param->chan_id) && + (param->dma_dev == chan->device->dev)) { + chan->private = param; + return true; + } else { + return false; + } +} + +static void pch_spi_request_dma(struct pch_spi_data *data, int bpw) +{ + dma_cap_mask_t mask; + struct dma_chan *chan; + struct pci_dev *dma_dev; + struct pch_dma_slave *param; + struct pch_spi_dma_ctrl *dma; + unsigned int width; + + if (bpw == 8) + width = PCH_DMA_WIDTH_1_BYTE; + else + width = PCH_DMA_WIDTH_2_BYTES; + + dma = &data->dma; + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + + /* Get DMA's dev information */ + dma_dev = pci_get_bus_and_slot(2, PCI_DEVFN(12, 0)); + + /* Set Tx DMA */ + param = &dma->param_tx; + param->dma_dev = &dma_dev->dev; + param->chan_id = data->master->bus_num * 2; /* Tx = 0, 2 */ + param->tx_reg = data->io_base_addr + PCH_SPDWR; + param->width = width; + chan = dma_request_channel(mask, pch_spi_filter, param); + if (!chan) { + dev_err(&data->master->dev, + "ERROR: dma_request_channel FAILS(Tx)\n"); + data->use_dma = 0; + return; + } + dma->chan_tx = chan; + + /* Set Rx DMA */ + param = &dma->param_rx; + param->dma_dev = &dma_dev->dev; + param->chan_id = data->master->bus_num * 2 + 1; /* Rx = Tx + 1 */ + param->rx_reg = data->io_base_addr + PCH_SPDRR; + param->width = width; + chan = dma_request_channel(mask, pch_spi_filter, param); + if (!chan) { + dev_err(&data->master->dev, + "ERROR: dma_request_channel FAILS(Rx)\n"); + dma_release_channel(dma->chan_tx); + dma->chan_tx = NULL; + data->use_dma = 0; + return; + } + dma->chan_rx = chan; +} + +static void pch_spi_release_dma(struct pch_spi_data *data) +{ + struct pch_spi_dma_ctrl *dma; + + dma = &data->dma; + if (dma->chan_tx) { + dma_release_channel(dma->chan_tx); + dma->chan_tx = NULL; + } + if (dma->chan_rx) { + dma_release_channel(dma->chan_rx); + dma->chan_rx = NULL; + } + return; +} + +static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw) +{ + const u8 *tx_buf; + const u16 *tx_sbuf; + u8 *tx_dma_buf; + u16 *tx_dma_sbuf; + struct scatterlist *sg; + struct dma_async_tx_descriptor *desc_tx; + struct dma_async_tx_descriptor *desc_rx; + int num; + int i; + int size; + int rem; + unsigned long flags; + struct pch_spi_dma_ctrl *dma; + + dma = 
&data->dma; + + /* set baud rate if needed */ + if (data->cur_trans->speed_hz) { + dev_dbg(&data->master->dev, "%s:setting baud rate\n", __func__); + spin_lock_irqsave(&data->lock, flags); + pch_spi_set_baud_rate(data->master, data->cur_trans->speed_hz); + spin_unlock_irqrestore(&data->lock, flags); + } + + /* set bits per word if needed */ + if (data->cur_trans->bits_per_word && + (data->current_msg->spi->bits_per_word != + data->cur_trans->bits_per_word)) { + dev_dbg(&data->master->dev, "%s:set bits per word\n", __func__); + spin_lock_irqsave(&data->lock, flags); + pch_spi_set_bits_per_word(data->master, + data->cur_trans->bits_per_word); + spin_unlock_irqrestore(&data->lock, flags); + *bpw = data->cur_trans->bits_per_word; + } else { + *bpw = data->current_msg->spi->bits_per_word; + } + data->bpw_len = data->cur_trans->len / (*bpw / 8); + + /* copy Tx Data */ + if (data->cur_trans->tx_buf != NULL) { + if (*bpw == 8) { + tx_buf = data->cur_trans->tx_buf; + tx_dma_buf = dma->tx_buf_virt; + for (i = 0; i < data->bpw_len; i++) + *tx_dma_buf++ = *tx_buf++; + } else { + tx_sbuf = data->cur_trans->tx_buf; + tx_dma_sbuf = dma->tx_buf_virt; + for (i = 0; i < data->bpw_len; i++) + *tx_dma_sbuf++ = *tx_sbuf++; + } + } + if (data->bpw_len > PCH_DMA_TRANS_SIZE) { + num = data->bpw_len / PCH_DMA_TRANS_SIZE + 1; + size = PCH_DMA_TRANS_SIZE; + rem = data->bpw_len % PCH_DMA_TRANS_SIZE; + } else { + num = 1; + size = data->bpw_len; + rem = data->bpw_len; + } + dev_dbg(&data->master->dev, "%s num=%d size=%d rem=%d\n", + __func__, num, size, rem); + spin_lock_irqsave(&data->lock, flags); + + /* set receive fifo threshold and transmit fifo threshold */ + pch_spi_setclr_reg(data->master, PCH_SPCR, + ((size - 1) << SPCR_RFIC_FIELD) | + ((PCH_MAX_FIFO_DEPTH - PCH_DMA_TRANS_SIZE) << + SPCR_TFIC_FIELD), + MASK_RFIC_SPCR_BITS | MASK_TFIC_SPCR_BITS); + + spin_unlock_irqrestore(&data->lock, flags); + + /* RX */ + dma->sg_rx_p = kzalloc(sizeof(struct scatterlist)*num, GFP_ATOMIC); + sg_init_table(dma->sg_rx_p, num); /* Initialize SG table */ + /* offset, length setting */ + sg = dma->sg_rx_p; + for (i = 0; i < num; i++, sg++) { + if (i == 0) { + sg->offset = 0; + sg_set_page(sg, virt_to_page(dma->rx_buf_virt), rem, + sg->offset); + sg_dma_len(sg) = rem; + } else { + sg->offset = rem + size * (i - 1); + sg->offset = sg->offset * (*bpw / 8); + sg_set_page(sg, virt_to_page(dma->rx_buf_virt), size, + sg->offset); + sg_dma_len(sg) = size; + } + sg_dma_address(sg) = dma->rx_buf_dma + sg->offset; + } + sg = dma->sg_rx_p; + desc_rx = dma->chan_rx->device->device_prep_slave_sg(dma->chan_rx, sg, + num, DMA_FROM_DEVICE, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!desc_rx) { + dev_err(&data->master->dev, "%s:device_prep_slave_sg Failed\n", + __func__); + return; + } + dma_sync_sg_for_device(&data->master->dev, sg, num, DMA_FROM_DEVICE); + desc_rx->callback = pch_dma_rx_complete; + desc_rx->callback_param = data; + dma->nent = num; + dma->desc_rx = desc_rx; + + /* TX */ + dma->sg_tx_p = kzalloc(sizeof(struct scatterlist)*num, GFP_ATOMIC); + sg_init_table(dma->sg_tx_p, num); /* Initialize SG table */ + /* offset, length setting */ + sg = dma->sg_tx_p; + for (i = 0; i < num; i++, sg++) { + if (i == 0) { + sg->offset = 0; + sg_set_page(sg, virt_to_page(dma->tx_buf_virt), rem, + sg->offset); + sg_dma_len(sg) = rem; + } else { + sg->offset = rem + size * (i - 1); + sg->offset = sg->offset * (*bpw / 8); + sg_set_page(sg, virt_to_page(dma->tx_buf_virt), size, + sg->offset); + sg_dma_len(sg) = size; + } + sg_dma_address(sg) = 
dma->tx_buf_dma + sg->offset; + } + sg = dma->sg_tx_p; + desc_tx = dma->chan_tx->device->device_prep_slave_sg(dma->chan_tx, + sg, num, DMA_TO_DEVICE, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!desc_tx) { + dev_err(&data->master->dev, "%s:device_prep_slave_sg Failed\n", + __func__); + return; + } + dma_sync_sg_for_device(&data->master->dev, sg, num, DMA_TO_DEVICE); + desc_tx->callback = NULL; + desc_tx->callback_param = data; + dma->nent = num; + dma->desc_tx = desc_tx; + + dev_dbg(&data->master->dev, "\n%s:Pulling down SSN low - writing " + "0x2 to SSNXCR\n", __func__); + + spin_lock_irqsave(&data->lock, flags); + pch_spi_writereg(data->master, PCH_SSNXCR, SSN_LOW); + desc_rx->tx_submit(desc_rx); + desc_tx->tx_submit(desc_tx); + spin_unlock_irqrestore(&data->lock, flags); + + /* reset transfer complete flag */ + data->transfer_complete = false; +} static void pch_spi_process_messages(struct work_struct *pwork) { @@ -774,13 +1119,10 @@ static void pch_spi_process_messages(struct work_struct *pwork) dev_dbg(&data->master->dev, "%s data initialized\n", __func__); spin_lock(&data->lock); - /* check if suspend has been initiated;if yes flush queue */ if (data->board_dat->suspend_sts || (data->status == STATUS_EXITING)) { - dev_dbg(&data->master->dev, - "%s suspend/remove initiated,flushing queue\n", - __func__); - + dev_dbg(&data->master->dev, "%s suspend/remove initiated," + "flushing queue\n", __func__); list_for_each_entry(pmsg, data->queue.next, queue) { pmsg->status = -EIO; @@ -814,53 +1156,42 @@ static void pch_spi_process_messages(struct work_struct *pwork) spin_unlock(&data->lock); + if (data->use_dma) + pch_spi_request_dma(data, + data->current_msg->spi->bits_per_word); do { /* If we are already processing a message get the next transfer structure from the message otherwise retrieve the 1st transfer request from the message. */ spin_lock(&data->lock); - if (data->cur_trans == NULL) { data->cur_trans = - list_entry(data->current_msg->transfers. 
- next, struct spi_transfer, - transfer_list); - dev_dbg(&data->master->dev, - "%s :Getting 1st transfer message\n", __func__); + list_entry(data->current_msg->transfers.next, + struct spi_transfer, transfer_list); + dev_dbg(&data->master->dev, "%s " + ":Getting 1st transfer message\n", __func__); } else { data->cur_trans = - list_entry(data->cur_trans->transfer_list.next, - struct spi_transfer, - transfer_list); - dev_dbg(&data->master->dev, - "%s :Getting next transfer message\n", - __func__); + list_entry(data->cur_trans->transfer_list.next, + struct spi_transfer, transfer_list); + dev_dbg(&data->master->dev, "%s " + ":Getting next transfer message\n", __func__); } - spin_unlock(&data->lock); - pch_spi_set_tx(data, &bpw, &pmsg); - - /* Control interrupt*/ - pch_spi_set_ir(data); - - /* Disable SPI transfer */ - pch_spi_setclr_reg(data->current_chip->master, PCH_SPCR, 0, - SPCR_SPE_BIT); - - /* clear FIFO */ - pch_spi_clear_fifo(data->master); - - /* copy Rx Data */ - pch_spi_copy_rx_data(data, bpw); - - /* free memory */ - kfree(data->pkt_rx_buff); - data->pkt_rx_buff = NULL; - - kfree(data->pkt_tx_buff); - data->pkt_tx_buff = NULL; - + if (data->use_dma) { + pch_spi_handle_dma(data, &bpw); + pch_spi_start_transfer(data); + pch_spi_copy_rx_data_for_dma(data, bpw); + } else { + pch_spi_set_tx(data, &bpw); + pch_spi_set_ir(data); + pch_spi_copy_rx_data(data, bpw); + kfree(data->pkt_rx_buff); + data->pkt_rx_buff = NULL; + kfree(data->pkt_tx_buff); + data->pkt_tx_buff = NULL; + } /* increment message count */ data->current_msg->actual_length += data->cur_trans->len; @@ -881,12 +1212,15 @@ static void pch_spi_process_messages(struct work_struct *pwork) /* No more transfer in this message. */ if ((data->cur_trans->transfer_list.next) == &(data->current_msg->transfers)) { - pch_spi_nomore_transfer(data, pmsg); + pch_spi_nomore_transfer(data); } spin_unlock(&data->lock); } while (data->cur_trans != NULL); + + if (data->use_dma) + pch_spi_release_dma(data); } static void pch_spi_free_resources(struct pch_spi_board_data *board_dat, @@ -939,6 +1273,35 @@ err_return: return retval; } +static void pch_free_dma_buf(struct pch_spi_board_data *board_dat, + struct pch_spi_data *data) +{ + struct pch_spi_dma_ctrl *dma; + + dma = &data->dma; + if (dma->tx_buf_dma) + dma_free_coherent(&board_dat->pdev->dev, PCH_BUF_SIZE, + dma->tx_buf_virt, dma->tx_buf_dma); + if (dma->rx_buf_dma) + dma_free_coherent(&board_dat->pdev->dev, PCH_BUF_SIZE, + dma->rx_buf_virt, dma->rx_buf_dma); + return; +} + +static void pch_alloc_dma_buf(struct pch_spi_board_data *board_dat, + struct pch_spi_data *data) +{ + struct pch_spi_dma_ctrl *dma; + + dma = &data->dma; + /* Get Consistent memory for Tx DMA */ + dma->tx_buf_virt = dma_alloc_coherent(&board_dat->pdev->dev, + PCH_BUF_SIZE, &dma->tx_buf_dma, GFP_KERNEL); + /* Get Consistent memory for Rx DMA */ + dma->rx_buf_virt = dma_alloc_coherent(&board_dat->pdev->dev, + PCH_BUF_SIZE, &dma->rx_buf_dma, GFP_KERNEL); +} + static int __devinit pch_spi_pd_probe(struct platform_device *plat_dev) { int ret; @@ -946,6 +1309,8 @@ static int __devinit pch_spi_pd_probe(struct platform_device *plat_dev) struct pch_spi_board_data *board_dat = dev_get_platdata(&plat_dev->dev); struct pch_spi_data *data; + dev_dbg(&plat_dev->dev, "%s:debug\n", __func__); + master = spi_alloc_master(&board_dat->pdev->dev, sizeof(struct pch_spi_data)); if (!master) { @@ -959,9 +1324,11 @@ static int __devinit pch_spi_pd_probe(struct platform_device *plat_dev) platform_set_drvdata(plat_dev, data); - /* baseaddress + 
0x20(offset) */ + /* baseaddress + address offset) */ + data->io_base_addr = pci_resource_start(board_dat->pdev, 1) + + PCH_ADDRESS_SIZE * plat_dev->id; data->io_remap_addr = pci_iomap(board_dat->pdev, 1, 0) + - 0x20 * plat_dev->id; + PCH_ADDRESS_SIZE * plat_dev->id; if (!data->io_remap_addr) { dev_err(&plat_dev->dev, "%s pci_iomap failed\n", __func__); ret = -ENOMEM; @@ -982,6 +1349,7 @@ static int __devinit pch_spi_pd_probe(struct platform_device *plat_dev) data->n_curnt_chip = 255; data->status = STATUS_RUNNING; data->ch = plat_dev->id; + data->use_dma = use_dma; INIT_LIST_HEAD(&data->queue); spin_lock_init(&data->lock); @@ -1012,6 +1380,11 @@ static int __devinit pch_spi_pd_probe(struct platform_device *plat_dev) goto err_spi_register_master; } + if (use_dma) { + dev_info(&plat_dev->dev, "Use DMA for data transfers\n"); + pch_alloc_dma_buf(board_dat, data); + } + return 0; err_spi_register_master: @@ -1031,22 +1404,27 @@ static int __devexit pch_spi_pd_remove(struct platform_device *plat_dev) struct pch_spi_board_data *board_dat = dev_get_platdata(&plat_dev->dev); struct pch_spi_data *data = platform_get_drvdata(plat_dev); int count; + unsigned long flags; dev_dbg(&plat_dev->dev, "%s:[ch%d] irq=%d\n", __func__, plat_dev->id, board_dat->pdev->irq); + + if (use_dma) + pch_free_dma_buf(board_dat, data); + /* check for any pending messages; no action is taken if the queue * is still full; but at least we tried. Unload anyway */ count = 500; - spin_lock(&data->lock); + spin_lock_irqsave(&data->lock, flags); data->status = STATUS_EXITING; while ((list_empty(&data->queue) == 0) && --count) { dev_dbg(&board_dat->pdev->dev, "%s :queue not empty\n", __func__); - spin_unlock(&data->lock); + spin_unlock_irqrestore(&data->lock, flags); msleep(PCH_SLEEP_TIME); - spin_lock(&data->lock); + spin_lock_irqsave(&data->lock, flags); } - spin_unlock(&data->lock); + spin_unlock_irqrestore(&data->lock, flags); pch_spi_free_resources(board_dat, data); /* disable interrupts & free IRQ */ @@ -1083,8 +1461,8 @@ static int pch_spi_pd_suspend(struct platform_device *pd_dev, /* check if the current message is processed: Only after thats done the transfer will be suspended */ count = 255; - while ((--count) > 0) - if (!(data->bcurrent_msg_processing)) { + while ((--count) > 0) { + if (!(data->bcurrent_msg_processing)) break; msleep(PCH_SLEEP_TIME); } @@ -1328,5 +1706,9 @@ static void __exit pch_spi_exit(void) } module_exit(pch_spi_exit); +module_param(use_dma, int, 0644); +MODULE_PARM_DESC(use_dma, + "to use DMA for data transfers pass 1 else 0; default 1"); + MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Intel EG20T PCH/OKI SEMICONDUCTOR ML7213 IOH SPI Driver"); -- cgit v0.10.2 From 8e2943c04c74e537c762c09bcea89e923510a9ac Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Fri, 10 Jun 2011 18:11:25 -0700 Subject: spi: Convert uses of struct resource * to resource_size(ptr) Done via coccinelle scripts like: @@ struct resource *ptr; @@ - ptr->end - ptr->start + 1 + resource_size(ptr) and some grep and typing. Mostly uncompiled, no cross-compilers. 
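A minimal before/after sketch of the substitution (illustrative only; it assumes the standard resource_size() helper from <linux/ioport.h>, which returns the inclusive length end - start + 1, so both forms compute the same size):

    #include <linux/io.h>
    #include <linux/ioport.h>

    /* Sketch of the conversion applied by the coccinelle rule above;
     * resource_size(r) is equivalent to r->end - r->start + 1.
     */
    static void __iomem *map_regs_example(struct resource *r)
    {
            /* before: ioremap(r->start, r->end - r->start + 1); */
            return ioremap(r->start, resource_size(r));
    }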
Signed-off-by: Joe Perches Signed-off-by: Grant Likely diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c index fcff810..03019bf 100644 --- a/drivers/spi/spi-ath79.c +++ b/drivers/spi/spi-ath79.c @@ -232,7 +232,7 @@ static __devinit int ath79_spi_probe(struct platform_device *pdev) goto err_put_master; } - sp->base = ioremap(r->start, r->end - r->start + 1); + sp->base = ioremap(r->start, resource_size(r)); if (!sp->base) { ret = -ENXIO; goto err_put_master; diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c index 969cdd2..fde3a2d 100644 --- a/drivers/spi/spi-omap2-mcspi.c +++ b/drivers/spi/spi-omap2-mcspi.c @@ -1116,8 +1116,8 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev) status = -ENODEV; goto err1; } - if (!request_mem_region(r->start, (r->end - r->start) + 1, - dev_name(&pdev->dev))) { + if (!request_mem_region(r->start, resource_size(r), + dev_name(&pdev->dev))) { status = -EBUSY; goto err1; } @@ -1125,7 +1125,7 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev) r->start += pdata->regs_offset; r->end += pdata->regs_offset; mcspi->phys = r->start; - mcspi->base = ioremap(r->start, r->end - r->start + 1); + mcspi->base = ioremap(r->start, resource_size(r)); if (!mcspi->base) { dev_dbg(&pdev->dev, "can't ioremap MCSPI\n"); status = -ENOMEM; @@ -1190,7 +1190,7 @@ err4: err3: kfree(mcspi->dma_channels); err2: - release_mem_region(r->start, (r->end - r->start) + 1); + release_mem_region(r->start, resource_size(r)); iounmap(mcspi->base); err1: return status; @@ -1210,7 +1210,7 @@ static int __exit omap2_mcspi_remove(struct platform_device *pdev) omap2_mcspi_disable_clocks(mcspi); r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - release_mem_region(r->start, (r->end - r->start) + 1); + release_mem_region(r->start, resource_size(r)); base = mcspi->base; spi_unregister_master(master); diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c index d482628..9421a39 100644 --- a/drivers/spi/spi-orion.c +++ b/drivers/spi/spi-orion.c @@ -489,7 +489,7 @@ static int __init orion_spi_probe(struct platform_device *pdev) goto out; } - if (!request_mem_region(r->start, (r->end - r->start) + 1, + if (!request_mem_region(r->start, resource_size(r), dev_name(&pdev->dev))) { status = -EBUSY; goto out; @@ -511,7 +511,7 @@ static int __init orion_spi_probe(struct platform_device *pdev) return status; out_rel_mem: - release_mem_region(r->start, (r->end - r->start) + 1); + release_mem_region(r->start, resource_size(r)); out: spi_master_put(master); @@ -531,7 +531,7 @@ static int __exit orion_spi_remove(struct platform_device *pdev) cancel_work_sync(&spi->work); r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - release_mem_region(r->start, (r->end - r->start) + 1); + release_mem_region(r->start, resource_size(r)); spi_unregister_master(master); diff --git a/drivers/spi/spi-ppc4xx.c b/drivers/spi/spi-ppc4xx.c index 2a298c0..b267fd9 100644 --- a/drivers/spi/spi-ppc4xx.c +++ b/drivers/spi/spi-ppc4xx.c @@ -502,7 +502,7 @@ static int __init spi_ppc4xx_of_probe(struct platform_device *op) goto free_gpios; } hw->mapbase = resource.start; - hw->mapsize = resource.end - resource.start + 1; + hw->mapsize = resource_size(&resource); /* Sanity check */ if (hw->mapsize < sizeof(struct spi_ppc4xx_regs)) { diff --git a/drivers/spi/spi-tegra.c b/drivers/spi/spi-tegra.c index 6c3aa6e..8e30727 100644 --- a/drivers/spi/spi-tegra.c +++ b/drivers/spi/spi-tegra.c @@ -498,14 +498,14 @@ static int __init spi_tegra_probe(struct platform_device *pdev) 
goto err0; } - if (!request_mem_region(r->start, (r->end - r->start) + 1, + if (!request_mem_region(r->start, resource_size(r), dev_name(&pdev->dev))) { ret = -EBUSY; goto err0; } tspi->phys = r->start; - tspi->base = ioremap(r->start, r->end - r->start + 1); + tspi->base = ioremap(r->start, resource_size(r)); if (!tspi->base) { dev_err(&pdev->dev, "can't ioremap iomem\n"); ret = -ENOMEM; @@ -563,7 +563,7 @@ err3: err2: iounmap(tspi->base); err1: - release_mem_region(r->start, (r->end - r->start) + 1); + release_mem_region(r->start, resource_size(r)); err0: spi_master_put(master); return ret; @@ -588,7 +588,7 @@ static int __devexit spi_tegra_remove(struct platform_device *pdev) iounmap(tspi->base); r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - release_mem_region(r->start, (r->end - r->start) + 1); + release_mem_region(r->start, resource_size(r)); return 0; } -- cgit v0.10.2 From e892bac102805f905e463a2cc7d0f870358cc1d5 Mon Sep 17 00:00:00 2001 From: Grant Likely Date: Wed, 15 Jun 2011 13:18:06 -0600 Subject: gpio/tegra: Move Tegra gpio driver to drivers/gpio As part of the gpio driver consolidation, this patch moves the Tegra driver into drivers/gpio Signed-off-by: Grant Likely diff --git a/Documentation/devicetree/bindings/spi/spi_nvidia.txt b/Documentation/devicetree/bindings/spi/spi_nvidia.txt new file mode 100644 index 0000000..bde450b --- /dev/null +++ b/Documentation/devicetree/bindings/spi/spi_nvidia.txt @@ -0,0 +1,5 @@ +NVIDIA Tegra 2 SPI device + +Required properties: +- compatible : should be "nvidia,tegra250-spi". +- gpios : should specify GPIOs used for chipselect. diff --git a/drivers/spi/spi-tegra.c b/drivers/spi/spi-tegra.c index 8e30727..a43ceeb 100644 --- a/drivers/spi/spi-tegra.c +++ b/drivers/spi/spi-tegra.c @@ -546,6 +546,7 @@ static int __init spi_tegra_probe(struct platform_device *pdev) tspi->rx_dma_req.req_sel = spi_tegra_req_sels[pdev->id]; tspi->rx_dma_req.dev = tspi; + master->dev.of_node = pdev->dev.of_node; ret = spi_register_master(master); if (ret < 0) @@ -595,10 +596,21 @@ static int __devexit spi_tegra_remove(struct platform_device *pdev) MODULE_ALIAS("platform:spi_tegra"); +#ifdef CONFIG_OF +static struct of_device_id spi_tegra_of_match_table[] __devinitdata = { + { .compatible = "nvidia,tegra250-spi", }, + {} +}; +MODULE_DEVICE_TABLE(of, spi_tegra_of_match_table); +#else /* CONFIG_OF */ +#define spi_tegra_of_match_table NULL +#endif /* CONFIG_OF */ + static struct platform_driver spi_tegra_driver = { .driver = { .name = "spi_tegra", .owner = THIS_MODULE, + .of_match_table = spi_tegra_of_match_table, }, .remove = __devexit_p(spi_tegra_remove), }; -- cgit v0.10.2 From 083be3f05371b8fe0606f3abf029beeeff66d633 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Thu, 16 Jun 2011 10:14:28 +0200 Subject: spi/pl022: initialize burstsize from FIFO trigger level Configure the DMA burstsize from the FIFO trigger level supplied with the controller configuration data. This is based on a patch from Virupax, but I rewrote it differently. 
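For context, the trigger levels used here come from the per-chip configuration a board hands to the SPI core through spi_board_info. A minimal sketch, assuming the chip data is the usual struct pl022_config_chip from the pl022 platform header (only the fields relevant to this patch are shown; the slave device and values are illustrative, not taken from any real board):

	#include <linux/amba/pl022.h>
	#include <linux/spi/spi.h>

	/* Illustrative chip configuration: after this patch configure_dma()
	 * maps these trigger levels one-to-one to src_maxburst/dst_maxburst
	 * instead of always defaulting to half the FIFO depth.
	 */
	static struct pl022_config_chip example_spi_chip_info = {
		.com_mode	= DMA_TRANSFER,
		.rx_lev_trig	= SSP_RX_8_OR_MORE_ELEM,	/* RX bursts of 8 */
		.tx_lev_trig	= SSP_TX_8_OR_MORE_EMPTY_LOC,	/* TX bursts of 8 */
	};

	static struct spi_board_info example_board_info __initdata = {
		.modalias	 = "example-spi-device",	/* placeholder slave */
		.max_speed_hz	 = 1000000,
		.bus_num	 = 0,
		.chip_select	 = 0,
		.controller_data = &example_spi_chip_info,
	};

With the change, a trigger level of eight elements yields a DMA burst of eight, so the DMA engine and the SSP request signal agree instead of the driver silently falling back to fifodepth/2.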
Reported-by: Virupax Sadashivpetimath Signed-off-by: Linus Walleij Signed-off-by: Grant Likely diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c index 2541705..f4b7b72 100644 --- a/drivers/spi/spi-pl022.c +++ b/drivers/spi/spi-pl022.c @@ -381,6 +381,8 @@ struct pl022 { enum ssp_reading read; enum ssp_writing write; u32 exp_fifo_level; + enum ssp_rx_level_trig rx_lev_trig; + enum ssp_tx_level_trig tx_lev_trig; /* DMA settings */ #ifdef CONFIG_DMA_ENGINE struct dma_chan *dma_rx_channel; @@ -907,12 +909,10 @@ static int configure_dma(struct pl022 *pl022) struct dma_slave_config rx_conf = { .src_addr = SSP_DR(pl022->phybase), .direction = DMA_FROM_DEVICE, - .src_maxburst = pl022->vendor->fifodepth >> 1, }; struct dma_slave_config tx_conf = { .dst_addr = SSP_DR(pl022->phybase), .direction = DMA_TO_DEVICE, - .dst_maxburst = pl022->vendor->fifodepth >> 1, }; unsigned int pages; int ret; @@ -926,6 +926,54 @@ static int configure_dma(struct pl022 *pl022) if (!rxchan || !txchan) return -ENODEV; + /* + * If supplied, the DMA burstsize should equal the FIFO trigger level. + * Notice that the DMA engine uses one-to-one mapping. Since we can + * not trigger on 2 elements this needs explicit mapping rather than + * calculation. + */ + switch (pl022->rx_lev_trig) { + case SSP_RX_1_OR_MORE_ELEM: + rx_conf.src_maxburst = 1; + break; + case SSP_RX_4_OR_MORE_ELEM: + rx_conf.src_maxburst = 4; + break; + case SSP_RX_8_OR_MORE_ELEM: + rx_conf.src_maxburst = 8; + break; + case SSP_RX_16_OR_MORE_ELEM: + rx_conf.src_maxburst = 16; + break; + case SSP_RX_32_OR_MORE_ELEM: + rx_conf.src_maxburst = 32; + break; + default: + rx_conf.src_maxburst = pl022->vendor->fifodepth >> 1; + break; + } + + switch (pl022->tx_lev_trig) { + case SSP_TX_1_OR_MORE_EMPTY_LOC: + tx_conf.dst_maxburst = 1; + break; + case SSP_TX_4_OR_MORE_EMPTY_LOC: + tx_conf.dst_maxburst = 4; + break; + case SSP_TX_8_OR_MORE_EMPTY_LOC: + tx_conf.dst_maxburst = 8; + break; + case SSP_TX_16_OR_MORE_EMPTY_LOC: + tx_conf.dst_maxburst = 16; + break; + case SSP_TX_32_OR_MORE_EMPTY_LOC: + tx_conf.dst_maxburst = 32; + break; + default: + tx_conf.dst_maxburst = pl022->vendor->fifodepth >> 1; + break; + } + switch (pl022->read) { case READING_NULL: /* Use the same as for writing */ @@ -1871,6 +1919,9 @@ static int pl022_setup(struct spi_device *spi) goto err_config_params; } + pl022->rx_lev_trig = chip_info->rx_lev_trig; + pl022->tx_lev_trig = chip_info->tx_lev_trig; + /* Now set controller state based on controller data */ chip->xfer_type = chip_info->com_mode; if (!chip_info->cs_control) { -- cgit v0.10.2 From bcda6ff8ddffa593635ed7e6f620a1ef3b827ffc Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Thu, 16 Jun 2011 10:14:40 +0200 Subject: spi/pl022: support runtime PM Insert notifiers for the runtime PM API. With this the runtime PM layer kicks in to action where used. Signed-off-by: Rabin Vincent Reviewed-by: Virupax Sadashivpetimath Reviewed-by: Jonas Aberg Reviewed-by: Srinidhi Kasagar [Rebased to Linux 3.0-rc3, edit description] Signed-off-by: Linus Walleij Signed-off-by: Grant Likely diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c index f4b7b72..5079ff1 100644 --- a/drivers/spi/spi-pl022.c +++ b/drivers/spi/spi-pl022.c @@ -40,6 +40,7 @@ #include #include #include +#include /* * This macro is used to define some register default values. 
@@ -517,6 +518,7 @@ static void giveback(struct pl022 *pl022) clk_disable(pl022->clk); amba_pclk_disable(pl022->adev); amba_vcore_disable(pl022->adev); + pm_runtime_put(&pl022->adev->dev); } /** @@ -1542,6 +1544,7 @@ static void pump_messages(struct work_struct *work) * and core will be disabled when giveback() is called in each method * (poll/interrupt/DMA) */ + pm_runtime_get_sync(&pl022->adev->dev); amba_vcore_enable(pl022->adev); amba_pclk_enable(pl022->adev); clk_enable(pl022->clk); @@ -2142,6 +2145,8 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id) } printk(KERN_INFO "pl022: mapped registers from 0x%08x to %p\n", adev->res.start, pl022->virtbase); + pm_runtime_enable(dev); + pm_runtime_resume(dev); pl022->clk = clk_get(&adev->dev, NULL); if (IS_ERR(pl022->clk)) { @@ -2203,6 +2208,7 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id) destroy_queue(pl022); pl022_dma_remove(pl022); free_irq(adev->irq[0], pl022); + pm_runtime_disable(&adev->dev); err_no_irq: clk_put(pl022->clk); err_no_clk: -- cgit v0.10.2 From 78b2b911bf7020359a2adb54adfca522cf776141 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Thu, 16 Jun 2011 10:14:46 +0200 Subject: spi/pl022: strengthen FIFO watermark level checks The platform configuration can select custom FIFO watermarks, but these may conflict the actual FIFO size of the PL022 variant if set too high. So strengthen the sanity checks to deny any conflicting settings. Signed-off-by: Linus Walleij Signed-off-by: Grant Likely diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c index 5079ff1..77ffd18 100644 --- a/drivers/spi/spi-pl022.c +++ b/drivers/spi/spi-pl022.c @@ -1678,17 +1678,57 @@ static int verify_controller_parameters(struct pl022 *pl022, "Communication mode is configured incorrectly\n"); return -EINVAL; } - if ((chip_info->rx_lev_trig < SSP_RX_1_OR_MORE_ELEM) - || (chip_info->rx_lev_trig > SSP_RX_32_OR_MORE_ELEM)) { + switch (chip_info->rx_lev_trig) { + case SSP_RX_1_OR_MORE_ELEM: + case SSP_RX_4_OR_MORE_ELEM: + case SSP_RX_8_OR_MORE_ELEM: + /* These are always OK, all variants can handle this */ + break; + case SSP_RX_16_OR_MORE_ELEM: + if (pl022->vendor->fifodepth < 16) { + dev_err(&pl022->adev->dev, + "RX FIFO Trigger Level is configured incorrectly\n"); + return -EINVAL; + } + break; + case SSP_RX_32_OR_MORE_ELEM: + if (pl022->vendor->fifodepth < 32) { + dev_err(&pl022->adev->dev, + "RX FIFO Trigger Level is configured incorrectly\n"); + return -EINVAL; + } + break; + default: dev_err(&pl022->adev->dev, "RX FIFO Trigger Level is configured incorrectly\n"); return -EINVAL; + break; } - if ((chip_info->tx_lev_trig < SSP_TX_1_OR_MORE_EMPTY_LOC) - || (chip_info->tx_lev_trig > SSP_TX_32_OR_MORE_EMPTY_LOC)) { + switch (chip_info->tx_lev_trig) { + case SSP_TX_1_OR_MORE_EMPTY_LOC: + case SSP_TX_4_OR_MORE_EMPTY_LOC: + case SSP_TX_8_OR_MORE_EMPTY_LOC: + /* These are always OK, all variants can handle this */ + break; + case SSP_TX_16_OR_MORE_EMPTY_LOC: + if (pl022->vendor->fifodepth < 16) { + dev_err(&pl022->adev->dev, + "TX FIFO Trigger Level is configured incorrectly\n"); + return -EINVAL; + } + break; + case SSP_TX_32_OR_MORE_EMPTY_LOC: + if (pl022->vendor->fifodepth < 32) { + dev_err(&pl022->adev->dev, + "TX FIFO Trigger Level is configured incorrectly\n"); + return -EINVAL; + } + break; + default: dev_err(&pl022->adev->dev, "TX FIFO Trigger Level is configured incorrectly\n"); return -EINVAL; + break; } if (chip_info->iface == SSP_INTERFACE_NATIONAL_MICROWIRE) { if ((chip_info->ctrl_len < SSP_BITS_4) -- cgit 
v0.10.2 From 2e2de2e314672c8b6644f67a35556d6df780493d Mon Sep 17 00:00:00 2001 From: Tomoya MORINAGA Date: Fri, 17 Jun 2011 09:34:25 +0900 Subject: spi/topcliff-pch: Support new device ML7223 IOH Support new device OKI SEMICONDUCTOR ML7223 IOH(Input/Output Hub). The ML7223 IOH is for MP(Media Phone) use. The ML7223 is companion chip for Intel Atom E6xx series. The ML7223 is completely compatible for Intel EG20T PCH. Signed-off-by: Tomoya MORINAGA Signed-off-by: Grant Likely diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c index 1b8030b..1d23f38 100644 --- a/drivers/spi/spi-topcliff-pch.c +++ b/drivers/spi/spi-topcliff-pch.c @@ -95,11 +95,13 @@ /* Definition for ML7213 by OKI SEMICONDUCTOR */ #define PCI_VENDOR_ID_ROHM 0x10DB #define PCI_DEVICE_ID_ML7213_SPI 0x802c +#define PCI_DEVICE_ID_ML7223_SPI 0x800F /* * Set the number of SPI instance max * Intel EG20T PCH : 1ch * OKI SEMICONDUCTOR ML7213 IOH : 2ch + * OKI SEMICONDUCTOR ML7223 IOH : 1ch */ #define PCH_SPI_MAX_DEV 2 @@ -212,6 +214,7 @@ struct pch_pd_dev_save { static struct pci_device_id pch_spi_pcidev_id[] = { { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_GE_SPI), 1, }, { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_SPI), 2, }, + { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_SPI), 1, }, { } }; @@ -1711,4 +1714,4 @@ MODULE_PARM_DESC(use_dma, "to use DMA for data transfers pass 1 else 0; default 1"); MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("Intel EG20T PCH/OKI SEMICONDUCTOR ML7213 IOH SPI Driver"); +MODULE_DESCRIPTION("Intel EG20T PCH/OKI SEMICONDUCTOR ML7xxx IOH SPI Driver"); -- cgit v0.10.2 From 47885ce81c7498c015e6763303821ab6e8a6e2cc Mon Sep 17 00:00:00 2001 From: Mike Frysinger Date: Fri, 17 Jun 2011 04:16:56 -0400 Subject: spi/bfin_spi: use structs for accessing hardware regs Rather than hardcoding the register sizes/offsets in this file, use the existing struct in the spi header for reading/writing the hardware. 
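A minimal sketch of the register block this switches to, with field names taken from the diff below; the real definition lives in the Blackfin SPI header, and the padding layout here is an assumption based on the old 0x00/0x04/... register offsets:

	/* Sketch only: 16-bit registers on 4-byte boundaries, so each entry
	 * carries a pad word. bfin_read()/bfin_write() then take a pointer
	 * to the field instead of a base address plus hardcoded offset.
	 */
	struct bfin_spi_regs {
		u16 ctl;	u16 __pad_ctl;		/* 0x00: SPI_CTL  */
		u16 flg;	u16 __pad_flg;		/* 0x04: SPI_FLG  */
		u16 stat;	u16 __pad_stat;		/* 0x08: SPI_STAT */
		u16 tdbr;	u16 __pad_tdbr;		/* 0x0C: SPI_TDBR */
		u16 rdbr;	u16 __pad_rdbr;		/* 0x10: SPI_RDBR */
		u16 baud;	u16 __pad_baud;		/* 0x14: SPI_BAUD */
	};

The open-coded read-modify-write helpers then collapse to single accessors, e.g. bfin_write_or(&drv_data->regs->ctl, BIT_CTL_ENABLE) in bfin_spi_enable() in the conversion below.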
Signed-off-by: Mike Frysinger Signed-off-by: Grant Likely diff --git a/drivers/spi/spi-bfin5xx.c b/drivers/spi/spi-bfin5xx.c index cc880c9..7331831 100644 --- a/drivers/spi/spi-bfin5xx.c +++ b/drivers/spi/spi-bfin5xx.c @@ -58,7 +58,7 @@ struct bfin_spi_master_data { struct spi_master *master; /* Regs base of SPI controller */ - void __iomem *regs_base; + struct bfin_spi_regs __iomem *regs; /* Pin request list */ u16 *pin_req; @@ -122,34 +122,14 @@ struct bfin_spi_slave_data { const struct bfin_spi_transfer_ops *ops; }; -#define DEFINE_SPI_REG(reg, off) \ -static inline u16 read_##reg(struct bfin_spi_master_data *drv_data) \ - { return bfin_read16(drv_data->regs_base + off); } \ -static inline void write_##reg(struct bfin_spi_master_data *drv_data, u16 v) \ - { bfin_write16(drv_data->regs_base + off, v); } - -DEFINE_SPI_REG(CTRL, 0x00) -DEFINE_SPI_REG(FLAG, 0x04) -DEFINE_SPI_REG(STAT, 0x08) -DEFINE_SPI_REG(TDBR, 0x0C) -DEFINE_SPI_REG(RDBR, 0x10) -DEFINE_SPI_REG(BAUD, 0x14) -DEFINE_SPI_REG(SHAW, 0x18) - static void bfin_spi_enable(struct bfin_spi_master_data *drv_data) { - u16 cr; - - cr = read_CTRL(drv_data); - write_CTRL(drv_data, (cr | BIT_CTL_ENABLE)); + bfin_write_or(&drv_data->regs->ctl, BIT_CTL_ENABLE); } static void bfin_spi_disable(struct bfin_spi_master_data *drv_data) { - u16 cr; - - cr = read_CTRL(drv_data); - write_CTRL(drv_data, (cr & (~BIT_CTL_ENABLE))); + bfin_write_and(&drv_data->regs->ctl, ~BIT_CTL_ENABLE); } /* Caculate the SPI_BAUD register value based on input HZ */ @@ -172,10 +152,10 @@ static int bfin_spi_flush(struct bfin_spi_master_data *drv_data) unsigned long limit = loops_per_jiffy << 1; /* wait for stop and clear stat */ - while (!(read_STAT(drv_data) & BIT_STAT_SPIF) && --limit) + while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_SPIF) && --limit) cpu_relax(); - write_STAT(drv_data, BIT_STAT_CLR); + bfin_write(&drv_data->regs->stat, BIT_STAT_CLR); return limit; } @@ -183,29 +163,19 @@ static int bfin_spi_flush(struct bfin_spi_master_data *drv_data) /* Chip select operation functions for cs_change flag */ static void bfin_spi_cs_active(struct bfin_spi_master_data *drv_data, struct bfin_spi_slave_data *chip) { - if (likely(chip->chip_select_num < MAX_CTRL_CS)) { - u16 flag = read_FLAG(drv_data); - - flag &= ~chip->flag; - - write_FLAG(drv_data, flag); - } else { + if (likely(chip->chip_select_num < MAX_CTRL_CS)) + bfin_write_and(&drv_data->regs->flg, ~chip->flag); + else gpio_set_value(chip->cs_gpio, 0); - } } static void bfin_spi_cs_deactive(struct bfin_spi_master_data *drv_data, struct bfin_spi_slave_data *chip) { - if (likely(chip->chip_select_num < MAX_CTRL_CS)) { - u16 flag = read_FLAG(drv_data); - - flag |= chip->flag; - - write_FLAG(drv_data, flag); - } else { + if (likely(chip->chip_select_num < MAX_CTRL_CS)) + bfin_write_or(&drv_data->regs->flg, chip->flag); + else gpio_set_value(chip->cs_gpio, 1); - } /* Move delay here for consistency */ if (chip->cs_chg_udelay) @@ -216,25 +186,15 @@ static void bfin_spi_cs_deactive(struct bfin_spi_master_data *drv_data, static inline void bfin_spi_cs_enable(struct bfin_spi_master_data *drv_data, struct bfin_spi_slave_data *chip) { - if (chip->chip_select_num < MAX_CTRL_CS) { - u16 flag = read_FLAG(drv_data); - - flag |= (chip->flag >> 8); - - write_FLAG(drv_data, flag); - } + if (chip->chip_select_num < MAX_CTRL_CS) + bfin_write_or(&drv_data->regs->flg, chip->flag >> 8); } static inline void bfin_spi_cs_disable(struct bfin_spi_master_data *drv_data, struct bfin_spi_slave_data *chip) { - if (chip->chip_select_num < 
MAX_CTRL_CS) { - u16 flag = read_FLAG(drv_data); - - flag &= ~(chip->flag >> 8); - - write_FLAG(drv_data, flag); - } + if (chip->chip_select_num < MAX_CTRL_CS) + bfin_write_and(&drv_data->regs->flg, ~(chip->flag >> 8)); } /* stop controller and re-config current chip*/ @@ -243,15 +203,15 @@ static void bfin_spi_restore_state(struct bfin_spi_master_data *drv_data) struct bfin_spi_slave_data *chip = drv_data->cur_chip; /* Clear status and disable clock */ - write_STAT(drv_data, BIT_STAT_CLR); + bfin_write(&drv_data->regs->stat, BIT_STAT_CLR); bfin_spi_disable(drv_data); dev_dbg(&drv_data->pdev->dev, "restoring spi ctl state\n"); SSYNC(); /* Load the registers */ - write_CTRL(drv_data, chip->ctl_reg); - write_BAUD(drv_data, chip->baud); + bfin_write(&drv_data->regs->ctl, chip->ctl_reg); + bfin_write(&drv_data->regs->baud, chip->baud); bfin_spi_enable(drv_data); bfin_spi_cs_active(drv_data, chip); @@ -260,7 +220,7 @@ static void bfin_spi_restore_state(struct bfin_spi_master_data *drv_data) /* used to kick off transfer in rx mode and read unwanted RX data */ static inline void bfin_spi_dummy_read(struct bfin_spi_master_data *drv_data) { - (void) read_RDBR(drv_data); + (void) bfin_read(&drv_data->regs->rdbr); } static void bfin_spi_u8_writer(struct bfin_spi_master_data *drv_data) @@ -269,10 +229,10 @@ static void bfin_spi_u8_writer(struct bfin_spi_master_data *drv_data) bfin_spi_dummy_read(drv_data); while (drv_data->tx < drv_data->tx_end) { - write_TDBR(drv_data, (*(u8 *) (drv_data->tx++))); + bfin_write(&drv_data->regs->tdbr, (*(u8 *) (drv_data->tx++))); /* wait until transfer finished. checking SPIF or TXS may not guarantee transfer completion */ - while (!(read_STAT(drv_data) & BIT_STAT_RXS)) + while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_RXS)) cpu_relax(); /* discard RX data and clear RXS */ bfin_spi_dummy_read(drv_data); @@ -287,10 +247,10 @@ static void bfin_spi_u8_reader(struct bfin_spi_master_data *drv_data) bfin_spi_dummy_read(drv_data); while (drv_data->rx < drv_data->rx_end) { - write_TDBR(drv_data, tx_val); - while (!(read_STAT(drv_data) & BIT_STAT_RXS)) + bfin_write(&drv_data->regs->tdbr, tx_val); + while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_RXS)) cpu_relax(); - *(u8 *) (drv_data->rx++) = read_RDBR(drv_data); + *(u8 *) (drv_data->rx++) = bfin_read(&drv_data->regs->rdbr); } } @@ -300,10 +260,10 @@ static void bfin_spi_u8_duplex(struct bfin_spi_master_data *drv_data) bfin_spi_dummy_read(drv_data); while (drv_data->rx < drv_data->rx_end) { - write_TDBR(drv_data, (*(u8 *) (drv_data->tx++))); - while (!(read_STAT(drv_data) & BIT_STAT_RXS)) + bfin_write(&drv_data->regs->tdbr, (*(u8 *) (drv_data->tx++))); + while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_RXS)) cpu_relax(); - *(u8 *) (drv_data->rx++) = read_RDBR(drv_data); + *(u8 *) (drv_data->rx++) = bfin_read(&drv_data->regs->rdbr); } } @@ -319,11 +279,11 @@ static void bfin_spi_u16_writer(struct bfin_spi_master_data *drv_data) bfin_spi_dummy_read(drv_data); while (drv_data->tx < drv_data->tx_end) { - write_TDBR(drv_data, (*(u16 *) (drv_data->tx))); + bfin_write(&drv_data->regs->tdbr, (*(u16 *) (drv_data->tx))); drv_data->tx += 2; /* wait until transfer finished. 
checking SPIF or TXS may not guarantee transfer completion */ - while (!(read_STAT(drv_data) & BIT_STAT_RXS)) + while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_RXS)) cpu_relax(); /* discard RX data and clear RXS */ bfin_spi_dummy_read(drv_data); @@ -338,10 +298,10 @@ static void bfin_spi_u16_reader(struct bfin_spi_master_data *drv_data) bfin_spi_dummy_read(drv_data); while (drv_data->rx < drv_data->rx_end) { - write_TDBR(drv_data, tx_val); - while (!(read_STAT(drv_data) & BIT_STAT_RXS)) + bfin_write(&drv_data->regs->tdbr, tx_val); + while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_RXS)) cpu_relax(); - *(u16 *) (drv_data->rx) = read_RDBR(drv_data); + *(u16 *) (drv_data->rx) = bfin_read(&drv_data->regs->rdbr); drv_data->rx += 2; } } @@ -352,11 +312,11 @@ static void bfin_spi_u16_duplex(struct bfin_spi_master_data *drv_data) bfin_spi_dummy_read(drv_data); while (drv_data->rx < drv_data->rx_end) { - write_TDBR(drv_data, (*(u16 *) (drv_data->tx))); + bfin_write(&drv_data->regs->tdbr, (*(u16 *) (drv_data->tx))); drv_data->tx += 2; - while (!(read_STAT(drv_data) & BIT_STAT_RXS)) + while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_RXS)) cpu_relax(); - *(u16 *) (drv_data->rx) = read_RDBR(drv_data); + *(u16 *) (drv_data->rx) = bfin_read(&drv_data->regs->rdbr); drv_data->rx += 2; } } @@ -428,7 +388,7 @@ static irqreturn_t bfin_spi_pio_irq_handler(int irq, void *dev_id) int loop = 0; /* wait until transfer finished. */ - while (!(read_STAT(drv_data) & BIT_STAT_RXS)) + while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_RXS)) cpu_relax(); if ((drv_data->tx && drv_data->tx >= drv_data->tx_end) || @@ -439,11 +399,11 @@ static irqreturn_t bfin_spi_pio_irq_handler(int irq, void *dev_id) if (n_bytes % 2) { u16 *buf = (u16 *)drv_data->rx; for (loop = 0; loop < n_bytes / 2; loop++) - *buf++ = read_RDBR(drv_data); + *buf++ = bfin_read(&drv_data->regs->rdbr); } else { u8 *buf = (u8 *)drv_data->rx; for (loop = 0; loop < n_bytes; loop++) - *buf++ = read_RDBR(drv_data); + *buf++ = bfin_read(&drv_data->regs->rdbr); } drv_data->rx += n_bytes; } @@ -468,15 +428,15 @@ static irqreturn_t bfin_spi_pio_irq_handler(int irq, void *dev_id) u16 *buf = (u16 *)drv_data->rx; u16 *buf2 = (u16 *)drv_data->tx; for (loop = 0; loop < n_bytes / 2; loop++) { - *buf++ = read_RDBR(drv_data); - write_TDBR(drv_data, *buf2++); + *buf++ = bfin_read(&drv_data->regs->rdbr); + bfin_write(&drv_data->regs->tdbr, *buf2++); } } else { u8 *buf = (u8 *)drv_data->rx; u8 *buf2 = (u8 *)drv_data->tx; for (loop = 0; loop < n_bytes; loop++) { - *buf++ = read_RDBR(drv_data); - write_TDBR(drv_data, *buf2++); + *buf++ = bfin_read(&drv_data->regs->rdbr); + bfin_write(&drv_data->regs->tdbr, *buf2++); } } } else if (drv_data->rx) { @@ -485,14 +445,14 @@ static irqreturn_t bfin_spi_pio_irq_handler(int irq, void *dev_id) if (n_bytes % 2) { u16 *buf = (u16 *)drv_data->rx; for (loop = 0; loop < n_bytes / 2; loop++) { - *buf++ = read_RDBR(drv_data); - write_TDBR(drv_data, chip->idle_tx_val); + *buf++ = bfin_read(&drv_data->regs->rdbr); + bfin_write(&drv_data->regs->tdbr, chip->idle_tx_val); } } else { u8 *buf = (u8 *)drv_data->rx; for (loop = 0; loop < n_bytes; loop++) { - *buf++ = read_RDBR(drv_data); - write_TDBR(drv_data, chip->idle_tx_val); + *buf++ = bfin_read(&drv_data->regs->rdbr); + bfin_write(&drv_data->regs->tdbr, chip->idle_tx_val); } } } else if (drv_data->tx) { @@ -501,14 +461,14 @@ static irqreturn_t bfin_spi_pio_irq_handler(int irq, void *dev_id) if (n_bytes % 2) { u16 *buf = (u16 *)drv_data->tx; for (loop = 0; loop < n_bytes / 2; loop++) 
{ - read_RDBR(drv_data); - write_TDBR(drv_data, *buf++); + bfin_read(&drv_data->regs->rdbr); + bfin_write(&drv_data->regs->tdbr, *buf++); } } else { u8 *buf = (u8 *)drv_data->tx; for (loop = 0; loop < n_bytes; loop++) { - read_RDBR(drv_data); - write_TDBR(drv_data, *buf++); + bfin_read(&drv_data->regs->rdbr); + bfin_write(&drv_data->regs->tdbr, *buf++); } } } @@ -528,19 +488,19 @@ static irqreturn_t bfin_spi_dma_irq_handler(int irq, void *dev_id) struct spi_message *msg = drv_data->cur_msg; unsigned long timeout; unsigned short dmastat = get_dma_curr_irqstat(drv_data->dma_channel); - u16 spistat = read_STAT(drv_data); + u16 spistat = bfin_read(&drv_data->regs->stat); dev_dbg(&drv_data->pdev->dev, "in dma_irq_handler dmastat:0x%x spistat:0x%x\n", dmastat, spistat); if (drv_data->rx != NULL) { - u16 cr = read_CTRL(drv_data); + u16 cr = bfin_read(&drv_data->regs->ctl); /* discard old RX data and clear RXS */ bfin_spi_dummy_read(drv_data); - write_CTRL(drv_data, cr & ~BIT_CTL_ENABLE); /* Disable SPI */ - write_CTRL(drv_data, cr & ~BIT_CTL_TIMOD); /* Restore State */ - write_STAT(drv_data, BIT_STAT_CLR); /* Clear Status */ + bfin_write(&drv_data->regs->ctl, cr & ~BIT_CTL_ENABLE); /* Disable SPI */ + bfin_write(&drv_data->regs->ctl, cr & ~BIT_CTL_TIMOD); /* Restore State */ + bfin_write(&drv_data->regs->stat, BIT_STAT_CLR); /* Clear Status */ } clear_dma_irqstat(drv_data->dma_channel); @@ -552,17 +512,17 @@ static irqreturn_t bfin_spi_dma_irq_handler(int irq, void *dev_id) * register until it goes low for 2 successive reads */ if (drv_data->tx != NULL) { - while ((read_STAT(drv_data) & BIT_STAT_TXS) || - (read_STAT(drv_data) & BIT_STAT_TXS)) + while ((bfin_read(&drv_data->regs->stat) & BIT_STAT_TXS) || + (bfin_read(&drv_data->regs->stat) & BIT_STAT_TXS)) cpu_relax(); } dev_dbg(&drv_data->pdev->dev, "in dma_irq_handler dmastat:0x%x spistat:0x%x\n", - dmastat, read_STAT(drv_data)); + dmastat, bfin_read(&drv_data->regs->stat)); timeout = jiffies + HZ; - while (!(read_STAT(drv_data) & BIT_STAT_SPIF)) + while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_SPIF)) if (!time_before(jiffies, timeout)) { dev_warn(&drv_data->pdev->dev, "timeout waiting for SPIF"); break; @@ -699,9 +659,9 @@ static void bfin_spi_pump_transfers(unsigned long data) bfin_spi_giveback(drv_data); return; } - cr = read_CTRL(drv_data) & ~(BIT_CTL_TIMOD | BIT_CTL_WORDSIZE); + cr = bfin_read(&drv_data->regs->ctl) & ~(BIT_CTL_TIMOD | BIT_CTL_WORDSIZE); cr |= cr_width; - write_CTRL(drv_data, cr); + bfin_write(&drv_data->regs->ctl, cr); dev_dbg(&drv_data->pdev->dev, "transfer: drv_data->ops is %p, chip->ops is %p, u8_ops is %p\n", @@ -712,11 +672,11 @@ static void bfin_spi_pump_transfers(unsigned long data) /* Speed setup (surely valid because already checked) */ if (transfer->speed_hz) - write_BAUD(drv_data, hz_to_spi_baud(transfer->speed_hz)); + bfin_write(&drv_data->regs->baud, hz_to_spi_baud(transfer->speed_hz)); else - write_BAUD(drv_data, chip->baud); + bfin_write(&drv_data->regs->baud, chip->baud); - write_STAT(drv_data, BIT_STAT_CLR); + bfin_write(&drv_data->regs->stat, BIT_STAT_CLR); bfin_spi_cs_active(drv_data, chip); dev_dbg(&drv_data->pdev->dev, @@ -749,7 +709,7 @@ static void bfin_spi_pump_transfers(unsigned long data) } /* poll for SPI completion before start */ - while (!(read_STAT(drv_data) & BIT_STAT_SPIF)) + while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_SPIF)) cpu_relax(); /* dirty hack for autobuffer DMA mode */ @@ -766,7 +726,7 @@ static void bfin_spi_pump_transfers(unsigned long data) 
enable_dma(drv_data->dma_channel); /* start SPI transfer */ - write_CTRL(drv_data, cr | BIT_CTL_TIMOD_DMA_TX); + bfin_write(&drv_data->regs->ctl, cr | BIT_CTL_TIMOD_DMA_TX); /* just return here, there can only be one transfer * in this mode @@ -821,7 +781,7 @@ static void bfin_spi_pump_transfers(unsigned long data) set_dma_config(drv_data->dma_channel, dma_config); local_irq_save(flags); SSYNC(); - write_CTRL(drv_data, cr); + bfin_write(&drv_data->regs->ctl, cr); enable_dma(drv_data->dma_channel); dma_enable_irq(drv_data->dma_channel); local_irq_restore(flags); @@ -835,7 +795,7 @@ static void bfin_spi_pump_transfers(unsigned long data) * problems with setting up the output value in TDBR prior to the * start of the transfer. */ - write_CTRL(drv_data, cr | BIT_CTL_TXMOD); + bfin_write(&drv_data->regs->ctl, cr | BIT_CTL_TXMOD); if (chip->pio_interrupt) { /* SPI irq should have been disabled by now */ @@ -845,19 +805,19 @@ static void bfin_spi_pump_transfers(unsigned long data) /* start transfer */ if (drv_data->tx == NULL) - write_TDBR(drv_data, chip->idle_tx_val); + bfin_write(&drv_data->regs->tdbr, chip->idle_tx_val); else { int loop; if (bits_per_word % 16 == 0) { u16 *buf = (u16 *)drv_data->tx; for (loop = 0; loop < bits_per_word / 16; loop++) { - write_TDBR(drv_data, *buf++); + bfin_write(&drv_data->regs->tdbr, *buf++); } } else if (bits_per_word % 8 == 0) { u8 *buf = (u8 *)drv_data->tx; for (loop = 0; loop < bits_per_word / 8; loop++) - write_TDBR(drv_data, *buf++); + bfin_write(&drv_data->regs->tdbr, *buf++); } drv_data->tx += drv_data->n_bytes; @@ -1353,8 +1313,8 @@ static int __init bfin_spi_probe(struct platform_device *pdev) goto out_error_get_res; } - drv_data->regs_base = ioremap(res->start, resource_size(res)); - if (drv_data->regs_base == NULL) { + drv_data->regs = ioremap(res->start, resource_size(res)); + if (drv_data->regs == NULL) { dev_err(dev, "Cannot map IO\n"); status = -ENXIO; goto out_error_ioremap; @@ -1397,8 +1357,8 @@ static int __init bfin_spi_probe(struct platform_device *pdev) /* Reset SPI registers. If these registers were used by the boot loader, * the sky may fall on your head if you enable the dma controller. 
*/ - write_CTRL(drv_data, BIT_CTL_CPHA | BIT_CTL_MASTER); - write_FLAG(drv_data, 0xFF00); + bfin_write(&drv_data->regs->ctl, BIT_CTL_CPHA | BIT_CTL_MASTER); + bfin_write(&drv_data->regs->flg, 0xFF00); /* Register with the SPI framework */ platform_set_drvdata(pdev, drv_data); @@ -1408,15 +1368,15 @@ static int __init bfin_spi_probe(struct platform_device *pdev) goto out_error_queue_alloc; } - dev_info(dev, "%s, Version %s, regs_base@%p, dma channel@%d\n", - DRV_DESC, DRV_VERSION, drv_data->regs_base, + dev_info(dev, "%s, Version %s, regs@%p, dma channel@%d\n", + DRV_DESC, DRV_VERSION, drv_data->regs, drv_data->dma_channel); return status; out_error_queue_alloc: bfin_spi_destroy_queue(drv_data); out_error_free_io: - iounmap((void *) drv_data->regs_base); + iounmap(drv_data->regs); out_error_ioremap: out_error_get_res: spi_master_put(master); @@ -1473,14 +1433,14 @@ static int bfin_spi_suspend(struct platform_device *pdev, pm_message_t state) if (status != 0) return status; - drv_data->ctrl_reg = read_CTRL(drv_data); - drv_data->flag_reg = read_FLAG(drv_data); + drv_data->ctrl_reg = bfin_read(&drv_data->regs->ctl); + drv_data->flag_reg = bfin_read(&drv_data->regs->flg); /* * reset SPI_CTL and SPI_FLG registers */ - write_CTRL(drv_data, BIT_CTL_CPHA | BIT_CTL_MASTER); - write_FLAG(drv_data, 0xFF00); + bfin_write(&drv_data->regs->ctl, BIT_CTL_CPHA | BIT_CTL_MASTER); + bfin_write(&drv_data->regs->flg, 0xFF00); return 0; } @@ -1490,8 +1450,8 @@ static int bfin_spi_resume(struct platform_device *pdev) struct bfin_spi_master_data *drv_data = platform_get_drvdata(pdev); int status = 0; - write_CTRL(drv_data, drv_data->ctrl_reg); - write_FLAG(drv_data, drv_data->flag_reg); + bfin_write(&drv_data->regs->ctl, drv_data->ctrl_reg); + bfin_write(&drv_data->regs->flg, drv_data->flag_reg); /* Start the queue running */ status = bfin_spi_start_queue(drv_data); -- cgit v0.10.2 From ddc0bf13d63715c2bce0fe8818fba12b82823283 Mon Sep 17 00:00:00 2001 From: Mike Frysinger Date: Fri, 17 Jun 2011 04:16:57 -0400 Subject: spi/bfin_spi: constify pin array This array isn't written anywhere, so constify it. Signed-off-by: Mike Frysinger Signed-off-by: Grant Likely diff --git a/drivers/spi/spi-bfin5xx.c b/drivers/spi/spi-bfin5xx.c index 7331831..ca421c4 100644 --- a/drivers/spi/spi-bfin5xx.c +++ b/drivers/spi/spi-bfin5xx.c @@ -965,7 +965,7 @@ static int bfin_spi_transfer(struct spi_device *spi, struct spi_message *msg) #define MAX_SPI_SSEL 7 -static u16 ssel[][MAX_SPI_SSEL] = { +static const u16 ssel[][MAX_SPI_SSEL] = { {P_SPI0_SSEL1, P_SPI0_SSEL2, P_SPI0_SSEL3, P_SPI0_SSEL4, P_SPI0_SSEL5, P_SPI0_SSEL6, P_SPI0_SSEL7}, -- cgit v0.10.2 From c52d4e5f3cd3939bf44e788fdcbce8dcebb6fe61 Mon Sep 17 00:00:00 2001 From: Mike Frysinger Date: Fri, 17 Jun 2011 04:16:58 -0400 Subject: spi/bfin_spi: uninline fat queue funcs There's no need for these queue funcs to be inlined, so drop the markings. This shaves off a few hundred duplicated bytes. 
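The saving is easiest to see in a toy example (illustrative only, nothing here is from the driver): a sizeable static helper marked inline and called from several places can be expanded at every call site, whereas the plain static version is emitted once and shared.

	/* With "static inline", both callers may each carry a full copy of
	 * queue_helper(); without the keyword there is one copy and two
	 * ordinary calls. For multi-hundred-byte queue functions with
	 * several callers, that difference is the duplicated bytes saved.
	 */
	static int queue_helper(int v)
	{
		/* stand-in for a few dozen lines of locking and list work */
		return v * 2;
	}

	int start_path(int v) { return queue_helper(v); }
	int stop_path(int v)  { return queue_helper(v); }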
Signed-off-by: Mike Frysinger Signed-off-by: Grant Likely diff --git a/drivers/spi/spi-bfin5xx.c b/drivers/spi/spi-bfin5xx.c index ca421c4..b8d25f2 100644 --- a/drivers/spi/spi-bfin5xx.c +++ b/drivers/spi/spi-bfin5xx.c @@ -1186,7 +1186,7 @@ static void bfin_spi_cleanup(struct spi_device *spi) spi_set_ctldata(spi, NULL); } -static inline int bfin_spi_init_queue(struct bfin_spi_master_data *drv_data) +static int bfin_spi_init_queue(struct bfin_spi_master_data *drv_data) { INIT_LIST_HEAD(&drv_data->queue); spin_lock_init(&drv_data->lock); @@ -1208,7 +1208,7 @@ static inline int bfin_spi_init_queue(struct bfin_spi_master_data *drv_data) return 0; } -static inline int bfin_spi_start_queue(struct bfin_spi_master_data *drv_data) +static int bfin_spi_start_queue(struct bfin_spi_master_data *drv_data) { unsigned long flags; @@ -1230,7 +1230,7 @@ static inline int bfin_spi_start_queue(struct bfin_spi_master_data *drv_data) return 0; } -static inline int bfin_spi_stop_queue(struct bfin_spi_master_data *drv_data) +static int bfin_spi_stop_queue(struct bfin_spi_master_data *drv_data) { unsigned long flags; unsigned limit = 500; @@ -1259,7 +1259,7 @@ static inline int bfin_spi_stop_queue(struct bfin_spi_master_data *drv_data) return status; } -static inline int bfin_spi_destroy_queue(struct bfin_spi_master_data *drv_data) +static int bfin_spi_destroy_queue(struct bfin_spi_master_data *drv_data) { int status; -- cgit v0.10.2 From a2274cedc4b33a29be6583cd757bdc0b436851ec Mon Sep 17 00:00:00 2001 From: Stefan Schmidt Date: Sat, 18 Jun 2011 20:02:06 +0200 Subject: spi/pxa2xx: Remove unavailable ssp_type from documentation Since commit commit 2f1a74e5a2de0459139b85af95e901448726c375 Author: eric miao Date: Wed Nov 21 18:50:53 2007 +0800 [ARM] pxa: make pxa2xx_spi driver use ssp_request()/ssp_free() the ssp_type field in struct pxa2xx_spi_master is no longer available. Signed-off-by: Stefan Schmidt Signed-off-by: Grant Likely diff --git a/Documentation/spi/pxa2xx b/Documentation/spi/pxa2xx index 493dada..00511e0 100644 --- a/Documentation/spi/pxa2xx +++ b/Documentation/spi/pxa2xx @@ -22,15 +22,11 @@ Typically a SPI master is defined in the arch/.../mach-*/board-*.c as a found in include/linux/spi/pxa2xx_spi.h: struct pxa2xx_spi_master { - enum pxa_ssp_type ssp_type; u32 clock_enable; u16 num_chipselect; u8 enable_dma; }; -The "pxa2xx_spi_master.ssp_type" field must have a value between 1 and 3 and -informs the driver which features a particular SSP supports. - The "pxa2xx_spi_master.clock_enable" field is used to enable/disable the corresponding SSP peripheral block in the "Clock Enable Register (CKEN"). See the "PXA2xx Developer Manual" section "Clocks and Power Management". @@ -61,7 +57,6 @@ static struct resource pxa_spi_nssp_resources[] = { }; static struct pxa2xx_spi_master pxa_nssp_master_info = { - .ssp_type = PXA25x_NSSP, /* Type of SSP */ .clock_enable = CKEN_NSSP, /* NSSP Peripheral clock */ .num_chipselect = 1, /* Matches the number of chips attached to NSSP */ .enable_dma = 1, /* Enables NSSP DMA */ -- cgit v0.10.2 From 932036ce03f6c853a17005bced1e82b23f6378c0 Mon Sep 17 00:00:00 2001 From: Peter Korsgaard Date: Tue, 28 Jun 2011 14:49:13 +0200 Subject: mach-jive: convert to spi_gpio Rather than the deprecated spi_s3c24xx_gpio driver. Compile tested only. 
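For reference, a GPIO-bitbanged bus of this kind is registered through the spi-gpio platform data; the sketch below is generic and uses made-up GPIO numbers rather than the actual Jive pins, which appear in the (truncated) diff that follows:

	#include <linux/platform_device.h>
	#include <linux/spi/spi.h>
	#include <linux/spi/spi_gpio.h>

	static struct spi_gpio_platform_data example_spi_gpio_data = {
		.sck		= 10,	/* illustrative GPIO numbers only */
		.mosi		= 11,
		.miso		= 12,
		.num_chipselect	= 1,
	};

	static struct platform_device example_spi_gpio_device = {
		.name	= "spi_gpio",
		.id	= 1,			/* becomes the SPI bus number */
		.dev	= {
			.platform_data	= &example_spi_gpio_data,
		},
	};

Slave devices on such a bus then name their chip-select GPIO in spi_board_info.controller_data and point .bus_num at the platform device id above.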
Signed-off-by: Peter Korsgaard Acked-by: Ben Dooks Signed-off-by: Grant Likely diff --git a/arch/arm/mach-s3c2412/mach-jive.c b/arch/arm/mach-s3c2412/mach-jive.c index 85dcaeb..5eeb475 100644 --- a/arch/arm/mach-s3c2412/mach-jive.c +++ b/arch/arm/mach-s3c2412/mach-jive.c @@ -25,6 +25,7 @@ #include