From cd7bed00340475ee72a013a070e200e065085ef3 Mon Sep 17 00:00:00 2001 From: Mika Westerberg Date: Tue, 22 Jan 2013 12:26:28 +0200 Subject: spi/pxa2xx: break out the private DMA API usage into a separate file The PXA SPI driver uses PXA platform specific private DMA implementation which does not work on non-PXA platforms. In order to use this driver on other platforms we break out the private DMA implementation into a separate file that gets compiled only when CONFIG_SPI_PXA2XX_PXADMA is set. The DMA functions are stubbed out if there is no DMA implementation selected (i.e we are building on non-PXA platform). While we are there we can kill the dummy DMA bits in pxa2xx_spi.h as they are not needed anymore for CE4100. Once this is done we can add the generic DMA engine support to the driver that allows usage of any DMA controller that implements DMA engine API. Signed-off-by: Mika Westerberg Acked-by: Linus Walleij Tested-by: Lu Cao Signed-off-by: Mark Brown diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index a90393d..f187866 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -297,6 +297,12 @@ config SPI_PPC4xx help This selects a driver for the PPC4xx SPI Controller. +config SPI_PXA2XX_PXADMA + bool "PXA2xx SSP legacy PXA DMA API support" + depends on SPI_PXA2XX && ARCH_PXA + help + Enable PXA private legacy DMA API support. + config SPI_PXA2XX tristate "PXA2xx SSP SPI master" depends on ARCH_PXA || PCI diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index 64e970b..2e3cdd3 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile @@ -47,7 +47,9 @@ obj-$(CONFIG_SPI_OMAP24XX) += spi-omap2-mcspi.o obj-$(CONFIG_SPI_ORION) += spi-orion.o obj-$(CONFIG_SPI_PL022) += spi-pl022.o obj-$(CONFIG_SPI_PPC4xx) += spi-ppc4xx.o -obj-$(CONFIG_SPI_PXA2XX) += spi-pxa2xx.o +spi-pxa2xx-platform-objs := spi-pxa2xx.o +spi-pxa2xx-platform-$(CONFIG_SPI_PXA2XX_PXADMA) += spi-pxa2xx-pxadma.o +obj-$(CONFIG_SPI_PXA2XX) += spi-pxa2xx-platform.o obj-$(CONFIG_SPI_PXA2XX_PCI) += spi-pxa2xx-pci.o obj-$(CONFIG_SPI_RSPI) += spi-rspi.o obj-$(CONFIG_SPI_S3C24XX) += spi-s3c24xx-hw.o diff --git a/drivers/spi/spi-pxa2xx-pxadma.c b/drivers/spi/spi-pxa2xx-pxadma.c new file mode 100644 index 0000000..2916efc --- /dev/null +++ b/drivers/spi/spi-pxa2xx-pxadma.c @@ -0,0 +1,490 @@ +/* + * PXA2xx SPI private DMA support. + * + * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "spi-pxa2xx.h" + +#define DMA_INT_MASK (DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR) +#define RESET_DMA_CHANNEL (DCSR_NODESC | DMA_INT_MASK) + +bool pxa2xx_spi_dma_is_possible(size_t len) +{ + /* Try to map dma buffer and do a dma transfer if successful, but + * only if the length is non-zero and less than MAX_DMA_LEN. 
+ * + * Zero-length non-descriptor DMA is illegal on PXA2xx; force use + * of PIO instead. Care is needed above because the transfer may + * have have been passed with buffers that are already dma mapped. + * A zero-length transfer in PIO mode will not try to write/read + * to/from the buffers + * + * REVISIT large transfers are exactly where we most want to be + * using DMA. If this happens much, split those transfers into + * multiple DMA segments rather than forcing PIO. + */ + return len > 0 && len <= MAX_DMA_LEN; +} + +int pxa2xx_spi_map_dma_buffers(struct driver_data *drv_data) +{ + struct spi_message *msg = drv_data->cur_msg; + struct device *dev = &msg->spi->dev; + + if (!drv_data->cur_chip->enable_dma) + return 0; + + if (msg->is_dma_mapped) + return drv_data->rx_dma && drv_data->tx_dma; + + if (!IS_DMA_ALIGNED(drv_data->rx) || !IS_DMA_ALIGNED(drv_data->tx)) + return 0; + + /* Modify setup if rx buffer is null */ + if (drv_data->rx == NULL) { + *drv_data->null_dma_buf = 0; + drv_data->rx = drv_data->null_dma_buf; + drv_data->rx_map_len = 4; + } else + drv_data->rx_map_len = drv_data->len; + + + /* Modify setup if tx buffer is null */ + if (drv_data->tx == NULL) { + *drv_data->null_dma_buf = 0; + drv_data->tx = drv_data->null_dma_buf; + drv_data->tx_map_len = 4; + } else + drv_data->tx_map_len = drv_data->len; + + /* Stream map the tx buffer. Always do DMA_TO_DEVICE first + * so we flush the cache *before* invalidating it, in case + * the tx and rx buffers overlap. + */ + drv_data->tx_dma = dma_map_single(dev, drv_data->tx, + drv_data->tx_map_len, DMA_TO_DEVICE); + if (dma_mapping_error(dev, drv_data->tx_dma)) + return 0; + + /* Stream map the rx buffer */ + drv_data->rx_dma = dma_map_single(dev, drv_data->rx, + drv_data->rx_map_len, DMA_FROM_DEVICE); + if (dma_mapping_error(dev, drv_data->rx_dma)) { + dma_unmap_single(dev, drv_data->tx_dma, + drv_data->tx_map_len, DMA_TO_DEVICE); + return 0; + } + + return 1; +} + +static void pxa2xx_spi_unmap_dma_buffers(struct driver_data *drv_data) +{ + struct device *dev; + + if (!drv_data->dma_mapped) + return; + + if (!drv_data->cur_msg->is_dma_mapped) { + dev = &drv_data->cur_msg->spi->dev; + dma_unmap_single(dev, drv_data->rx_dma, + drv_data->rx_map_len, DMA_FROM_DEVICE); + dma_unmap_single(dev, drv_data->tx_dma, + drv_data->tx_map_len, DMA_TO_DEVICE); + } + + drv_data->dma_mapped = 0; +} + +static int wait_ssp_rx_stall(void const __iomem *ioaddr) +{ + unsigned long limit = loops_per_jiffy << 1; + + while ((read_SSSR(ioaddr) & SSSR_BSY) && --limit) + cpu_relax(); + + return limit; +} + +static int wait_dma_channel_stop(int channel) +{ + unsigned long limit = loops_per_jiffy << 1; + + while (!(DCSR(channel) & DCSR_STOPSTATE) && --limit) + cpu_relax(); + + return limit; +} + +static void pxa2xx_spi_dma_error_stop(struct driver_data *drv_data, + const char *msg) +{ + void __iomem *reg = drv_data->ioaddr; + + /* Stop and reset */ + DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; + DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; + write_SSSR_CS(drv_data, drv_data->clear_sr); + write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg); + if (!pxa25x_ssp_comp(drv_data)) + write_SSTO(0, reg); + pxa2xx_spi_flush(drv_data); + write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); + + pxa2xx_spi_unmap_dma_buffers(drv_data); + + dev_err(&drv_data->pdev->dev, "%s\n", msg); + + drv_data->cur_msg->state = ERROR_STATE; + tasklet_schedule(&drv_data->pump_transfers); +} + +static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data) +{ + void __iomem 
*reg = drv_data->ioaddr; + struct spi_message *msg = drv_data->cur_msg; + + /* Clear and disable interrupts on SSP and DMA channels*/ + write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg); + write_SSSR_CS(drv_data, drv_data->clear_sr); + DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; + DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; + + if (wait_dma_channel_stop(drv_data->rx_channel) == 0) + dev_err(&drv_data->pdev->dev, + "dma_handler: dma rx channel stop failed\n"); + + if (wait_ssp_rx_stall(drv_data->ioaddr) == 0) + dev_err(&drv_data->pdev->dev, + "dma_transfer: ssp rx stall failed\n"); + + pxa2xx_spi_unmap_dma_buffers(drv_data); + + /* update the buffer pointer for the amount completed in dma */ + drv_data->rx += drv_data->len - + (DCMD(drv_data->rx_channel) & DCMD_LENGTH); + + /* read trailing data from fifo, it does not matter how many + * bytes are in the fifo just read until buffer is full + * or fifo is empty, which ever occurs first */ + drv_data->read(drv_data); + + /* return count of what was actually read */ + msg->actual_length += drv_data->len - + (drv_data->rx_end - drv_data->rx); + + /* Transfer delays and chip select release are + * handled in pump_transfers or giveback + */ + + /* Move to next transfer */ + msg->state = pxa2xx_spi_next_transfer(drv_data); + + /* Schedule transfer tasklet */ + tasklet_schedule(&drv_data->pump_transfers); +} + +void pxa2xx_spi_dma_handler(int channel, void *data) +{ + struct driver_data *drv_data = data; + u32 irq_status = DCSR(channel) & DMA_INT_MASK; + + if (irq_status & DCSR_BUSERR) { + + if (channel == drv_data->tx_channel) + pxa2xx_spi_dma_error_stop(drv_data, + "dma_handler: bad bus address on tx channel"); + else + pxa2xx_spi_dma_error_stop(drv_data, + "dma_handler: bad bus address on rx channel"); + return; + } + + /* PXA255x_SSP has no timeout interrupt, wait for tailing bytes */ + if ((channel == drv_data->tx_channel) + && (irq_status & DCSR_ENDINTR) + && (drv_data->ssp_type == PXA25x_SSP)) { + + /* Wait for rx to stall */ + if (wait_ssp_rx_stall(drv_data->ioaddr) == 0) + dev_err(&drv_data->pdev->dev, + "dma_handler: ssp rx stall failed\n"); + + /* finish this transfer, start the next */ + pxa2xx_spi_dma_transfer_complete(drv_data); + } +} + +irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data) +{ + u32 irq_status; + void __iomem *reg = drv_data->ioaddr; + + irq_status = read_SSSR(reg) & drv_data->mask_sr; + if (irq_status & SSSR_ROR) { + pxa2xx_spi_dma_error_stop(drv_data, + "dma_transfer: fifo overrun"); + return IRQ_HANDLED; + } + + /* Check for false positive timeout */ + if ((irq_status & SSSR_TINT) + && (DCSR(drv_data->tx_channel) & DCSR_RUN)) { + write_SSSR(SSSR_TINT, reg); + return IRQ_HANDLED; + } + + if (irq_status & SSSR_TINT || drv_data->rx == drv_data->rx_end) { + + /* Clear and disable timeout interrupt, do the rest in + * dma_transfer_complete */ + if (!pxa25x_ssp_comp(drv_data)) + write_SSTO(0, reg); + + /* finish this transfer, start the next */ + pxa2xx_spi_dma_transfer_complete(drv_data); + + return IRQ_HANDLED; + } + + /* Opps problem detected */ + return IRQ_NONE; +} + +int pxa2xx_spi_dma_prepare(struct driver_data *drv_data, u32 dma_burst) +{ + u32 dma_width; + + switch (drv_data->n_bytes) { + case 1: + dma_width = DCMD_WIDTH1; + break; + case 2: + dma_width = DCMD_WIDTH2; + break; + default: + dma_width = DCMD_WIDTH4; + break; + } + + /* Setup rx DMA Channel */ + DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; + DSADR(drv_data->rx_channel) = drv_data->ssdr_physical; + 
DTADR(drv_data->rx_channel) = drv_data->rx_dma; + if (drv_data->rx == drv_data->null_dma_buf) + /* No target address increment */ + DCMD(drv_data->rx_channel) = DCMD_FLOWSRC + | dma_width + | dma_burst + | drv_data->len; + else + DCMD(drv_data->rx_channel) = DCMD_INCTRGADDR + | DCMD_FLOWSRC + | dma_width + | dma_burst + | drv_data->len; + + /* Setup tx DMA Channel */ + DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; + DSADR(drv_data->tx_channel) = drv_data->tx_dma; + DTADR(drv_data->tx_channel) = drv_data->ssdr_physical; + if (drv_data->tx == drv_data->null_dma_buf) + /* No source address increment */ + DCMD(drv_data->tx_channel) = DCMD_FLOWTRG + | dma_width + | dma_burst + | drv_data->len; + else + DCMD(drv_data->tx_channel) = DCMD_INCSRCADDR + | DCMD_FLOWTRG + | dma_width + | dma_burst + | drv_data->len; + + /* Enable dma end irqs on SSP to detect end of transfer */ + if (drv_data->ssp_type == PXA25x_SSP) + DCMD(drv_data->tx_channel) |= DCMD_ENDIRQEN; + + return 0; +} + +void pxa2xx_spi_dma_start(struct driver_data *drv_data) +{ + DCSR(drv_data->rx_channel) |= DCSR_RUN; + DCSR(drv_data->tx_channel) |= DCSR_RUN; +} + +int pxa2xx_spi_dma_setup(struct driver_data *drv_data) +{ + struct device *dev = &drv_data->pdev->dev; + struct ssp_device *ssp = drv_data->ssp; + + /* Get two DMA channels (rx and tx) */ + drv_data->rx_channel = pxa_request_dma("pxa2xx_spi_ssp_rx", + DMA_PRIO_HIGH, + pxa2xx_spi_dma_handler, + drv_data); + if (drv_data->rx_channel < 0) { + dev_err(dev, "problem (%d) requesting rx channel\n", + drv_data->rx_channel); + return -ENODEV; + } + drv_data->tx_channel = pxa_request_dma("pxa2xx_spi_ssp_tx", + DMA_PRIO_MEDIUM, + pxa2xx_spi_dma_handler, + drv_data); + if (drv_data->tx_channel < 0) { + dev_err(dev, "problem (%d) requesting tx channel\n", + drv_data->tx_channel); + pxa_free_dma(drv_data->rx_channel); + return -ENODEV; + } + + DRCMR(ssp->drcmr_rx) = DRCMR_MAPVLD | drv_data->rx_channel; + DRCMR(ssp->drcmr_tx) = DRCMR_MAPVLD | drv_data->tx_channel; + + return 0; +} + +void pxa2xx_spi_dma_release(struct driver_data *drv_data) +{ + struct ssp_device *ssp = drv_data->ssp; + + DRCMR(ssp->drcmr_rx) = 0; + DRCMR(ssp->drcmr_tx) = 0; + + if (drv_data->tx_channel != 0) + pxa_free_dma(drv_data->tx_channel); + if (drv_data->rx_channel != 0) + pxa_free_dma(drv_data->rx_channel); +} + +void pxa2xx_spi_dma_resume(struct driver_data *drv_data) +{ + if (drv_data->rx_channel != -1) + DRCMR(drv_data->ssp->drcmr_rx) = + DRCMR_MAPVLD | drv_data->rx_channel; + if (drv_data->tx_channel != -1) + DRCMR(drv_data->ssp->drcmr_tx) = + DRCMR_MAPVLD | drv_data->tx_channel; +} + +int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip, + struct spi_device *spi, + u8 bits_per_word, u32 *burst_code, + u32 *threshold) +{ + struct pxa2xx_spi_chip *chip_info = + (struct pxa2xx_spi_chip *)spi->controller_data; + int bytes_per_word; + int burst_bytes; + int thresh_words; + int req_burst_size; + int retval = 0; + + /* Set the threshold (in registers) to equal the same amount of data + * as represented by burst size (in bytes). 
The computation below + * is (burst_size rounded up to nearest 8 byte, word or long word) + * divided by (bytes/register); the tx threshold is the inverse of + * the rx, so that there will always be enough data in the rx fifo + * to satisfy a burst, and there will always be enough space in the + * tx fifo to accept a burst (a tx burst will overwrite the fifo if + * there is not enough space), there must always remain enough empty + * space in the rx fifo for any data loaded to the tx fifo. + * Whenever burst_size (in bytes) equals bits/word, the fifo threshold + * will be 8, or half the fifo; + * The threshold can only be set to 2, 4 or 8, but not 16, because + * to burst 16 to the tx fifo, the fifo would have to be empty; + * however, the minimum fifo trigger level is 1, and the tx will + * request service when the fifo is at this level, with only 15 spaces. + */ + + /* find bytes/word */ + if (bits_per_word <= 8) + bytes_per_word = 1; + else if (bits_per_word <= 16) + bytes_per_word = 2; + else + bytes_per_word = 4; + + /* use struct pxa2xx_spi_chip->dma_burst_size if available */ + if (chip_info) + req_burst_size = chip_info->dma_burst_size; + else { + switch (chip->dma_burst_size) { + default: + /* if the default burst size is not set, + * do it now */ + chip->dma_burst_size = DCMD_BURST8; + case DCMD_BURST8: + req_burst_size = 8; + break; + case DCMD_BURST16: + req_burst_size = 16; + break; + case DCMD_BURST32: + req_burst_size = 32; + break; + } + } + if (req_burst_size <= 8) { + *burst_code = DCMD_BURST8; + burst_bytes = 8; + } else if (req_burst_size <= 16) { + if (bytes_per_word == 1) { + /* don't burst more than 1/2 the fifo */ + *burst_code = DCMD_BURST8; + burst_bytes = 8; + retval = 1; + } else { + *burst_code = DCMD_BURST16; + burst_bytes = 16; + } + } else { + if (bytes_per_word == 1) { + /* don't burst more than 1/2 the fifo */ + *burst_code = DCMD_BURST8; + burst_bytes = 8; + retval = 1; + } else if (bytes_per_word == 2) { + /* don't burst more than 1/2 the fifo */ + *burst_code = DCMD_BURST16; + burst_bytes = 16; + retval = 1; + } else { + *burst_code = DCMD_BURST32; + burst_bytes = 32; + } + } + + thresh_words = burst_bytes / bytes_per_word; + + /* thresh_words will be between 2 and 8 */ + *threshold = (SSCR1_RxTresh(thresh_words) & SSCR1_RFT) + | (SSCR1_TxTresh(16-thresh_words) & SSCR1_TFT); + + return retval; +} diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c index 304cf6e..5b7c2a4 100644 --- a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c @@ -24,7 +24,6 @@ #include #include #include -#include #include #include #include @@ -36,6 +35,7 @@ #include #include +#include "spi-pxa2xx.h" MODULE_AUTHOR("Stephen Street"); MODULE_DESCRIPTION("PXA2xx SSP SPI Controller"); @@ -46,12 +46,6 @@ MODULE_ALIAS("platform:pxa2xx-spi"); #define TIMOUT_DFLT 1000 -#define DMA_INT_MASK (DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR) -#define RESET_DMA_CHANNEL (DCSR_NODESC | DMA_INT_MASK) -#define IS_DMA_ALIGNED(x) IS_ALIGNED((unsigned long)(x), DMA_ALIGNMENT) -#define MAX_DMA_LEN 8191 -#define DMA_ALIGNMENT 8 - /* * for testing SSCR1 changes that require SSP restart, basically * everything except the service and interrupt enables, the pxa270 developer @@ -66,106 +60,6 @@ MODULE_ALIAS("platform:pxa2xx-spi"); | SSCR1_RFT | SSCR1_TFT | SSCR1_MWDS \ | SSCR1_SPH | SSCR1_SPO | SSCR1_LBM) -#define DEFINE_SSP_REG(reg, off) \ -static inline u32 read_##reg(void const __iomem *p) \ -{ return __raw_readl(p + (off)); } \ -\ -static inline void write_##reg(u32 v, void __iomem *p) \ 
-{ __raw_writel(v, p + (off)); } - -DEFINE_SSP_REG(SSCR0, 0x00) -DEFINE_SSP_REG(SSCR1, 0x04) -DEFINE_SSP_REG(SSSR, 0x08) -DEFINE_SSP_REG(SSITR, 0x0c) -DEFINE_SSP_REG(SSDR, 0x10) -DEFINE_SSP_REG(SSTO, 0x28) -DEFINE_SSP_REG(SSPSP, 0x2c) - -#define START_STATE ((void*)0) -#define RUNNING_STATE ((void*)1) -#define DONE_STATE ((void*)2) -#define ERROR_STATE ((void*)-1) - -struct driver_data { - /* Driver model hookup */ - struct platform_device *pdev; - - /* SSP Info */ - struct ssp_device *ssp; - - /* SPI framework hookup */ - enum pxa_ssp_type ssp_type; - struct spi_master *master; - - /* PXA hookup */ - struct pxa2xx_spi_master *master_info; - - /* DMA setup stuff */ - int rx_channel; - int tx_channel; - u32 *null_dma_buf; - - /* SSP register addresses */ - void __iomem *ioaddr; - u32 ssdr_physical; - - /* SSP masks*/ - u32 dma_cr1; - u32 int_cr1; - u32 clear_sr; - u32 mask_sr; - - /* Maximun clock rate */ - unsigned long max_clk_rate; - - /* Message Transfer pump */ - struct tasklet_struct pump_transfers; - - /* Current message transfer state info */ - struct spi_message* cur_msg; - struct spi_transfer* cur_transfer; - struct chip_data *cur_chip; - size_t len; - void *tx; - void *tx_end; - void *rx; - void *rx_end; - int dma_mapped; - dma_addr_t rx_dma; - dma_addr_t tx_dma; - size_t rx_map_len; - size_t tx_map_len; - u8 n_bytes; - u32 dma_width; - int (*write)(struct driver_data *drv_data); - int (*read)(struct driver_data *drv_data); - irqreturn_t (*transfer_handler)(struct driver_data *drv_data); - void (*cs_control)(u32 command); -}; - -struct chip_data { - u32 cr0; - u32 cr1; - u32 psp; - u32 timeout; - u8 n_bytes; - u32 dma_width; - u32 dma_burst_size; - u32 threshold; - u32 dma_threshold; - u8 enable_dma; - u8 bits_per_word; - u32 speed_hz; - union { - int gpio_cs; - unsigned int frm; - }; - int gpio_cs_inverted; - int (*write)(struct driver_data *drv_data); - int (*read)(struct driver_data *drv_data); - void (*cs_control)(u32 command); -}; - static void cs_assert(struct driver_data *drv_data) { struct chip_data *chip = drv_data->cur_chip; @@ -200,26 +94,7 @@ static void cs_deassert(struct driver_data *drv_data) gpio_set_value(chip->gpio_cs, !chip->gpio_cs_inverted); } -static void write_SSSR_CS(struct driver_data *drv_data, u32 val) -{ - void __iomem *reg = drv_data->ioaddr; - - if (drv_data->ssp_type == CE4100_SSP) - val |= read_SSSR(reg) & SSSR_ALT_FRM_MASK; - - write_SSSR(val, reg); -} - -static int pxa25x_ssp_comp(struct driver_data *drv_data) -{ - if (drv_data->ssp_type == PXA25x_SSP) - return 1; - if (drv_data->ssp_type == CE4100_SSP) - return 1; - return 0; -} - -static int flush(struct driver_data *drv_data) +int pxa2xx_spi_flush(struct driver_data *drv_data) { unsigned long limit = loops_per_jiffy << 1; @@ -345,7 +220,7 @@ static int u32_reader(struct driver_data *drv_data) return drv_data->rx == drv_data->rx_end; } -static void *next_transfer(struct driver_data *drv_data) +void *pxa2xx_spi_next_transfer(struct driver_data *drv_data) { struct spi_message *msg = drv_data->cur_msg; struct spi_transfer *trans = drv_data->cur_transfer; @@ -361,76 +236,6 @@ static void *next_transfer(struct driver_data *drv_data) return DONE_STATE; } -static int map_dma_buffers(struct driver_data *drv_data) -{ - struct spi_message *msg = drv_data->cur_msg; - struct device *dev = &msg->spi->dev; - - if (!drv_data->cur_chip->enable_dma) - return 0; - - if (msg->is_dma_mapped) - return drv_data->rx_dma && drv_data->tx_dma; - - if (!IS_DMA_ALIGNED(drv_data->rx) || !IS_DMA_ALIGNED(drv_data->tx)) - 
return 0; - - /* Modify setup if rx buffer is null */ - if (drv_data->rx == NULL) { - *drv_data->null_dma_buf = 0; - drv_data->rx = drv_data->null_dma_buf; - drv_data->rx_map_len = 4; - } else - drv_data->rx_map_len = drv_data->len; - - - /* Modify setup if tx buffer is null */ - if (drv_data->tx == NULL) { - *drv_data->null_dma_buf = 0; - drv_data->tx = drv_data->null_dma_buf; - drv_data->tx_map_len = 4; - } else - drv_data->tx_map_len = drv_data->len; - - /* Stream map the tx buffer. Always do DMA_TO_DEVICE first - * so we flush the cache *before* invalidating it, in case - * the tx and rx buffers overlap. - */ - drv_data->tx_dma = dma_map_single(dev, drv_data->tx, - drv_data->tx_map_len, DMA_TO_DEVICE); - if (dma_mapping_error(dev, drv_data->tx_dma)) - return 0; - - /* Stream map the rx buffer */ - drv_data->rx_dma = dma_map_single(dev, drv_data->rx, - drv_data->rx_map_len, DMA_FROM_DEVICE); - if (dma_mapping_error(dev, drv_data->rx_dma)) { - dma_unmap_single(dev, drv_data->tx_dma, - drv_data->tx_map_len, DMA_TO_DEVICE); - return 0; - } - - return 1; -} - -static void unmap_dma_buffers(struct driver_data *drv_data) -{ - struct device *dev; - - if (!drv_data->dma_mapped) - return; - - if (!drv_data->cur_msg->is_dma_mapped) { - dev = &drv_data->cur_msg->spi->dev; - dma_unmap_single(dev, drv_data->rx_dma, - drv_data->rx_map_len, DMA_FROM_DEVICE); - dma_unmap_single(dev, drv_data->tx_dma, - drv_data->tx_map_len, DMA_TO_DEVICE); - } - - drv_data->dma_mapped = 0; -} - /* caller already set message->status; dma and pio irqs are blocked */ static void giveback(struct driver_data *drv_data) { @@ -483,161 +288,6 @@ static void giveback(struct driver_data *drv_data) drv_data->cur_chip = NULL; } -static int wait_ssp_rx_stall(void const __iomem *ioaddr) -{ - unsigned long limit = loops_per_jiffy << 1; - - while ((read_SSSR(ioaddr) & SSSR_BSY) && --limit) - cpu_relax(); - - return limit; -} - -static int wait_dma_channel_stop(int channel) -{ - unsigned long limit = loops_per_jiffy << 1; - - while (!(DCSR(channel) & DCSR_STOPSTATE) && --limit) - cpu_relax(); - - return limit; -} - -static void dma_error_stop(struct driver_data *drv_data, const char *msg) -{ - void __iomem *reg = drv_data->ioaddr; - - /* Stop and reset */ - DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; - DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; - write_SSSR_CS(drv_data, drv_data->clear_sr); - write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg); - if (!pxa25x_ssp_comp(drv_data)) - write_SSTO(0, reg); - flush(drv_data); - write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); - - unmap_dma_buffers(drv_data); - - dev_err(&drv_data->pdev->dev, "%s\n", msg); - - drv_data->cur_msg->state = ERROR_STATE; - tasklet_schedule(&drv_data->pump_transfers); -} - -static void dma_transfer_complete(struct driver_data *drv_data) -{ - void __iomem *reg = drv_data->ioaddr; - struct spi_message *msg = drv_data->cur_msg; - - /* Clear and disable interrupts on SSP and DMA channels*/ - write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg); - write_SSSR_CS(drv_data, drv_data->clear_sr); - DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; - DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; - - if (wait_dma_channel_stop(drv_data->rx_channel) == 0) - dev_err(&drv_data->pdev->dev, - "dma_handler: dma rx channel stop failed\n"); - - if (wait_ssp_rx_stall(drv_data->ioaddr) == 0) - dev_err(&drv_data->pdev->dev, - "dma_transfer: ssp rx stall failed\n"); - - unmap_dma_buffers(drv_data); - - /* update the buffer pointer for the amount completed in dma */ - 
drv_data->rx += drv_data->len - - (DCMD(drv_data->rx_channel) & DCMD_LENGTH); - - /* read trailing data from fifo, it does not matter how many - * bytes are in the fifo just read until buffer is full - * or fifo is empty, which ever occurs first */ - drv_data->read(drv_data); - - /* return count of what was actually read */ - msg->actual_length += drv_data->len - - (drv_data->rx_end - drv_data->rx); - - /* Transfer delays and chip select release are - * handled in pump_transfers or giveback - */ - - /* Move to next transfer */ - msg->state = next_transfer(drv_data); - - /* Schedule transfer tasklet */ - tasklet_schedule(&drv_data->pump_transfers); -} - -static void dma_handler(int channel, void *data) -{ - struct driver_data *drv_data = data; - u32 irq_status = DCSR(channel) & DMA_INT_MASK; - - if (irq_status & DCSR_BUSERR) { - - if (channel == drv_data->tx_channel) - dma_error_stop(drv_data, - "dma_handler: " - "bad bus address on tx channel"); - else - dma_error_stop(drv_data, - "dma_handler: " - "bad bus address on rx channel"); - return; - } - - /* PXA255x_SSP has no timeout interrupt, wait for tailing bytes */ - if ((channel == drv_data->tx_channel) - && (irq_status & DCSR_ENDINTR) - && (drv_data->ssp_type == PXA25x_SSP)) { - - /* Wait for rx to stall */ - if (wait_ssp_rx_stall(drv_data->ioaddr) == 0) - dev_err(&drv_data->pdev->dev, - "dma_handler: ssp rx stall failed\n"); - - /* finish this transfer, start the next */ - dma_transfer_complete(drv_data); - } -} - -static irqreturn_t dma_transfer(struct driver_data *drv_data) -{ - u32 irq_status; - void __iomem *reg = drv_data->ioaddr; - - irq_status = read_SSSR(reg) & drv_data->mask_sr; - if (irq_status & SSSR_ROR) { - dma_error_stop(drv_data, "dma_transfer: fifo overrun"); - return IRQ_HANDLED; - } - - /* Check for false positive timeout */ - if ((irq_status & SSSR_TINT) - && (DCSR(drv_data->tx_channel) & DCSR_RUN)) { - write_SSSR(SSSR_TINT, reg); - return IRQ_HANDLED; - } - - if (irq_status & SSSR_TINT || drv_data->rx == drv_data->rx_end) { - - /* Clear and disable timeout interrupt, do the rest in - * dma_transfer_complete */ - if (!pxa25x_ssp_comp(drv_data)) - write_SSTO(0, reg); - - /* finish this transfer, start the next */ - dma_transfer_complete(drv_data); - - return IRQ_HANDLED; - } - - /* Opps problem detected */ - return IRQ_NONE; -} - static void reset_sccr1(struct driver_data *drv_data) { void __iomem *reg = drv_data->ioaddr; @@ -659,7 +309,7 @@ static void int_error_stop(struct driver_data *drv_data, const char* msg) reset_sccr1(drv_data); if (!pxa25x_ssp_comp(drv_data)) write_SSTO(0, reg); - flush(drv_data); + pxa2xx_spi_flush(drv_data); write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); dev_err(&drv_data->pdev->dev, "%s\n", msg); @@ -687,7 +337,7 @@ static void int_transfer_complete(struct driver_data *drv_data) */ /* Move to next transfer */ - drv_data->cur_msg->state = next_transfer(drv_data); + drv_data->cur_msg->state = pxa2xx_spi_next_transfer(drv_data); /* Schedule transfer tasklet */ tasklet_schedule(&drv_data->pump_transfers); @@ -798,103 +448,6 @@ static irqreturn_t ssp_int(int irq, void *dev_id) return drv_data->transfer_handler(drv_data); } -static int set_dma_burst_and_threshold(struct chip_data *chip, - struct spi_device *spi, - u8 bits_per_word, u32 *burst_code, - u32 *threshold) -{ - struct pxa2xx_spi_chip *chip_info = - (struct pxa2xx_spi_chip *)spi->controller_data; - int bytes_per_word; - int burst_bytes; - int thresh_words; - int req_burst_size; - int retval = 0; - - /* Set the threshold (in registers) 
to equal the same amount of data - * as represented by burst size (in bytes). The computation below - * is (burst_size rounded up to nearest 8 byte, word or long word) - * divided by (bytes/register); the tx threshold is the inverse of - * the rx, so that there will always be enough data in the rx fifo - * to satisfy a burst, and there will always be enough space in the - * tx fifo to accept a burst (a tx burst will overwrite the fifo if - * there is not enough space), there must always remain enough empty - * space in the rx fifo for any data loaded to the tx fifo. - * Whenever burst_size (in bytes) equals bits/word, the fifo threshold - * will be 8, or half the fifo; - * The threshold can only be set to 2, 4 or 8, but not 16, because - * to burst 16 to the tx fifo, the fifo would have to be empty; - * however, the minimum fifo trigger level is 1, and the tx will - * request service when the fifo is at this level, with only 15 spaces. - */ - - /* find bytes/word */ - if (bits_per_word <= 8) - bytes_per_word = 1; - else if (bits_per_word <= 16) - bytes_per_word = 2; - else - bytes_per_word = 4; - - /* use struct pxa2xx_spi_chip->dma_burst_size if available */ - if (chip_info) - req_burst_size = chip_info->dma_burst_size; - else { - switch (chip->dma_burst_size) { - default: - /* if the default burst size is not set, - * do it now */ - chip->dma_burst_size = DCMD_BURST8; - case DCMD_BURST8: - req_burst_size = 8; - break; - case DCMD_BURST16: - req_burst_size = 16; - break; - case DCMD_BURST32: - req_burst_size = 32; - break; - } - } - if (req_burst_size <= 8) { - *burst_code = DCMD_BURST8; - burst_bytes = 8; - } else if (req_burst_size <= 16) { - if (bytes_per_word == 1) { - /* don't burst more than 1/2 the fifo */ - *burst_code = DCMD_BURST8; - burst_bytes = 8; - retval = 1; - } else { - *burst_code = DCMD_BURST16; - burst_bytes = 16; - } - } else { - if (bytes_per_word == 1) { - /* don't burst more than 1/2 the fifo */ - *burst_code = DCMD_BURST8; - burst_bytes = 8; - retval = 1; - } else if (bytes_per_word == 2) { - /* don't burst more than 1/2 the fifo */ - *burst_code = DCMD_BURST16; - burst_bytes = 16; - retval = 1; - } else { - *burst_code = DCMD_BURST32; - burst_bytes = 32; - } - } - - thresh_words = burst_bytes / bytes_per_word; - - /* thresh_words will be between 2 and 8 */ - *threshold = (SSCR1_RxTresh(thresh_words) & SSCR1_RFT) - | (SSCR1_TxTresh(16-thresh_words) & SSCR1_TFT); - - return retval; -} - static unsigned int ssp_get_clk_div(struct driver_data *drv_data, int rate) { unsigned long ssp_clk = drv_data->max_clk_rate; @@ -956,8 +509,8 @@ static void pump_transfers(unsigned long data) cs_deassert(drv_data); } - /* Check for transfers that need multiple DMA segments */ - if (transfer->len > MAX_DMA_LEN && chip->enable_dma) { + /* Check if we can DMA this transfer */ + if (!pxa2xx_spi_dma_is_possible(transfer->len) && chip->enable_dma) { /* reject already-mapped transfers; PIO won't always work */ if (message->is_dma_mapped @@ -980,21 +533,20 @@ static void pump_transfers(unsigned long data) } /* Setup the transfer state based on the type of transfer */ - if (flush(drv_data) == 0) { + if (pxa2xx_spi_flush(drv_data) == 0) { dev_err(&drv_data->pdev->dev, "pump_transfers: flush failed\n"); message->status = -EIO; giveback(drv_data); return; } drv_data->n_bytes = chip->n_bytes; - drv_data->dma_width = chip->dma_width; drv_data->tx = (void *)transfer->tx_buf; drv_data->tx_end = drv_data->tx + transfer->len; drv_data->rx = transfer->rx_buf; drv_data->rx_end = drv_data->rx + 
transfer->len; drv_data->rx_dma = transfer->rx_dma; drv_data->tx_dma = transfer->tx_dma; - drv_data->len = transfer->len & DCMD_LENGTH; + drv_data->len = transfer->len; drv_data->write = drv_data->tx ? chip->write : null_writer; drv_data->read = drv_data->rx ? chip->read : null_reader; @@ -1015,21 +567,18 @@ static void pump_transfers(unsigned long data) if (bits <= 8) { drv_data->n_bytes = 1; - drv_data->dma_width = DCMD_WIDTH1; drv_data->read = drv_data->read != null_reader ? u8_reader : null_reader; drv_data->write = drv_data->write != null_writer ? u8_writer : null_writer; } else if (bits <= 16) { drv_data->n_bytes = 2; - drv_data->dma_width = DCMD_WIDTH2; drv_data->read = drv_data->read != null_reader ? u16_reader : null_reader; drv_data->write = drv_data->write != null_writer ? u16_writer : null_writer; } else if (bits <= 32) { drv_data->n_bytes = 4; - drv_data->dma_width = DCMD_WIDTH4; drv_data->read = drv_data->read != null_reader ? u32_reader : null_reader; drv_data->write = drv_data->write != null_writer ? @@ -1038,7 +587,8 @@ static void pump_transfers(unsigned long data) /* if bits/word is changed in dma mode, then must check the * thresholds and burst also */ if (chip->enable_dma) { - if (set_dma_burst_and_threshold(chip, message->spi, + if (pxa2xx_spi_set_dma_burst_and_threshold(chip, + message->spi, bits, &dma_burst, &dma_thresh)) if (printk_ratelimit()) @@ -1057,70 +607,21 @@ static void pump_transfers(unsigned long data) message->state = RUNNING_STATE; - /* Try to map dma buffer and do a dma transfer if successful, but - * only if the length is non-zero and less than MAX_DMA_LEN. - * - * Zero-length non-descriptor DMA is illegal on PXA2xx; force use - * of PIO instead. Care is needed above because the transfer may - * have have been passed with buffers that are already dma mapped. - * A zero-length transfer in PIO mode will not try to write/read - * to/from the buffers - * - * REVISIT large transfers are exactly where we most want to be - * using DMA. If this happens much, split those transfers into - * multiple DMA segments rather than forcing PIO. 
- */ drv_data->dma_mapped = 0; - if (drv_data->len > 0 && drv_data->len <= MAX_DMA_LEN) - drv_data->dma_mapped = map_dma_buffers(drv_data); + if (pxa2xx_spi_dma_is_possible(drv_data->len)) + drv_data->dma_mapped = pxa2xx_spi_map_dma_buffers(drv_data); if (drv_data->dma_mapped) { /* Ensure we have the correct interrupt handler */ - drv_data->transfer_handler = dma_transfer; - - /* Setup rx DMA Channel */ - DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; - DSADR(drv_data->rx_channel) = drv_data->ssdr_physical; - DTADR(drv_data->rx_channel) = drv_data->rx_dma; - if (drv_data->rx == drv_data->null_dma_buf) - /* No target address increment */ - DCMD(drv_data->rx_channel) = DCMD_FLOWSRC - | drv_data->dma_width - | dma_burst - | drv_data->len; - else - DCMD(drv_data->rx_channel) = DCMD_INCTRGADDR - | DCMD_FLOWSRC - | drv_data->dma_width - | dma_burst - | drv_data->len; - - /* Setup tx DMA Channel */ - DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; - DSADR(drv_data->tx_channel) = drv_data->tx_dma; - DTADR(drv_data->tx_channel) = drv_data->ssdr_physical; - if (drv_data->tx == drv_data->null_dma_buf) - /* No source address increment */ - DCMD(drv_data->tx_channel) = DCMD_FLOWTRG - | drv_data->dma_width - | dma_burst - | drv_data->len; - else - DCMD(drv_data->tx_channel) = DCMD_INCSRCADDR - | DCMD_FLOWTRG - | drv_data->dma_width - | dma_burst - | drv_data->len; - - /* Enable dma end irqs on SSP to detect end of transfer */ - if (drv_data->ssp_type == PXA25x_SSP) - DCMD(drv_data->tx_channel) |= DCMD_ENDIRQEN; + drv_data->transfer_handler = pxa2xx_spi_dma_transfer; + + pxa2xx_spi_dma_prepare(drv_data, dma_burst); /* Clear status and start DMA engine */ cr1 = chip->cr1 | dma_thresh | drv_data->dma_cr1; write_SSSR(drv_data->clear_sr, reg); - DCSR(drv_data->rx_channel) |= DCSR_RUN; - DCSR(drv_data->tx_channel) |= DCSR_RUN; + + pxa2xx_spi_dma_start(drv_data); } else { /* Ensure we have the correct interrupt handler */ drv_data->transfer_handler = interrupt_transfer; @@ -1262,8 +763,6 @@ static int setup(struct spi_device *spi) chip->gpio_cs = -1; chip->enable_dma = 0; chip->timeout = TIMOUT_DFLT; - chip->dma_burst_size = drv_data->master_info->enable_dma ? - DCMD_BURST8 : 0; } /* protocol drivers may change the chip settings, so... 
@@ -1293,7 +792,8 @@ static int setup(struct spi_device *spi) * burst and threshold can still respond to changes in bits_per_word */ if (chip->enable_dma) { /* set up legal burst and threshold for dma */ - if (set_dma_burst_and_threshold(chip, spi, spi->bits_per_word, + if (pxa2xx_spi_set_dma_burst_and_threshold(chip, spi, + spi->bits_per_word, &chip->dma_burst_size, &chip->dma_threshold)) { dev_warn(&spi->dev, "in setup: DMA burst size reduced " @@ -1328,18 +828,15 @@ static int setup(struct spi_device *spi) if (spi->bits_per_word <= 8) { chip->n_bytes = 1; - chip->dma_width = DCMD_WIDTH1; chip->read = u8_reader; chip->write = u8_writer; } else if (spi->bits_per_word <= 16) { chip->n_bytes = 2; - chip->dma_width = DCMD_WIDTH2; chip->read = u16_reader; chip->write = u16_writer; } else if (spi->bits_per_word <= 32) { chip->cr0 |= SSCR0_EDSS; chip->n_bytes = 4; - chip->dma_width = DCMD_WIDTH4; chip->read = u32_reader; chip->write = u32_writer; } else { @@ -1447,31 +944,11 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) drv_data->tx_channel = -1; drv_data->rx_channel = -1; if (platform_info->enable_dma) { - - /* Get two DMA channels (rx and tx) */ - drv_data->rx_channel = pxa_request_dma("pxa2xx_spi_ssp_rx", - DMA_PRIO_HIGH, - dma_handler, - drv_data); - if (drv_data->rx_channel < 0) { - dev_err(dev, "problem (%d) requesting rx channel\n", - drv_data->rx_channel); - status = -ENODEV; - goto out_error_irq_alloc; + status = pxa2xx_spi_dma_setup(drv_data); + if (status) { + dev_warn(dev, "failed to setup DMA, using PIO\n"); + platform_info->enable_dma = false; } - drv_data->tx_channel = pxa_request_dma("pxa2xx_spi_ssp_tx", - DMA_PRIO_MEDIUM, - dma_handler, - drv_data); - if (drv_data->tx_channel < 0) { - dev_err(dev, "problem (%d) requesting tx channel\n", - drv_data->tx_channel); - status = -ENODEV; - goto out_error_dma_alloc; - } - - DRCMR(ssp->drcmr_rx) = DRCMR_MAPVLD | drv_data->rx_channel; - DRCMR(ssp->drcmr_tx) = DRCMR_MAPVLD | drv_data->tx_channel; } /* Enable SOC clock */ @@ -1507,14 +984,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) out_error_clock_enabled: clk_disable_unprepare(ssp->clk); - -out_error_dma_alloc: - if (drv_data->tx_channel != -1) - pxa_free_dma(drv_data->tx_channel); - if (drv_data->rx_channel != -1) - pxa_free_dma(drv_data->rx_channel); - -out_error_irq_alloc: + pxa2xx_spi_dma_release(drv_data); free_irq(ssp->irq, drv_data); out_error_master_alloc: @@ -1537,12 +1007,8 @@ static int pxa2xx_spi_remove(struct platform_device *pdev) clk_disable_unprepare(ssp->clk); /* Release DMA */ - if (drv_data->master_info->enable_dma) { - DRCMR(ssp->drcmr_rx) = 0; - DRCMR(ssp->drcmr_tx) = 0; - pxa_free_dma(drv_data->tx_channel); - pxa_free_dma(drv_data->rx_channel); - } + if (drv_data->master_info->enable_dma) + pxa2xx_spi_dma_release(drv_data); /* Release IRQ */ free_irq(ssp->irq, drv_data); @@ -1589,12 +1055,7 @@ static int pxa2xx_spi_resume(struct device *dev) struct ssp_device *ssp = drv_data->ssp; int status = 0; - if (drv_data->rx_channel != -1) - DRCMR(drv_data->ssp->drcmr_rx) = - DRCMR_MAPVLD | drv_data->rx_channel; - if (drv_data->tx_channel != -1) - DRCMR(drv_data->ssp->drcmr_tx) = - DRCMR_MAPVLD | drv_data->tx_channel; + pxa2xx_spi_dma_resume(drv_data); /* Enable the SSP clock */ clk_prepare_enable(ssp->clk); diff --git a/drivers/spi/spi-pxa2xx.h b/drivers/spi/spi-pxa2xx.h new file mode 100644 index 0000000..0a98905 --- /dev/null +++ b/drivers/spi/spi-pxa2xx.h @@ -0,0 +1,185 @@ +/* + * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs + 
* Copyright (C) 2013, Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef SPI_PXA2XX_H +#define SPI_PXA2XX_H + +#include +#include +#include +#include +#include +#include +#include + +struct driver_data { + /* Driver model hookup */ + struct platform_device *pdev; + + /* SSP Info */ + struct ssp_device *ssp; + + /* SPI framework hookup */ + enum pxa_ssp_type ssp_type; + struct spi_master *master; + + /* PXA hookup */ + struct pxa2xx_spi_master *master_info; + + /* PXA private DMA setup stuff */ + int rx_channel; + int tx_channel; + u32 *null_dma_buf; + + /* SSP register addresses */ + void __iomem *ioaddr; + u32 ssdr_physical; + + /* SSP masks*/ + u32 dma_cr1; + u32 int_cr1; + u32 clear_sr; + u32 mask_sr; + + /* Maximun clock rate */ + unsigned long max_clk_rate; + + /* Message Transfer pump */ + struct tasklet_struct pump_transfers; + + /* Current message transfer state info */ + struct spi_message *cur_msg; + struct spi_transfer *cur_transfer; + struct chip_data *cur_chip; + size_t len; + void *tx; + void *tx_end; + void *rx; + void *rx_end; + int dma_mapped; + dma_addr_t rx_dma; + dma_addr_t tx_dma; + size_t rx_map_len; + size_t tx_map_len; + u8 n_bytes; + int (*write)(struct driver_data *drv_data); + int (*read)(struct driver_data *drv_data); + irqreturn_t (*transfer_handler)(struct driver_data *drv_data); + void (*cs_control)(u32 command); +}; + +struct chip_data { + u32 cr0; + u32 cr1; + u32 psp; + u32 timeout; + u8 n_bytes; + u32 dma_burst_size; + u32 threshold; + u32 dma_threshold; + u8 enable_dma; + u8 bits_per_word; + u32 speed_hz; + union { + int gpio_cs; + unsigned int frm; + }; + int gpio_cs_inverted; + int (*write)(struct driver_data *drv_data); + int (*read)(struct driver_data *drv_data); + void (*cs_control)(u32 command); +}; + +#define DEFINE_SSP_REG(reg, off) \ +static inline u32 read_##reg(void const __iomem *p) \ +{ return __raw_readl(p + (off)); } \ +\ +static inline void write_##reg(u32 v, void __iomem *p) \ +{ __raw_writel(v, p + (off)); } + +DEFINE_SSP_REG(SSCR0, 0x00) +DEFINE_SSP_REG(SSCR1, 0x04) +DEFINE_SSP_REG(SSSR, 0x08) +DEFINE_SSP_REG(SSITR, 0x0c) +DEFINE_SSP_REG(SSDR, 0x10) +DEFINE_SSP_REG(SSTO, 0x28) +DEFINE_SSP_REG(SSPSP, 0x2c) + +#define START_STATE ((void *)0) +#define RUNNING_STATE ((void *)1) +#define DONE_STATE ((void *)2) +#define ERROR_STATE ((void *)-1) + +#define MAX_DMA_LEN 8191 +#define IS_DMA_ALIGNED(x) IS_ALIGNED((unsigned long)(x), DMA_ALIGNMENT) +#define DMA_ALIGNMENT 8 + +static inline int pxa25x_ssp_comp(struct driver_data *drv_data) +{ + if (drv_data->ssp_type == PXA25x_SSP) + return 1; + if (drv_data->ssp_type == CE4100_SSP) + return 1; + return 0; +} + +static inline void write_SSSR_CS(struct driver_data *drv_data, u32 val) +{ + void __iomem *reg = drv_data->ioaddr; + + if (drv_data->ssp_type == CE4100_SSP) + val |= read_SSSR(reg) & SSSR_ALT_FRM_MASK; + + write_SSSR(val, reg); +} + +extern int pxa2xx_spi_flush(struct driver_data *drv_data); +extern void *pxa2xx_spi_next_transfer(struct driver_data *drv_data); + +#if defined(CONFIG_SPI_PXA2XX_PXADMA) +extern bool pxa2xx_spi_dma_is_possible(size_t len); +extern int pxa2xx_spi_map_dma_buffers(struct driver_data *drv_data); +extern irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data); +extern int pxa2xx_spi_dma_prepare(struct driver_data *drv_data, u32 dma_burst); +extern void pxa2xx_spi_dma_start(struct 
driver_data *drv_data); +extern int pxa2xx_spi_dma_setup(struct driver_data *drv_data); +extern void pxa2xx_spi_dma_release(struct driver_data *drv_data); +extern void pxa2xx_spi_dma_resume(struct driver_data *drv_data); +extern int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip, + struct spi_device *spi, + u8 bits_per_word, + u32 *burst_code, + u32 *threshold); +#else +static inline bool pxa2xx_spi_dma_is_possible(size_t len) { return false; } +static inline int pxa2xx_spi_map_dma_buffers(struct driver_data *drv_data) +{ + return 0; +} +#define pxa2xx_spi_dma_transfer NULL +static inline void pxa2xx_spi_dma_prepare(struct driver_data *drv_data, + u32 dma_burst) {} +static inline void pxa2xx_spi_dma_start(struct driver_data *drv_data) {} +static inline int pxa2xx_spi_dma_setup(struct driver_data *drv_data) +{ + return 0; +} +static inline void pxa2xx_spi_dma_release(struct driver_data *drv_data) {} +static inline void pxa2xx_spi_dma_resume(struct driver_data *drv_data) {} +static inline int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip, + struct spi_device *spi, + u8 bits_per_word, + u32 *burst_code, + u32 *threshold) +{ + return -ENODEV; +} +#endif + +#endif /* SPI_PXA2XX_H */ diff --git a/include/linux/spi/pxa2xx_spi.h b/include/linux/spi/pxa2xx_spi.h index 053b5ba..d6d2b4d 100644 --- a/include/linux/spi/pxa2xx_spi.h +++ b/include/linux/spi/pxa2xx_spi.h @@ -53,85 +53,5 @@ struct pxa2xx_spi_chip { extern void pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_master *info); -#else -/* - * This is the implemtation for CE4100 on x86. ARM defines them in mach/ or - * plat/ include path. - * The CE4100 does not provide DMA support. This bits are here to let the driver - * compile and will never be used. Maybe we get DMA support at a later point in - * time. - */ - -#define DCSR(n) (n) -#define DSADR(n) (n) -#define DTADR(n) (n) -#define DCMD(n) (n) -#define DRCMR(n) (n) - -#define DCSR_RUN (1 << 31) /* Run Bit */ -#define DCSR_NODESC (1 << 30) /* No-Descriptor Fetch */ -#define DCSR_STOPIRQEN (1 << 29) /* Stop Interrupt Enable */ -#define DCSR_REQPEND (1 << 8) /* Request Pending (read-only) */ -#define DCSR_STOPSTATE (1 << 3) /* Stop State (read-only) */ -#define DCSR_ENDINTR (1 << 2) /* End Interrupt */ -#define DCSR_STARTINTR (1 << 1) /* Start Interrupt */ -#define DCSR_BUSERR (1 << 0) /* Bus Error Interrupt */ - -#define DCSR_EORIRQEN (1 << 28) /* End of Receive Interrupt Enable */ -#define DCSR_EORJMPEN (1 << 27) /* Jump to next descriptor on EOR */ -#define DCSR_EORSTOPEN (1 << 26) /* STOP on an EOR */ -#define DCSR_SETCMPST (1 << 25) /* Set Descriptor Compare Status */ -#define DCSR_CLRCMPST (1 << 24) /* Clear Descriptor Compare Status */ -#define DCSR_CMPST (1 << 10) /* The Descriptor Compare Status */ -#define DCSR_EORINTR (1 << 9) /* The end of Receive */ - -#define DRCMR_MAPVLD (1 << 7) /* Map Valid */ -#define DRCMR_CHLNUM 0x1f /* mask for Channel Number */ - -#define DDADR_DESCADDR 0xfffffff0 /* Address of next descriptor */ -#define DDADR_STOP (1 << 0) /* Stop */ - -#define DCMD_INCSRCADDR (1 << 31) /* Source Address Increment Setting. */ -#define DCMD_INCTRGADDR (1 << 30) /* Target Address Increment Setting. */ -#define DCMD_FLOWSRC (1 << 29) /* Flow Control by the source. */ -#define DCMD_FLOWTRG (1 << 28) /* Flow Control by the target. */ -#define DCMD_STARTIRQEN (1 << 22) /* Start Interrupt Enable */ -#define DCMD_ENDIRQEN (1 << 21) /* End Interrupt Enable */ -#define DCMD_ENDIAN (1 << 18) /* Device Endian-ness. 
*/ -#define DCMD_BURST8 (1 << 16) /* 8 byte burst */ -#define DCMD_BURST16 (2 << 16) /* 16 byte burst */ -#define DCMD_BURST32 (3 << 16) /* 32 byte burst */ -#define DCMD_WIDTH1 (1 << 14) /* 1 byte width */ -#define DCMD_WIDTH2 (2 << 14) /* 2 byte width (HalfWord) */ -#define DCMD_WIDTH4 (3 << 14) /* 4 byte width (Word) */ -#define DCMD_LENGTH 0x01fff /* length mask (max = 8K - 1) */ - -/* - * Descriptor structure for PXA's DMA engine - * Note: this structure must always be aligned to a 16-byte boundary. - */ - -typedef enum { - DMA_PRIO_HIGH = 0, - DMA_PRIO_MEDIUM = 1, - DMA_PRIO_LOW = 2 -} pxa_dma_prio; - -/* - * DMA registration - */ - -static inline int pxa_request_dma(char *name, - pxa_dma_prio prio, - void (*irq_handler)(int, void *), - void *data) -{ - return -ENODEV; -} - -static inline void pxa_free_dma(int dma_ch) -{ -} - #endif #endif -- cgit v0.10.2 From 5928808ef623347e0d4aa22327b992e9e125b6ad Mon Sep 17 00:00:00 2001 From: Mika Westerberg Date: Tue, 22 Jan 2013 12:26:29 +0200 Subject: spi/pxa2xx: add support for DMA engine To be able to use DMA with this driver on non-PXA platforms we implement support for the generic DMA engine API. This lets user to use different DMA engines with little or no modification to the driver. Request lines and channel numbers can be passed to the driver from the platform specific data. The DMA engine implementation will be selected by default even on PXA platform. User can select the legacy DMA API by enabling Kconfig option CONFIG_SPI_PXA2XX_PXADMA. Signed-off-by: Mika Westerberg Acked-by: Linus Walleij Tested-by: Lu Cao Signed-off-by: Mark Brown diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index f187866..8913d3d 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -301,7 +301,12 @@ config SPI_PXA2XX_PXADMA bool "PXA2xx SSP legacy PXA DMA API support" depends on SPI_PXA2XX && ARCH_PXA help - Enable PXA private legacy DMA API support. + Enable PXA private legacy DMA API support. Note that this is + deprecated in favor of generic DMA engine API. + +config SPI_PXA2XX_DMA + def_bool y + depends on SPI_PXA2XX && !SPI_PXA2XX_PXADMA config SPI_PXA2XX tristate "PXA2xx SSP SPI master" diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index 2e3cdd3..e53c309 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile @@ -49,6 +49,7 @@ obj-$(CONFIG_SPI_PL022) += spi-pl022.o obj-$(CONFIG_SPI_PPC4xx) += spi-ppc4xx.o spi-pxa2xx-platform-objs := spi-pxa2xx.o spi-pxa2xx-platform-$(CONFIG_SPI_PXA2XX_PXADMA) += spi-pxa2xx-pxadma.o +spi-pxa2xx-platform-$(CONFIG_SPI_PXA2XX_DMA) += spi-pxa2xx-dma.o obj-$(CONFIG_SPI_PXA2XX) += spi-pxa2xx-platform.o obj-$(CONFIG_SPI_PXA2XX_PCI) += spi-pxa2xx-pci.o obj-$(CONFIG_SPI_RSPI) += spi-rspi.o diff --git a/drivers/spi/spi-pxa2xx-dma.c b/drivers/spi/spi-pxa2xx-dma.c new file mode 100644 index 0000000..c735c5a --- /dev/null +++ b/drivers/spi/spi-pxa2xx-dma.c @@ -0,0 +1,392 @@ +/* + * PXA2xx SPI DMA engine support. + * + * Copyright (C) 2013, Intel Corporation + * Author: Mika Westerberg + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "spi-pxa2xx.h" + +static int pxa2xx_spi_map_dma_buffer(struct driver_data *drv_data, + enum dma_data_direction dir) +{ + int i, nents, len = drv_data->len; + struct scatterlist *sg; + struct device *dmadev; + struct sg_table *sgt; + void *buf, *pbuf; + + /* + * Some DMA controllers have problems transferring buffers that are + * not multiple of 4 bytes. So we truncate the transfer so that it + * is suitable for such controllers, and handle the trailing bytes + * manually after the DMA completes. + * + * REVISIT: It would be better if this information could be + * retrieved directly from the DMA device in a similar way than + * ->copy_align etc. is done. + */ + len = ALIGN(drv_data->len, 4); + + if (dir == DMA_TO_DEVICE) { + dmadev = drv_data->tx_chan->device->dev; + sgt = &drv_data->tx_sgt; + buf = drv_data->tx; + drv_data->tx_map_len = len; + } else { + dmadev = drv_data->rx_chan->device->dev; + sgt = &drv_data->rx_sgt; + buf = drv_data->rx; + drv_data->rx_map_len = len; + } + + nents = DIV_ROUND_UP(len, SZ_2K); + if (nents != sgt->nents) { + int ret; + + sg_free_table(sgt); + ret = sg_alloc_table(sgt, nents, GFP_KERNEL); + if (ret) + return ret; + } + + pbuf = buf; + for_each_sg(sgt->sgl, sg, sgt->nents, i) { + size_t bytes = min_t(size_t, len, SZ_2K); + + if (buf) + sg_set_buf(sg, pbuf, bytes); + else + sg_set_buf(sg, drv_data->dummy, bytes); + + pbuf += bytes; + len -= bytes; + } + + nents = dma_map_sg(dmadev, sgt->sgl, sgt->nents, dir); + if (!nents) + return -ENOMEM; + + return nents; +} + +static void pxa2xx_spi_unmap_dma_buffer(struct driver_data *drv_data, + enum dma_data_direction dir) +{ + struct device *dmadev; + struct sg_table *sgt; + + if (dir == DMA_TO_DEVICE) { + dmadev = drv_data->tx_chan->device->dev; + sgt = &drv_data->tx_sgt; + } else { + dmadev = drv_data->rx_chan->device->dev; + sgt = &drv_data->rx_sgt; + } + + dma_unmap_sg(dmadev, sgt->sgl, sgt->nents, dir); +} + +static void pxa2xx_spi_unmap_dma_buffers(struct driver_data *drv_data) +{ + if (!drv_data->dma_mapped) + return; + + pxa2xx_spi_unmap_dma_buffer(drv_data, DMA_FROM_DEVICE); + pxa2xx_spi_unmap_dma_buffer(drv_data, DMA_TO_DEVICE); + + drv_data->dma_mapped = 0; +} + +static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data, + bool error) +{ + struct spi_message *msg = drv_data->cur_msg; + + /* + * It is possible that one CPU is handling ROR interrupt and other + * just gets DMA completion. Calling pump_transfers() twice for the + * same transfer leads to problems thus we prevent concurrent calls + * by using ->dma_running. + */ + if (atomic_dec_and_test(&drv_data->dma_running)) { + void __iomem *reg = drv_data->ioaddr; + + /* + * If the other CPU is still handling the ROR interrupt we + * might not know about the error yet. So we re-check the + * ROR bit here before we clear the status register. 
+ */ + if (!error) { + u32 status = read_SSSR(reg) & drv_data->mask_sr; + error = status & SSSR_ROR; + } + + /* Clear status & disable interrupts */ + write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg); + write_SSSR_CS(drv_data, drv_data->clear_sr); + if (!pxa25x_ssp_comp(drv_data)) + write_SSTO(0, reg); + + if (!error) { + pxa2xx_spi_unmap_dma_buffers(drv_data); + + /* Handle the last bytes of unaligned transfer */ + drv_data->tx += drv_data->tx_map_len; + drv_data->write(drv_data); + + drv_data->rx += drv_data->rx_map_len; + drv_data->read(drv_data); + + msg->actual_length += drv_data->len; + msg->state = pxa2xx_spi_next_transfer(drv_data); + } else { + /* In case we got an error we disable the SSP now */ + write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); + + msg->state = ERROR_STATE; + } + + tasklet_schedule(&drv_data->pump_transfers); + } +} + +static void pxa2xx_spi_dma_callback(void *data) +{ + pxa2xx_spi_dma_transfer_complete(data, false); +} + +static struct dma_async_tx_descriptor * +pxa2xx_spi_dma_prepare_one(struct driver_data *drv_data, + enum dma_transfer_direction dir) +{ + struct pxa2xx_spi_master *pdata = drv_data->master_info; + struct chip_data *chip = drv_data->cur_chip; + enum dma_slave_buswidth width; + struct dma_slave_config cfg; + struct dma_chan *chan; + struct sg_table *sgt; + int nents, ret; + + switch (drv_data->n_bytes) { + case 1: + width = DMA_SLAVE_BUSWIDTH_1_BYTE; + break; + case 2: + width = DMA_SLAVE_BUSWIDTH_2_BYTES; + break; + default: + width = DMA_SLAVE_BUSWIDTH_4_BYTES; + break; + } + + memset(&cfg, 0, sizeof(cfg)); + cfg.direction = dir; + + if (dir == DMA_MEM_TO_DEV) { + cfg.dst_addr = drv_data->ssdr_physical; + cfg.dst_addr_width = width; + cfg.dst_maxburst = chip->dma_burst_size; + cfg.slave_id = pdata->tx_slave_id; + + sgt = &drv_data->tx_sgt; + nents = drv_data->tx_nents; + chan = drv_data->tx_chan; + } else { + cfg.src_addr = drv_data->ssdr_physical; + cfg.src_addr_width = width; + cfg.src_maxburst = chip->dma_burst_size; + cfg.slave_id = pdata->rx_slave_id; + + sgt = &drv_data->rx_sgt; + nents = drv_data->rx_nents; + chan = drv_data->rx_chan; + } + + ret = dmaengine_slave_config(chan, &cfg); + if (ret) { + dev_warn(&drv_data->pdev->dev, "DMA slave config failed\n"); + return NULL; + } + + return dmaengine_prep_slave_sg(chan, sgt->sgl, nents, dir, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); +} + +static bool pxa2xx_spi_dma_filter(struct dma_chan *chan, void *param) +{ + const struct pxa2xx_spi_master *pdata = param; + + return chan->chan_id == pdata->tx_chan_id || + chan->chan_id == pdata->rx_chan_id; +} + +bool pxa2xx_spi_dma_is_possible(size_t len) +{ + return len <= MAX_DMA_LEN; +} + +int pxa2xx_spi_map_dma_buffers(struct driver_data *drv_data) +{ + const struct chip_data *chip = drv_data->cur_chip; + int ret; + + if (!chip->enable_dma) + return 0; + + /* Don't bother with DMA if we can't do even a single burst */ + if (drv_data->len < chip->dma_burst_size) + return 0; + + ret = pxa2xx_spi_map_dma_buffer(drv_data, DMA_TO_DEVICE); + if (ret <= 0) { + dev_warn(&drv_data->pdev->dev, "failed to DMA map TX\n"); + return 0; + } + + drv_data->tx_nents = ret; + + ret = pxa2xx_spi_map_dma_buffer(drv_data, DMA_FROM_DEVICE); + if (ret <= 0) { + pxa2xx_spi_unmap_dma_buffer(drv_data, DMA_TO_DEVICE); + dev_warn(&drv_data->pdev->dev, "failed to DMA map RX\n"); + return 0; + } + + drv_data->rx_nents = ret; + return 1; +} + +irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data) +{ + u32 status; + + status = read_SSSR(drv_data->ioaddr) & 
drv_data->mask_sr; + if (status & SSSR_ROR) { + dev_err(&drv_data->pdev->dev, "FIFO overrun\n"); + + dmaengine_terminate_all(drv_data->rx_chan); + dmaengine_terminate_all(drv_data->tx_chan); + + pxa2xx_spi_dma_transfer_complete(drv_data, true); + return IRQ_HANDLED; + } + + return IRQ_NONE; +} + +int pxa2xx_spi_dma_prepare(struct driver_data *drv_data, u32 dma_burst) +{ + struct dma_async_tx_descriptor *tx_desc, *rx_desc; + + tx_desc = pxa2xx_spi_dma_prepare_one(drv_data, DMA_MEM_TO_DEV); + if (!tx_desc) { + dev_err(&drv_data->pdev->dev, + "failed to get DMA TX descriptor\n"); + return -EBUSY; + } + + rx_desc = pxa2xx_spi_dma_prepare_one(drv_data, DMA_DEV_TO_MEM); + if (!rx_desc) { + dev_err(&drv_data->pdev->dev, + "failed to get DMA RX descriptor\n"); + return -EBUSY; + } + + /* We are ready when RX completes */ + rx_desc->callback = pxa2xx_spi_dma_callback; + rx_desc->callback_param = drv_data; + + dmaengine_submit(rx_desc); + dmaengine_submit(tx_desc); + return 0; +} + +void pxa2xx_spi_dma_start(struct driver_data *drv_data) +{ + dma_async_issue_pending(drv_data->rx_chan); + dma_async_issue_pending(drv_data->tx_chan); + + atomic_set(&drv_data->dma_running, 1); +} + +int pxa2xx_spi_dma_setup(struct driver_data *drv_data) +{ + struct pxa2xx_spi_master *pdata = drv_data->master_info; + dma_cap_mask_t mask; + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + + drv_data->dummy = devm_kzalloc(&drv_data->pdev->dev, SZ_2K, GFP_KERNEL); + if (!drv_data->dummy) + return -ENOMEM; + + drv_data->tx_chan = dma_request_channel(mask, pxa2xx_spi_dma_filter, + pdata); + if (!drv_data->tx_chan) + return -ENODEV; + + drv_data->rx_chan = dma_request_channel(mask, pxa2xx_spi_dma_filter, + pdata); + if (!drv_data->rx_chan) { + dma_release_channel(drv_data->tx_chan); + drv_data->tx_chan = NULL; + return -ENODEV; + } + + return 0; +} + +void pxa2xx_spi_dma_release(struct driver_data *drv_data) +{ + if (drv_data->rx_chan) { + dmaengine_terminate_all(drv_data->rx_chan); + dma_release_channel(drv_data->rx_chan); + sg_free_table(&drv_data->rx_sgt); + drv_data->rx_chan = NULL; + } + if (drv_data->tx_chan) { + dmaengine_terminate_all(drv_data->tx_chan); + dma_release_channel(drv_data->tx_chan); + sg_free_table(&drv_data->tx_sgt); + drv_data->tx_chan = NULL; + } +} + +void pxa2xx_spi_dma_resume(struct driver_data *drv_data) +{ +} + +int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip, + struct spi_device *spi, + u8 bits_per_word, u32 *burst_code, + u32 *threshold) +{ + struct pxa2xx_spi_chip *chip_info = spi->controller_data; + + /* + * If the DMA burst size is given in chip_info we use that, + * otherwise we use the default. Also we use the default FIFO + * thresholds for now. + */ + *burst_code = chip_info ? 
chip_info->dma_burst_size : 16; + *threshold = SSCR1_RxTresh(RX_THRESH_DFLT) + | SSCR1_TxTresh(TX_THRESH_DFLT); + + return 0; +} diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c index 5b7c2a4..c3d7807 100644 --- a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c @@ -928,7 +928,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) drv_data->mask_sr = SSSR_RFS | SSSR_TFS | SSSR_ROR; } else { drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE | SSCR1_TINTE; - drv_data->dma_cr1 = SSCR1_TSRE | SSCR1_RSRE | SSCR1_TINTE; + drv_data->dma_cr1 = DEFAULT_DMA_CR1; drv_data->clear_sr = SSSR_ROR | SSSR_TINT; drv_data->mask_sr = SSSR_TINT | SSSR_RFS | SSSR_TFS | SSSR_ROR; } diff --git a/drivers/spi/spi-pxa2xx.h b/drivers/spi/spi-pxa2xx.h index 0a98905..97ff471 100644 --- a/drivers/spi/spi-pxa2xx.h +++ b/drivers/spi/spi-pxa2xx.h @@ -10,11 +10,15 @@ #ifndef SPI_PXA2XX_H #define SPI_PXA2XX_H +#include +#include #include #include #include #include #include +#include +#include #include #include @@ -53,6 +57,16 @@ struct driver_data { /* Message Transfer pump */ struct tasklet_struct pump_transfers; + /* DMA engine support */ + struct dma_chan *rx_chan; + struct dma_chan *tx_chan; + struct sg_table rx_sgt; + struct sg_table tx_sgt; + int rx_nents; + int tx_nents; + void *dummy; + atomic_t dma_running; + /* Current message transfer state info */ struct spi_message *cur_msg; struct spi_transfer *cur_transfer; @@ -116,7 +130,6 @@ DEFINE_SSP_REG(SSPSP, 0x2c) #define DONE_STATE ((void *)2) #define ERROR_STATE ((void *)-1) -#define MAX_DMA_LEN 8191 #define IS_DMA_ALIGNED(x) IS_ALIGNED((unsigned long)(x), DMA_ALIGNMENT) #define DMA_ALIGNMENT 8 @@ -142,7 +155,24 @@ static inline void write_SSSR_CS(struct driver_data *drv_data, u32 val) extern int pxa2xx_spi_flush(struct driver_data *drv_data); extern void *pxa2xx_spi_next_transfer(struct driver_data *drv_data); +/* + * Select the right DMA implementation. + */ #if defined(CONFIG_SPI_PXA2XX_PXADMA) +#define SPI_PXA2XX_USE_DMA 1 +#define MAX_DMA_LEN 8191 +#define DEFAULT_DMA_CR1 (SSCR1_TSRE | SSCR1_RSRE | SSCR1_TINTE) +#elif defined(CONFIG_SPI_PXA2XX_DMA) +#define SPI_PXA2XX_USE_DMA 1 +#define MAX_DMA_LEN SZ_64K +#define DEFAULT_DMA_CR1 (SSCR1_TSRE | SSCR1_RSRE | SSCR1_TRAIL) +#else +#undef SPI_PXA2XX_USE_DMA +#define MAX_DMA_LEN 0 +#define DEFAULT_DMA_CR1 0 +#endif + +#ifdef SPI_PXA2XX_USE_DMA extern bool pxa2xx_spi_dma_is_possible(size_t len); extern int pxa2xx_spi_map_dma_buffers(struct driver_data *drv_data); extern irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data); diff --git a/include/linux/spi/pxa2xx_spi.h b/include/linux/spi/pxa2xx_spi.h index d6d2b4d..e5cbbc4 100644 --- a/include/linux/spi/pxa2xx_spi.h +++ b/include/linux/spi/pxa2xx_spi.h @@ -29,6 +29,12 @@ struct pxa2xx_spi_master { u16 num_chipselect; u8 enable_dma; + /* DMA engine specific config */ + int rx_chan_id; + int tx_chan_id; + int rx_slave_id; + int tx_slave_id; + /* For non-PXA arches */ struct ssp_device ssp; }; -- cgit v0.10.2 From 7d94a50585884180a487ce17e8ddbfa37f7694b6 Mon Sep 17 00:00:00 2001 From: Mika Westerberg Date: Tue, 22 Jan 2013 12:26:30 +0200 Subject: spi/pxa2xx: add support for runtime PM Drivers should put the device into low power states proactively whenever the device is not in use. Thus implement support for runtime PM and use the autosuspend feature to make sure that we can still perform well in case we see lots of SPI traffic within short period of time. 
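
The runtime PM wiring this buys is the usual autosuspend pattern; condensed from the probe() and transfer hook changes in the diff below, it looks roughly like this (50 ms is the autosuspend delay this patch picks; treat it as an illustrative sketch, not the complete driver code):

        /* In probe(), once the hardware is in a known state: */
        pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
        pm_runtime_use_autosuspend(&pdev->dev);
        pm_runtime_set_active(&pdev->dev);
        pm_runtime_enable(&pdev->dev);

        /*
         * Around each transfer: take a reference before touching the
         * hardware, and release it through the autosuspend helpers so
         * bursts of SPI traffic do not bounce the device in and out of
         * low power states.
         */
        pm_runtime_get_sync(&pdev->dev);
        /* ... program the SSP and run the transfer ... */
        pm_runtime_mark_last_busy(&pdev->dev);
        pm_runtime_put_autosuspend(&pdev->dev);
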
Signed-off-by: Mika Westerberg Tested-by: Lu Cao Signed-off-by: Mark Brown diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c index c3d7807..7f8f939 100644 --- a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c @@ -30,6 +30,7 @@ #include #include #include +#include #include #include @@ -417,10 +418,20 @@ static irqreturn_t ssp_int(int irq, void *dev_id) { struct driver_data *drv_data = dev_id; void __iomem *reg = drv_data->ioaddr; - u32 sccr1_reg = read_SSCR1(reg); + u32 sccr1_reg; u32 mask = drv_data->mask_sr; u32 status; + /* + * The IRQ might be shared with other peripherals so we must first + * check that are we RPM suspended or not. If we are we assume that + * the IRQ was not for us (we shouldn't be RPM suspended when the + * interrupt is enabled). + */ + if (pm_runtime_suspended(&drv_data->pdev->dev)) + return IRQ_NONE; + + sccr1_reg = read_SSCR1(reg); status = read_SSSR(reg); /* Ignore possible writes if we don't need to write */ @@ -678,6 +689,27 @@ static int pxa2xx_spi_transfer_one_message(struct spi_master *master, return 0; } +static int pxa2xx_spi_prepare_transfer(struct spi_master *master) +{ + struct driver_data *drv_data = spi_master_get_devdata(master); + + pm_runtime_get_sync(&drv_data->pdev->dev); + return 0; +} + +static int pxa2xx_spi_unprepare_transfer(struct spi_master *master) +{ + struct driver_data *drv_data = spi_master_get_devdata(master); + + /* Disable the SSP now */ + write_SSCR0(read_SSCR0(drv_data->ioaddr) & ~SSCR0_SSE, + drv_data->ioaddr); + + pm_runtime_mark_last_busy(&drv_data->pdev->dev); + pm_runtime_put_autosuspend(&drv_data->pdev->dev); + return 0; +} + static int setup_cs(struct spi_device *spi, struct chip_data *chip, struct pxa2xx_spi_chip *chip_info) { @@ -915,6 +947,8 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) master->cleanup = cleanup; master->setup = setup; master->transfer_one_message = pxa2xx_spi_transfer_one_message; + master->prepare_transfer_hardware = pxa2xx_spi_prepare_transfer; + master->unprepare_transfer_hardware = pxa2xx_spi_unprepare_transfer; drv_data->ssp_type = ssp->type; drv_data->null_dma_buf = (u32 *)PTR_ALIGN(&drv_data[1], DMA_ALIGNMENT); @@ -980,6 +1014,11 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) goto out_error_clock_enabled; } + pm_runtime_set_autosuspend_delay(&pdev->dev, 50); + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); + return status; out_error_clock_enabled: @@ -1002,6 +1041,8 @@ static int pxa2xx_spi_remove(struct platform_device *pdev) return 0; ssp = drv_data->ssp; + pm_runtime_get_sync(&pdev->dev); + /* Disable the SSP at the peripheral and SOC level */ write_SSCR0(0, drv_data->ioaddr); clk_disable_unprepare(ssp->clk); @@ -1010,6 +1051,9 @@ static int pxa2xx_spi_remove(struct platform_device *pdev) if (drv_data->master_info->enable_dma) pxa2xx_spi_dma_release(drv_data); + pm_runtime_put_noidle(&pdev->dev); + pm_runtime_disable(&pdev->dev); + /* Release IRQ */ free_irq(ssp->irq, drv_data); @@ -1069,20 +1113,37 @@ static int pxa2xx_spi_resume(struct device *dev) return 0; } +#endif + +#ifdef CONFIG_PM_RUNTIME +static int pxa2xx_spi_runtime_suspend(struct device *dev) +{ + struct driver_data *drv_data = dev_get_drvdata(dev); + + clk_disable_unprepare(drv_data->ssp->clk); + return 0; +} + +static int pxa2xx_spi_runtime_resume(struct device *dev) +{ + struct driver_data *drv_data = dev_get_drvdata(dev); + + clk_prepare_enable(drv_data->ssp->clk); + return 0; +} +#endif static const struct 
dev_pm_ops pxa2xx_spi_pm_ops = { - .suspend = pxa2xx_spi_suspend, - .resume = pxa2xx_spi_resume, + SET_SYSTEM_SLEEP_PM_OPS(pxa2xx_spi_suspend, pxa2xx_spi_resume) + SET_RUNTIME_PM_OPS(pxa2xx_spi_runtime_suspend, + pxa2xx_spi_runtime_resume, NULL) }; -#endif static struct platform_driver driver = { .driver = { .name = "pxa2xx-spi", .owner = THIS_MODULE, -#ifdef CONFIG_PM .pm = &pxa2xx_spi_pm_ops, -#endif }, .probe = pxa2xx_spi_probe, .remove = pxa2xx_spi_remove, -- cgit v0.10.2 From b833172fd8f44fb56e0b3cb810155a6baecc65dc Mon Sep 17 00:00:00 2001 From: Mika Westerberg Date: Tue, 22 Jan 2013 12:26:31 +0200 Subject: spi/pxa2xx: add support for SPI_LOOP This is useful when testing the functionality of the controller from userspace and there aren't any real SPI slave devices connected to the bus. Signed-off-by: Mika Westerberg Tested-by: Lu Cao Signed-off-by: Mark Brown diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c index 7f8f939..8a5ba0a 100644 --- a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c @@ -846,6 +846,9 @@ static int setup(struct spi_device *spi) chip->cr1 |= (((spi->mode & SPI_CPHA) != 0) ? SSCR1_SPH : 0) | (((spi->mode & SPI_CPOL) != 0) ? SSCR1_SPO : 0); + if (spi->mode & SPI_LOOP) + chip->cr1 |= SSCR1_LBM; + /* NOTE: PXA25x_SSP _could_ use external clocking ... */ if (!pxa25x_ssp_comp(drv_data)) dev_dbg(&spi->dev, "%ld Hz actual, %s\n", @@ -939,7 +942,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) master->dev.parent = &pdev->dev; master->dev.of_node = pdev->dev.of_node; /* the spi->mode bits understood by this driver: */ - master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; + master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP; master->bus_num = ssp->port_id; master->num_chipselect = platform_info->num_chipselect; -- cgit v0.10.2 From a0d2642e9296882cda3ad03ff3d9a6649cd70439 Mon Sep 17 00:00:00 2001 From: Mika Westerberg Date: Tue, 22 Jan 2013 12:26:32 +0200 Subject: spi/pxa2xx: add support for Intel Low Power Subsystem SPI Intel LPSS SPI is pretty much the same as the PXA27xx SPI except that it has few additional features over the original: o FIFO depth is 256 entries o RX FIFO has one watermark o TX FIFO has two watermarks, low and high o chip select can be controlled by writing to a register The new FIFO registers follow immediately the PXA27xx registers but then there are some additional LPSS private registers at offset 1k or 2k from the base address. For these private registers we add new accessors that take advantage of drv_data->lpss_base once it is resolved. We add a new type LPSS_SSP that can be used to distinguish the LPSS devices from others. 
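
The private register block is located by probing: SPI_CS_CONTROL (offset 0x18 from the private base) is written with its software-mode bit set and read back, and if the bit does not stick at the 1k offset the block must be at 2k. A condensed sketch of that detection, assuming ioaddr is the SSP MMIO base (drv_data->ioaddr in the driver) and lpss_base is the field the private accessors later use:

        unsigned offset = 0x400;                /* try the 1k offset first */
        u32 orig, value;

        orig = readl(ioaddr + offset + SPI_CS_CONTROL);
        writel(orig | SPI_CS_CONTROL_SW_MODE, ioaddr + offset + SPI_CS_CONTROL);
        value = readl(ioaddr + offset + SPI_CS_CONTROL);
        if (value != (orig | SPI_CS_CONTROL_SW_MODE))
                offset = 0x800;                 /* bit did not stick: registers live at 2k */
        /* (the full code below also verifies the bit can be cleared again) */

        lpss_base = ioaddr + offset;            /* private register accessors use this */
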
Signed-off-by: Mika Westerberg Tested-by: Lu Cao Signed-off-by: Mark Brown diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c index 8a5ba0a..4bd6b72 100644 --- a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c @@ -1,5 +1,6 @@ /* * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs + * Copyright (C) 2013, Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -61,6 +62,98 @@ MODULE_ALIAS("platform:pxa2xx-spi"); | SSCR1_RFT | SSCR1_TFT | SSCR1_MWDS \ | SSCR1_SPH | SSCR1_SPO | SSCR1_LBM) +#define LPSS_RX_THRESH_DFLT 64 +#define LPSS_TX_LOTHRESH_DFLT 160 +#define LPSS_TX_HITHRESH_DFLT 224 + +/* Offset from drv_data->lpss_base */ +#define SPI_CS_CONTROL 0x18 +#define SPI_CS_CONTROL_SW_MODE BIT(0) +#define SPI_CS_CONTROL_CS_HIGH BIT(1) + +static bool is_lpss_ssp(const struct driver_data *drv_data) +{ + return drv_data->ssp_type == LPSS_SSP; +} + +/* + * Read and write LPSS SSP private registers. Caller must first check that + * is_lpss_ssp() returns true before these can be called. + */ +static u32 __lpss_ssp_read_priv(struct driver_data *drv_data, unsigned offset) +{ + WARN_ON(!drv_data->lpss_base); + return readl(drv_data->lpss_base + offset); +} + +static void __lpss_ssp_write_priv(struct driver_data *drv_data, + unsigned offset, u32 value) +{ + WARN_ON(!drv_data->lpss_base); + writel(value, drv_data->lpss_base + offset); +} + +/* + * lpss_ssp_setup - perform LPSS SSP specific setup + * @drv_data: pointer to the driver private data + * + * Perform LPSS SSP specific setup. This function must be called first if + * one is going to use LPSS SSP private registers. + */ +static void lpss_ssp_setup(struct driver_data *drv_data) +{ + unsigned offset = 0x400; + u32 value, orig; + + if (!is_lpss_ssp(drv_data)) + return; + + /* + * Perform auto-detection of the LPSS SSP private registers. They + * can be either at 1k or 2k offset from the base address. 
+ */ + orig = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL); + + value = orig | SPI_CS_CONTROL_SW_MODE; + writel(value, drv_data->ioaddr + offset + SPI_CS_CONTROL); + value = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL); + if (value != (orig | SPI_CS_CONTROL_SW_MODE)) { + offset = 0x800; + goto detection_done; + } + + value &= ~SPI_CS_CONTROL_SW_MODE; + writel(value, drv_data->ioaddr + offset + SPI_CS_CONTROL); + value = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL); + if (value != orig) { + offset = 0x800; + goto detection_done; + } + +detection_done: + /* Now set the LPSS base */ + drv_data->lpss_base = drv_data->ioaddr + offset; + + /* Enable software chip select control */ + value = SPI_CS_CONTROL_SW_MODE | SPI_CS_CONTROL_CS_HIGH; + __lpss_ssp_write_priv(drv_data, SPI_CS_CONTROL, value); +} + +static void lpss_ssp_cs_control(struct driver_data *drv_data, bool enable) +{ + u32 value; + + if (!is_lpss_ssp(drv_data)) + return; + + value = __lpss_ssp_read_priv(drv_data, SPI_CS_CONTROL); + if (enable) + value &= ~SPI_CS_CONTROL_CS_HIGH; + else + value |= SPI_CS_CONTROL_CS_HIGH; + __lpss_ssp_write_priv(drv_data, SPI_CS_CONTROL, value); +} + static void cs_assert(struct driver_data *drv_data) { struct chip_data *chip = drv_data->cur_chip; @@ -75,8 +168,12 @@ static void cs_assert(struct driver_data *drv_data) return; } - if (gpio_is_valid(chip->gpio_cs)) + if (gpio_is_valid(chip->gpio_cs)) { gpio_set_value(chip->gpio_cs, chip->gpio_cs_inverted); + return; + } + + lpss_ssp_cs_control(drv_data, true); } static void cs_deassert(struct driver_data *drv_data) @@ -91,8 +188,12 @@ static void cs_deassert(struct driver_data *drv_data) return; } - if (gpio_is_valid(chip->gpio_cs)) + if (gpio_is_valid(chip->gpio_cs)) { gpio_set_value(chip->gpio_cs, !chip->gpio_cs_inverted); + return; + } + + lpss_ssp_cs_control(drv_data, false); } int pxa2xx_spi_flush(struct driver_data *drv_data) @@ -642,6 +743,13 @@ static void pump_transfers(unsigned long data) write_SSSR_CS(drv_data, drv_data->clear_sr); } + if (is_lpss_ssp(drv_data)) { + if ((read_SSIRF(reg) & 0xff) != chip->lpss_rx_threshold) + write_SSIRF(chip->lpss_rx_threshold, reg); + if ((read_SSITF(reg) & 0xffff) != chip->lpss_tx_threshold) + write_SSITF(chip->lpss_tx_threshold, reg); + } + /* see if we need to reload the config registers */ if ((read_SSCR0(reg) != cr0) || (read_SSCR1(reg) & SSCR1_CHANGE_MASK) != @@ -754,8 +862,17 @@ static int setup(struct spi_device *spi) struct chip_data *chip; struct driver_data *drv_data = spi_master_get_devdata(spi->master); unsigned int clk_div; - uint tx_thres = TX_THRESH_DFLT; - uint rx_thres = RX_THRESH_DFLT; + uint tx_thres, tx_hi_thres, rx_thres; + + if (is_lpss_ssp(drv_data)) { + tx_thres = LPSS_TX_LOTHRESH_DFLT; + tx_hi_thres = LPSS_TX_HITHRESH_DFLT; + rx_thres = LPSS_RX_THRESH_DFLT; + } else { + tx_thres = TX_THRESH_DFLT; + tx_hi_thres = 0; + rx_thres = RX_THRESH_DFLT; + } if (!pxa25x_ssp_comp(drv_data) && (spi->bits_per_word < 4 || spi->bits_per_word > 32)) { @@ -808,6 +925,8 @@ static int setup(struct spi_device *spi) chip->timeout = chip_info->timeout; if (chip_info->tx_threshold) tx_thres = chip_info->tx_threshold; + if (chip_info->tx_hi_threshold) + tx_hi_thres = chip_info->tx_hi_threshold; if (chip_info->rx_threshold) rx_thres = chip_info->rx_threshold; chip->enable_dma = drv_data->master_info->enable_dma; @@ -819,6 +938,10 @@ static int setup(struct spi_device *spi) chip->threshold = (SSCR1_RxTresh(rx_thres) & SSCR1_RFT) | (SSCR1_TxTresh(tx_thres) & SSCR1_TFT); + chip->lpss_rx_threshold = 
SSIRF_RxThresh(rx_thres); + chip->lpss_tx_threshold = SSITF_TxLoThresh(tx_thres) + | SSITF_TxHiThresh(tx_hi_thres); + /* set dma burst and threshold outside of chip_info path so that if * chip_info goes away after setting chip->enable_dma, the * burst and threshold can still respond to changes in bits_per_word */ @@ -1006,6 +1129,8 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) write_SSTO(0, drv_data->ioaddr); write_SSPSP(0, drv_data->ioaddr); + lpss_ssp_setup(drv_data); + tasklet_init(&drv_data->pump_transfers, pump_transfers, (unsigned long)drv_data); diff --git a/drivers/spi/spi-pxa2xx.h b/drivers/spi/spi-pxa2xx.h index 97ff471..5adc2a1 100644 --- a/drivers/spi/spi-pxa2xx.h +++ b/drivers/spi/spi-pxa2xx.h @@ -86,6 +86,8 @@ struct driver_data { int (*read)(struct driver_data *drv_data); irqreturn_t (*transfer_handler)(struct driver_data *drv_data); void (*cs_control)(u32 command); + + void __iomem *lpss_base; }; struct chip_data { @@ -97,6 +99,8 @@ struct chip_data { u32 dma_burst_size; u32 threshold; u32 dma_threshold; + u16 lpss_rx_threshold; + u16 lpss_tx_threshold; u8 enable_dma; u8 bits_per_word; u32 speed_hz; @@ -124,6 +128,8 @@ DEFINE_SSP_REG(SSITR, 0x0c) DEFINE_SSP_REG(SSDR, 0x10) DEFINE_SSP_REG(SSTO, 0x28) DEFINE_SSP_REG(SSPSP, 0x2c) +DEFINE_SSP_REG(SSITF, SSITF) +DEFINE_SSP_REG(SSIRF, SSIRF) #define START_STATE ((void *)0) #define RUNNING_STATE ((void *)1) diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h index 065e7f6..467cc63 100644 --- a/include/linux/pxa2xx_ssp.h +++ b/include/linux/pxa2xx_ssp.h @@ -155,6 +155,14 @@ #define SSACD_ACDS(x) ((x) << 0) /* Audio clock divider select */ #define SSACD_SCDX8 (1 << 7) /* SYSCLK division ratio select */ +/* LPSS SSP */ +#define SSITF 0x44 /* TX FIFO trigger level */ +#define SSITF_TxLoThresh(x) (((x) - 1) << 8) +#define SSITF_TxHiThresh(x) ((x) - 1) + +#define SSIRF 0x48 /* RX FIFO trigger level */ +#define SSIRF_RxThresh(x) ((x) - 1) + enum pxa_ssp_type { SSP_UNDEFINED = 0, PXA25x_SSP, /* pxa 210, 250, 255, 26x */ @@ -164,6 +172,7 @@ enum pxa_ssp_type { PXA168_SSP, PXA910_SSP, CE4100_SSP, + LPSS_SSP, }; struct ssp_device { diff --git a/include/linux/spi/pxa2xx_spi.h b/include/linux/spi/pxa2xx_spi.h index e5cbbc4..82d5111 100644 --- a/include/linux/spi/pxa2xx_spi.h +++ b/include/linux/spi/pxa2xx_spi.h @@ -44,6 +44,7 @@ struct pxa2xx_spi_master { */ struct pxa2xx_spi_chip { u8 tx_threshold; + u8 tx_hi_threshold; u8 rx_threshold; u8 dma_burst_size; u32 timeout; -- cgit v0.10.2 From a3496855d9f1948d1b977afe8bd922725ded05d5 Mon Sep 17 00:00:00 2001 From: Mika Westerberg Date: Tue, 22 Jan 2013 12:26:33 +0200 Subject: spi/pxa2xx: add support for Lynxpoint SPI controllers Intel Lynxpoint PCH Low Power Subsystem has two general purpose SPI controllers that are LPSS_SSP compatible. These controllers are enumerated from ACPI namespace with ACPI IDs INT33C0 and INT33C1. 
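
Enumeration itself only needs the driver to claim these _HIDs; the ACPI core then creates the matching platform devices and binds them to the driver. Condensed from the diff below:

        static struct acpi_device_id pxa2xx_spi_acpi_match[] = {
                { "INT33C0", 0 },
                { "INT33C1", 0 },
                { },
        };
        MODULE_DEVICE_TABLE(acpi, pxa2xx_spi_acpi_match);

        static struct platform_driver driver = {
                .driver = {
                        .name             = "pxa2xx-spi",
                        .owner            = THIS_MODULE,
                        .pm               = &pxa2xx_spi_pm_ops,
                        .acpi_match_table = ACPI_PTR(pxa2xx_spi_acpi_match),
                },
                .probe  = pxa2xx_spi_probe,
                .remove = pxa2xx_spi_remove,
        };
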
Signed-off-by: Mika Westerberg Tested-by: Lu Cao Signed-off-by: Mark Brown diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 8913d3d..e79884e9 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -310,7 +310,7 @@ config SPI_PXA2XX_DMA config SPI_PXA2XX tristate "PXA2xx SSP SPI master" - depends on ARCH_PXA || PCI + depends on ARCH_PXA || PCI || ACPI select PXA_SSP if ARCH_PXA help This enables using a PXA2xx or Sodaville SSP port as a SPI master diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c index 4bd6b72..90b27a3 100644 --- a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c @@ -32,6 +32,7 @@ #include #include #include +#include #include #include @@ -933,6 +934,13 @@ static int setup(struct spi_device *spi) chip->dma_threshold = 0; if (chip_info->enable_loopback) chip->cr1 = SSCR1_LBM; + } else if (ACPI_HANDLE(&spi->dev)) { + /* + * Slave devices enumerated from ACPI namespace don't + * usually have chip_info but we still might want to use + * DMA with them. + */ + chip->enable_dma = drv_data->master_info->enable_dma; } chip->threshold = (SSCR1_RxTresh(rx_thres) & SSCR1_RFT) | @@ -1025,6 +1033,99 @@ static void cleanup(struct spi_device *spi) kfree(chip); } +#ifdef CONFIG_ACPI +static int pxa2xx_spi_acpi_add_dma(struct acpi_resource *res, void *data) +{ + struct pxa2xx_spi_master *pdata = data; + + if (res->type == ACPI_RESOURCE_TYPE_FIXED_DMA) { + const struct acpi_resource_fixed_dma *dma; + + dma = &res->data.fixed_dma; + if (pdata->tx_slave_id < 0) { + pdata->tx_slave_id = dma->request_lines; + pdata->tx_chan_id = dma->channels; + } else if (pdata->rx_slave_id < 0) { + pdata->rx_slave_id = dma->request_lines; + pdata->rx_chan_id = dma->channels; + } + } + + /* Tell the ACPI core to skip this resource */ + return 1; +} + +static struct pxa2xx_spi_master * +pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev) +{ + struct pxa2xx_spi_master *pdata; + struct list_head resource_list; + struct acpi_device *adev; + struct ssp_device *ssp; + struct resource *res; + int devid; + + if (!ACPI_HANDLE(&pdev->dev) || + acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev)) + return NULL; + + pdata = devm_kzalloc(&pdev->dev, sizeof(*ssp), GFP_KERNEL); + if (!pdata) { + dev_err(&pdev->dev, + "failed to allocate memory for platform data\n"); + return NULL; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return NULL; + + ssp = &pdata->ssp; + + ssp->phys_base = res->start; + ssp->mmio_base = devm_request_and_ioremap(&pdev->dev, res); + if (!ssp->mmio_base) { + dev_err(&pdev->dev, "failed to ioremap mmio_base\n"); + return NULL; + } + + ssp->clk = devm_clk_get(&pdev->dev, NULL); + ssp->irq = platform_get_irq(pdev, 0); + ssp->type = LPSS_SSP; + ssp->pdev = pdev; + + ssp->port_id = -1; + if (adev->pnp.unique_id && !kstrtoint(adev->pnp.unique_id, 0, &devid)) + ssp->port_id = devid; + + pdata->num_chipselect = 1; + pdata->rx_slave_id = -1; + pdata->tx_slave_id = -1; + + INIT_LIST_HEAD(&resource_list); + acpi_dev_get_resources(adev, &resource_list, pxa2xx_spi_acpi_add_dma, + pdata); + acpi_dev_free_resource_list(&resource_list); + + pdata->enable_dma = pdata->rx_slave_id >= 0 && pdata->tx_slave_id >= 0; + + return pdata; +} + +static struct acpi_device_id pxa2xx_spi_acpi_match[] = { + { "INT33C0", 0 }, + { "INT33C1", 0 }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, pxa2xx_spi_acpi_match); +#else +static inline struct pxa2xx_spi_master * +pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev) +{ + return NULL; +} +#endif + static int 
pxa2xx_spi_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -1036,8 +1137,11 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) platform_info = dev_get_platdata(dev); if (!platform_info) { - dev_err(&pdev->dev, "missing platform data\n"); - return -ENODEV; + platform_info = pxa2xx_spi_acpi_get_pdata(pdev); + if (!platform_info) { + dev_err(&pdev->dev, "missing platform data\n"); + return -ENODEV; + } } ssp = pxa_ssp_request(pdev->id, pdev->name); @@ -1064,6 +1168,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) master->dev.parent = &pdev->dev; master->dev.of_node = pdev->dev.of_node; + ACPI_HANDLE_SET(&master->dev, ACPI_HANDLE(&pdev->dev)); /* the spi->mode bits understood by this driver: */ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP; @@ -1272,6 +1377,7 @@ static struct platform_driver driver = { .name = "pxa2xx-spi", .owner = THIS_MODULE, .pm = &pxa2xx_spi_pm_ops, + .acpi_match_table = ACPI_PTR(pxa2xx_spi_acpi_match), }, .probe = pxa2xx_spi_probe, .remove = pxa2xx_spi_remove, -- cgit v0.10.2