Diffstat (limited to 'drivers/spi')
 drivers/spi/Kconfig            |    6
 drivers/spi/Makefile           |    1
 drivers/spi/spi-sirf.c         |  877
 drivers/spi/spi-zynqmp-gqspi.c | 1122
 drivers/spi/spidev.c           |   33
 5 files changed, 1778 insertions(+), 261 deletions(-)
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index ec40a27..0cae169 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -610,6 +610,12 @@ config SPI_XTENSA_XTFPGA
16 bit words in SPI mode 0, automatically asserting CS on transfer
start and deasserting on end.
+config SPI_ZYNQMP_GQSPI
+ tristate "Xilinx ZynqMP GQSPI controller"
+ depends on SPI_MASTER
+ help
+ Enables Xilinx GQSPI controller driver for Zynq UltraScale+ MPSoC.
+
config SPI_NUC900
tristate "Nuvoton NUC900 series SPI"
depends on ARCH_W90X900
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 2e7089f..1154dba 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -89,3 +89,4 @@ obj-$(CONFIG_SPI_TXX9) += spi-txx9.o
obj-$(CONFIG_SPI_XCOMM) += spi-xcomm.o
obj-$(CONFIG_SPI_XILINX) += spi-xilinx.o
obj-$(CONFIG_SPI_XTENSA_XTFPGA) += spi-xtensa-xtfpga.o
+obj-$(CONFIG_SPI_ZYNQMP_GQSPI) += spi-zynqmp-gqspi.o
diff --git a/drivers/spi/spi-sirf.c b/drivers/spi/spi-sirf.c
index f5715c9..7072276a 100644
--- a/drivers/spi/spi-sirf.c
+++ b/drivers/spi/spi-sirf.c
@@ -26,28 +26,6 @@
#include <linux/reset.h>
#define DRIVER_NAME "sirfsoc_spi"
-
-#define SIRFSOC_SPI_CTRL 0x0000
-#define SIRFSOC_SPI_CMD 0x0004
-#define SIRFSOC_SPI_TX_RX_EN 0x0008
-#define SIRFSOC_SPI_INT_EN 0x000C
-#define SIRFSOC_SPI_INT_STATUS 0x0010
-#define SIRFSOC_SPI_TX_DMA_IO_CTRL 0x0100
-#define SIRFSOC_SPI_TX_DMA_IO_LEN 0x0104
-#define SIRFSOC_SPI_TXFIFO_CTRL 0x0108
-#define SIRFSOC_SPI_TXFIFO_LEVEL_CHK 0x010C
-#define SIRFSOC_SPI_TXFIFO_OP 0x0110
-#define SIRFSOC_SPI_TXFIFO_STATUS 0x0114
-#define SIRFSOC_SPI_TXFIFO_DATA 0x0118
-#define SIRFSOC_SPI_RX_DMA_IO_CTRL 0x0120
-#define SIRFSOC_SPI_RX_DMA_IO_LEN 0x0124
-#define SIRFSOC_SPI_RXFIFO_CTRL 0x0128
-#define SIRFSOC_SPI_RXFIFO_LEVEL_CHK 0x012C
-#define SIRFSOC_SPI_RXFIFO_OP 0x0130
-#define SIRFSOC_SPI_RXFIFO_STATUS 0x0134
-#define SIRFSOC_SPI_RXFIFO_DATA 0x0138
-#define SIRFSOC_SPI_DUMMY_DELAY_CTL 0x0144
-
/* SPI CTRL register defines */
#define SIRFSOC_SPI_SLV_MODE BIT(16)
#define SIRFSOC_SPI_CMD_MODE BIT(17)
@@ -80,8 +58,6 @@
#define SIRFSOC_SPI_TXFIFO_THD_INT_EN BIT(9)
#define SIRFSOC_SPI_FRM_END_INT_EN BIT(10)
-#define SIRFSOC_SPI_INT_MASK_ALL 0x1FFF
-
/* Interrupt status */
#define SIRFSOC_SPI_RX_DONE BIT(0)
#define SIRFSOC_SPI_TX_DONE BIT(1)
@@ -110,20 +86,66 @@
#define SIRFSOC_SPI_FIFO_WIDTH_BYTE (0 << 0)
#define SIRFSOC_SPI_FIFO_WIDTH_WORD (1 << 0)
#define SIRFSOC_SPI_FIFO_WIDTH_DWORD (2 << 0)
-
-/* FIFO Status */
-#define SIRFSOC_SPI_FIFO_LEVEL_MASK 0xFF
-#define SIRFSOC_SPI_FIFO_FULL BIT(8)
-#define SIRFSOC_SPI_FIFO_EMPTY BIT(9)
-
-/* 256 bytes rx/tx FIFO */
-#define SIRFSOC_SPI_FIFO_SIZE 256
-#define SIRFSOC_SPI_DAT_FRM_LEN_MAX (64 * 1024)
-
-#define SIRFSOC_SPI_FIFO_SC(x) ((x) & 0x3F)
-#define SIRFSOC_SPI_FIFO_LC(x) (((x) & 0x3F) << 10)
-#define SIRFSOC_SPI_FIFO_HC(x) (((x) & 0x3F) << 20)
-#define SIRFSOC_SPI_FIFO_THD(x) (((x) & 0xFF) << 2)
+/* USP related */
+#define SIRFSOC_USP_SYNC_MODE BIT(0)
+#define SIRFSOC_USP_SLV_MODE BIT(1)
+#define SIRFSOC_USP_LSB BIT(4)
+#define SIRFSOC_USP_EN BIT(5)
+#define SIRFSOC_USP_RXD_FALLING_EDGE BIT(6)
+#define SIRFSOC_USP_TXD_FALLING_EDGE BIT(7)
+#define SIRFSOC_USP_CS_HIGH_VALID BIT(9)
+#define SIRFSOC_USP_SCLK_IDLE_STAT BIT(11)
+#define SIRFSOC_USP_TFS_IO_MODE BIT(14)
+#define SIRFSOC_USP_TFS_IO_INPUT BIT(19)
+
+#define SIRFSOC_USP_RXD_DELAY_LEN_MASK 0xFF
+#define SIRFSOC_USP_TXD_DELAY_LEN_MASK 0xFF
+#define SIRFSOC_USP_RXD_DELAY_OFFSET 0
+#define SIRFSOC_USP_TXD_DELAY_OFFSET 8
+#define SIRFSOC_USP_RXD_DELAY_LEN 1
+#define SIRFSOC_USP_TXD_DELAY_LEN 1
+#define SIRFSOC_USP_CLK_DIVISOR_OFFSET 21
+#define SIRFSOC_USP_CLK_DIVISOR_MASK 0x3FF
+#define SIRFSOC_USP_CLK_10_11_MASK 0x3
+#define SIRFSOC_USP_CLK_10_11_OFFSET 30
+#define SIRFSOC_USP_CLK_12_15_MASK 0xF
+#define SIRFSOC_USP_CLK_12_15_OFFSET 24
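+/*
+ * The 16-bit USP clock divisor is split across three registers: bits [9:0]
+ * go to MODE2 at offset 21, bits [11:10] to TX_FRAME_CTRL at offset 30 and
+ * bits [15:12] to RX_FRAME_CTRL at offset 24 (see how
+ * spi_sirfsoc_setup_transfer() programs them below).
+ */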
+
+#define SIRFSOC_USP_TX_DATA_OFFSET 0
+#define SIRFSOC_USP_TX_SYNC_OFFSET 8
+#define SIRFSOC_USP_TX_FRAME_OFFSET 16
+#define SIRFSOC_USP_TX_SHIFTER_OFFSET 24
+
+#define SIRFSOC_USP_TX_DATA_MASK 0xFF
+#define SIRFSOC_USP_TX_SYNC_MASK 0xFF
+#define SIRFSOC_USP_TX_FRAME_MASK 0xFF
+#define SIRFSOC_USP_TX_SHIFTER_MASK 0x1F
+
+#define SIRFSOC_USP_RX_DATA_OFFSET 0
+#define SIRFSOC_USP_RX_FRAME_OFFSET 8
+#define SIRFSOC_USP_RX_SHIFTER_OFFSET 16
+
+#define SIRFSOC_USP_RX_DATA_MASK 0xFF
+#define SIRFSOC_USP_RX_FRAME_MASK 0xFF
+#define SIRFSOC_USP_RX_SHIFTER_MASK 0x1F
+#define SIRFSOC_USP_CS_HIGH_VALUE BIT(1)
+
+#define SIRFSOC_SPI_FIFO_SC_OFFSET 0
+#define SIRFSOC_SPI_FIFO_LC_OFFSET 10
+#define SIRFSOC_SPI_FIFO_HC_OFFSET 20
+
+#define SIRFSOC_SPI_FIFO_FULL_MASK(s) (1 << ((s)->fifo_full_offset))
+#define SIRFSOC_SPI_FIFO_EMPTY_MASK(s) (1 << ((s)->fifo_full_offset + 1))
+#define SIRFSOC_SPI_FIFO_THD_MASK(s) ((s)->fifo_size - 1)
+#define SIRFSOC_SPI_FIFO_THD_OFFSET 2
+#define SIRFSOC_SPI_FIFO_LEVEL_CHK_MASK(s, val) \
+ ((val) & (s)->fifo_level_chk_mask)
+
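+/*
+ * Three flavours of the block are handled: the original Prima2 SPI
+ * controller, and the USP block used as a SPI master on Prima2 and Atlas7
+ * (see the spi_sirfsoc_of_match table below).
+ */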
+enum sirf_spi_type {
+ SIRF_REAL_SPI,
+ SIRF_USP_SPI_P2,
+ SIRF_USP_SPI_A7,
+};
/*
* only if the rx/tx buffer and transfer size are 4-bytes aligned, we use dma
@@ -137,6 +159,95 @@
#define SIRFSOC_MAX_CMD_BYTES 4
#define SIRFSOC_SPI_DEFAULT_FRQ 1000000
+struct sirf_spi_register {
+ /* registers common to SPI and USP-SPI */
+ u32 tx_rx_en;
+ u32 int_en;
+ u32 int_st;
+ u32 tx_dma_io_ctrl;
+ u32 tx_dma_io_len;
+ u32 txfifo_ctrl;
+ u32 txfifo_level_chk;
+ u32 txfifo_op;
+ u32 txfifo_st;
+ u32 txfifo_data;
+ u32 rx_dma_io_ctrl;
+ u32 rx_dma_io_len;
+ u32 rxfifo_ctrl;
+ u32 rxfifo_level_chk;
+ u32 rxfifo_op;
+ u32 rxfifo_st;
+ u32 rxfifo_data;
+ /* SPI-specific registers */
+ u32 spi_ctrl;
+ u32 spi_cmd;
+ u32 spi_dummy_delay_ctrl;
+ /* USP-SPI-specific registers */
+ u32 usp_mode1;
+ u32 usp_mode2;
+ u32 usp_tx_frame_ctrl;
+ u32 usp_rx_frame_ctrl;
+ u32 usp_pin_io_data;
+ u32 usp_risc_dsp_mode;
+ u32 usp_async_param_reg;
+ u32 usp_irda_x_mode_div;
+ u32 usp_sm_cfg;
+ u32 usp_int_en_clr;
+};
+
+static const struct sirf_spi_register real_spi_register = {
+ .tx_rx_en = 0x8,
+ .int_en = 0xc,
+ .int_st = 0x10,
+ .tx_dma_io_ctrl = 0x100,
+ .tx_dma_io_len = 0x104,
+ .txfifo_ctrl = 0x108,
+ .txfifo_level_chk = 0x10c,
+ .txfifo_op = 0x110,
+ .txfifo_st = 0x114,
+ .txfifo_data = 0x118,
+ .rx_dma_io_ctrl = 0x120,
+ .rx_dma_io_len = 0x124,
+ .rxfifo_ctrl = 0x128,
+ .rxfifo_level_chk = 0x12c,
+ .rxfifo_op = 0x130,
+ .rxfifo_st = 0x134,
+ .rxfifo_data = 0x138,
+ .spi_ctrl = 0x0,
+ .spi_cmd = 0x4,
+ .spi_dummy_delay_ctrl = 0x144,
+};
+
+static const struct sirf_spi_register usp_spi_register = {
+ .tx_rx_en = 0x10,
+ .int_en = 0x14,
+ .int_st = 0x18,
+ .tx_dma_io_ctrl = 0x100,
+ .tx_dma_io_len = 0x104,
+ .txfifo_ctrl = 0x108,
+ .txfifo_level_chk = 0x10c,
+ .txfifo_op = 0x110,
+ .txfifo_st = 0x114,
+ .txfifo_data = 0x118,
+ .rx_dma_io_ctrl = 0x120,
+ .rx_dma_io_len = 0x124,
+ .rxfifo_ctrl = 0x128,
+ .rxfifo_level_chk = 0x12c,
+ .rxfifo_op = 0x130,
+ .rxfifo_st = 0x134,
+ .rxfifo_data = 0x138,
+ .usp_mode1 = 0x0,
+ .usp_mode2 = 0x4,
+ .usp_tx_frame_ctrl = 0x8,
+ .usp_rx_frame_ctrl = 0xc,
+ .usp_pin_io_data = 0x1c,
+ .usp_risc_dsp_mode = 0x20,
+ .usp_async_param_reg = 0x24,
+ .usp_irda_x_mode_div = 0x28,
+ .usp_sm_cfg = 0x2c,
+ .usp_int_en_clr = 0x140,
+};
+
struct sirfsoc_spi {
struct spi_bitbang bitbang;
struct completion rx_done;
@@ -164,7 +275,6 @@ struct sirfsoc_spi {
struct dma_chan *tx_chan;
dma_addr_t src_start;
dma_addr_t dst_start;
- void *dummypage;
int word_width; /* in bytes */
/*
@@ -173,14 +283,39 @@ struct sirfsoc_spi {
*/
bool tx_by_cmd;
bool hw_cs;
+ enum sirf_spi_type type;
+ const struct sirf_spi_register *regs;
+ unsigned int fifo_size;
+ /* fifo empty offset is (fifo full offset + 1) */
+ unsigned int fifo_full_offset;
+ /* fifo_level_chk_mask is (fifo_size/4 - 1) */
+ unsigned int fifo_level_chk_mask;
+ unsigned int dat_max_frm_len;
+};
+
+struct sirf_spi_comp_data {
+ const struct sirf_spi_register *regs;
+ enum sirf_spi_type type;
+ unsigned int dat_max_frm_len;
+ unsigned int fifo_size;
+ void (*hwinit)(struct sirfsoc_spi *sspi);
};
+static void sirfsoc_usp_hwinit(struct sirfsoc_spi *sspi)
+{
+ /* reset the USP block, then re-enable it so it can operate */
+ writel(readl(sspi->base + sspi->regs->usp_mode1) &
+ ~SIRFSOC_USP_EN, sspi->base + sspi->regs->usp_mode1);
+ writel(readl(sspi->base + sspi->regs->usp_mode1) |
+ SIRFSOC_USP_EN, sspi->base + sspi->regs->usp_mode1);
+}
+
static void spi_sirfsoc_rx_word_u8(struct sirfsoc_spi *sspi)
{
u32 data;
u8 *rx = sspi->rx;
- data = readl(sspi->base + SIRFSOC_SPI_RXFIFO_DATA);
+ data = readl(sspi->base + sspi->regs->rxfifo_data);
if (rx) {
*rx++ = (u8) data;
@@ -199,8 +334,7 @@ static void spi_sirfsoc_tx_word_u8(struct sirfsoc_spi *sspi)
data = *tx++;
sspi->tx = tx;
}
-
- writel(data, sspi->base + SIRFSOC_SPI_TXFIFO_DATA);
+ writel(data, sspi->base + sspi->regs->txfifo_data);
sspi->left_tx_word--;
}
@@ -209,7 +343,7 @@ static void spi_sirfsoc_rx_word_u16(struct sirfsoc_spi *sspi)
u32 data;
u16 *rx = sspi->rx;
- data = readl(sspi->base + SIRFSOC_SPI_RXFIFO_DATA);
+ data = readl(sspi->base + sspi->regs->rxfifo_data);
if (rx) {
*rx++ = (u16) data;
@@ -229,7 +363,7 @@ static void spi_sirfsoc_tx_word_u16(struct sirfsoc_spi *sspi)
sspi->tx = tx;
}
- writel(data, sspi->base + SIRFSOC_SPI_TXFIFO_DATA);
+ writel(data, sspi->base + sspi->regs->txfifo_data);
sspi->left_tx_word--;
}
@@ -238,7 +372,7 @@ static void spi_sirfsoc_rx_word_u32(struct sirfsoc_spi *sspi)
u32 data;
u32 *rx = sspi->rx;
- data = readl(sspi->base + SIRFSOC_SPI_RXFIFO_DATA);
+ data = readl(sspi->base + sspi->regs->rxfifo_data);
if (rx) {
*rx++ = (u32) data;
@@ -259,41 +393,59 @@ static void spi_sirfsoc_tx_word_u32(struct sirfsoc_spi *sspi)
sspi->tx = tx;
}
- writel(data, sspi->base + SIRFSOC_SPI_TXFIFO_DATA);
+ writel(data, sspi->base + sspi->regs->txfifo_data);
sspi->left_tx_word--;
}
static irqreturn_t spi_sirfsoc_irq(int irq, void *dev_id)
{
struct sirfsoc_spi *sspi = dev_id;
- u32 spi_stat = readl(sspi->base + SIRFSOC_SPI_INT_STATUS);
- if (sspi->tx_by_cmd && (spi_stat & SIRFSOC_SPI_FRM_END)) {
+ u32 spi_stat;
+
+ spi_stat = readl(sspi->base + sspi->regs->int_st);
+ if (sspi->tx_by_cmd && sspi->type == SIRF_REAL_SPI
+ && (spi_stat & SIRFSOC_SPI_FRM_END)) {
complete(&sspi->tx_done);
- writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN);
- writel(SIRFSOC_SPI_INT_MASK_ALL,
- sspi->base + SIRFSOC_SPI_INT_STATUS);
+ writel(0x0, sspi->base + sspi->regs->int_en);
+ writel(readl(sspi->base + sspi->regs->int_st),
+ sspi->base + sspi->regs->int_st);
return IRQ_HANDLED;
}
-
/* Error Conditions */
if (spi_stat & SIRFSOC_SPI_RX_OFLOW ||
spi_stat & SIRFSOC_SPI_TX_UFLOW) {
complete(&sspi->tx_done);
complete(&sspi->rx_done);
- writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN);
- writel(SIRFSOC_SPI_INT_MASK_ALL,
- sspi->base + SIRFSOC_SPI_INT_STATUS);
+ switch (sspi->type) {
+ case SIRF_REAL_SPI:
+ case SIRF_USP_SPI_P2:
+ writel(0x0, sspi->base + sspi->regs->int_en);
+ break;
+ case SIRF_USP_SPI_A7:
+ writel(~0UL, sspi->base + sspi->regs->usp_int_en_clr);
+ break;
+ }
+ writel(readl(sspi->base + sspi->regs->int_st),
+ sspi->base + sspi->regs->int_st);
return IRQ_HANDLED;
}
if (spi_stat & SIRFSOC_SPI_TXFIFO_EMPTY)
complete(&sspi->tx_done);
- while (!(readl(sspi->base + SIRFSOC_SPI_INT_STATUS) &
+ while (!(readl(sspi->base + sspi->regs->int_st) &
SIRFSOC_SPI_RX_IO_DMA))
cpu_relax();
complete(&sspi->rx_done);
- writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN);
- writel(SIRFSOC_SPI_INT_MASK_ALL,
- sspi->base + SIRFSOC_SPI_INT_STATUS);
+ switch (sspi->type) {
+ case SIRF_REAL_SPI:
+ case SIRF_USP_SPI_P2:
+ writel(0x0, sspi->base + sspi->regs->int_en);
+ break;
+ case SIRF_USP_SPI_A7:
+ writel(~0UL, sspi->base + sspi->regs->usp_int_en_clr);
+ break;
+ }
+ writel(readl(sspi->base + sspi->regs->int_st),
+ sspi->base + sspi->regs->int_st);
return IRQ_HANDLED;
}
@@ -313,8 +465,8 @@ static void spi_sirfsoc_cmd_transfer(struct spi_device *spi,
u32 cmd;
sspi = spi_master_get_devdata(spi->master);
- writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
- writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
+ writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + sspi->regs->txfifo_op);
+ writel(SIRFSOC_SPI_FIFO_START, sspi->base + sspi->regs->txfifo_op);
memcpy(&cmd, sspi->tx, t->len);
if (sspi->word_width == 1 && !(spi->mode & SPI_LSB_FIRST))
cmd = cpu_to_be32(cmd) >>
@@ -322,11 +474,11 @@ static void spi_sirfsoc_cmd_transfer(struct spi_device *spi,
if (sspi->word_width == 2 && t->len == 4 &&
(!(spi->mode & SPI_LSB_FIRST)))
cmd = ((cmd & 0xffff) << 16) | (cmd >> 16);
- writel(cmd, sspi->base + SIRFSOC_SPI_CMD);
+ writel(cmd, sspi->base + sspi->regs->spi_cmd);
writel(SIRFSOC_SPI_FRM_END_INT_EN,
- sspi->base + SIRFSOC_SPI_INT_EN);
+ sspi->base + sspi->regs->int_en);
writel(SIRFSOC_SPI_CMD_TX_EN,
- sspi->base + SIRFSOC_SPI_TX_RX_EN);
+ sspi->base + sspi->regs->tx_rx_en);
if (wait_for_completion_timeout(&sspi->tx_done, timeout) == 0) {
dev_err(&spi->dev, "cmd transfer timeout\n");
return;
@@ -342,25 +494,56 @@ static void spi_sirfsoc_dma_transfer(struct spi_device *spi,
int timeout = t->len * 10;
sspi = spi_master_get_devdata(spi->master);
- writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
- writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
- writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
- writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
- writel(0, sspi->base + SIRFSOC_SPI_INT_EN);
- writel(SIRFSOC_SPI_INT_MASK_ALL, sspi->base + SIRFSOC_SPI_INT_STATUS);
- if (sspi->left_tx_word < SIRFSOC_SPI_DAT_FRM_LEN_MAX) {
- writel(readl(sspi->base + SIRFSOC_SPI_CTRL) |
- SIRFSOC_SPI_ENA_AUTO_CLR | SIRFSOC_SPI_MUL_DAT_MODE,
- sspi->base + SIRFSOC_SPI_CTRL);
- writel(sspi->left_tx_word - 1,
- sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN);
- writel(sspi->left_tx_word - 1,
- sspi->base + SIRFSOC_SPI_RX_DMA_IO_LEN);
+ writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + sspi->regs->rxfifo_op);
+ writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + sspi->regs->txfifo_op);
+ switch (sspi->type) {
+ case SIRF_REAL_SPI:
+ writel(SIRFSOC_SPI_FIFO_START,
+ sspi->base + sspi->regs->rxfifo_op);
+ writel(SIRFSOC_SPI_FIFO_START,
+ sspi->base + sspi->regs->txfifo_op);
+ writel(0, sspi->base + sspi->regs->int_en);
+ break;
+ case SIRF_USP_SPI_P2:
+ writel(0x0, sspi->base + sspi->regs->rxfifo_op);
+ writel(0x0, sspi->base + sspi->regs->txfifo_op);
+ writel(0, sspi->base + sspi->regs->int_en);
+ break;
+ case SIRF_USP_SPI_A7:
+ writel(0x0, sspi->base + sspi->regs->rxfifo_op);
+ writel(0x0, sspi->base + sspi->regs->txfifo_op);
+ writel(~0UL, sspi->base + sspi->regs->usp_int_en_clr);
+ break;
+ }
+ writel(readl(sspi->base + sspi->regs->int_st),
+ sspi->base + sspi->regs->int_st);
+ if (sspi->left_tx_word < sspi->dat_max_frm_len) {
+ switch (sspi->type) {
+ case SIRF_REAL_SPI:
+ writel(readl(sspi->base + sspi->regs->spi_ctrl) |
+ SIRFSOC_SPI_ENA_AUTO_CLR |
+ SIRFSOC_SPI_MUL_DAT_MODE,
+ sspi->base + sspi->regs->spi_ctrl);
+ writel(sspi->left_tx_word - 1,
+ sspi->base + sspi->regs->tx_dma_io_len);
+ writel(sspi->left_tx_word - 1,
+ sspi->base + sspi->regs->rx_dma_io_len);
+ break;
+ case SIRF_USP_SPI_P2:
+ case SIRF_USP_SPI_A7:
+ /* when the USP simulates SPI, tx/rx_dma_io_len is in bytes */
+ writel(sspi->left_tx_word * sspi->word_width,
+ sspi->base + sspi->regs->tx_dma_io_len);
+ writel(sspi->left_tx_word * sspi->word_width,
+ sspi->base + sspi->regs->rx_dma_io_len);
+ break;
+ }
} else {
- writel(readl(sspi->base + SIRFSOC_SPI_CTRL),
- sspi->base + SIRFSOC_SPI_CTRL);
- writel(0, sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN);
- writel(0, sspi->base + SIRFSOC_SPI_RX_DMA_IO_LEN);
+ if (sspi->type == SIRF_REAL_SPI)
+ writel(readl(sspi->base + sspi->regs->spi_ctrl),
+ sspi->base + sspi->regs->spi_ctrl);
+ writel(0, sspi->base + sspi->regs->tx_dma_io_len);
+ writel(0, sspi->base + sspi->regs->rx_dma_io_len);
}
sspi->dst_start = dma_map_single(&spi->dev, sspi->rx, t->len,
(t->tx_buf != t->rx_buf) ?
@@ -385,7 +568,14 @@ static void spi_sirfsoc_dma_transfer(struct spi_device *spi,
dma_async_issue_pending(sspi->tx_chan);
dma_async_issue_pending(sspi->rx_chan);
writel(SIRFSOC_SPI_RX_EN | SIRFSOC_SPI_TX_EN,
- sspi->base + SIRFSOC_SPI_TX_RX_EN);
+ sspi->base + sspi->regs->tx_rx_en);
+ if (sspi->type == SIRF_USP_SPI_P2 ||
+ sspi->type == SIRF_USP_SPI_A7) {
+ writel(SIRFSOC_SPI_FIFO_START,
+ sspi->base + sspi->regs->rxfifo_op);
+ writel(SIRFSOC_SPI_FIFO_START,
+ sspi->base + sspi->regs->txfifo_op);
+ }
if (wait_for_completion_timeout(&sspi->rx_done, timeout) == 0) {
dev_err(&spi->dev, "transfer timeout\n");
dmaengine_terminate_all(sspi->rx_chan);
@@ -398,15 +588,21 @@ static void spi_sirfsoc_dma_transfer(struct spi_device *spi,
*/
if (wait_for_completion_timeout(&sspi->tx_done, timeout) == 0) {
dev_err(&spi->dev, "transfer timeout\n");
+ if (sspi->type == SIRF_USP_SPI_P2 ||
+ sspi->type == SIRF_USP_SPI_A7)
+ writel(0, sspi->base + sspi->regs->tx_rx_en);
dmaengine_terminate_all(sspi->tx_chan);
}
dma_unmap_single(&spi->dev, sspi->src_start, t->len, DMA_TO_DEVICE);
dma_unmap_single(&spi->dev, sspi->dst_start, t->len, DMA_FROM_DEVICE);
/* TX, RX FIFO stop */
- writel(0, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
- writel(0, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
- if (sspi->left_tx_word >= SIRFSOC_SPI_DAT_FRM_LEN_MAX)
- writel(0, sspi->base + SIRFSOC_SPI_TX_RX_EN);
+ writel(0, sspi->base + sspi->regs->rxfifo_op);
+ writel(0, sspi->base + sspi->regs->txfifo_op);
+ if (sspi->left_tx_word >= sspi->dat_max_frm_len)
+ writel(0, sspi->base + sspi->regs->tx_rx_en);
+ if (sspi->type == SIRF_USP_SPI_P2 ||
+ sspi->type == SIRF_USP_SPI_A7)
+ writel(0, sspi->base + sspi->regs->tx_rx_en);
}
static void spi_sirfsoc_pio_transfer(struct spi_device *spi,
@@ -414,57 +610,105 @@ static void spi_sirfsoc_pio_transfer(struct spi_device *spi,
{
struct sirfsoc_spi *sspi;
int timeout = t->len * 10;
+ unsigned int data_units;
sspi = spi_master_get_devdata(spi->master);
do {
writel(SIRFSOC_SPI_FIFO_RESET,
- sspi->base + SIRFSOC_SPI_RXFIFO_OP);
+ sspi->base + sspi->regs->rxfifo_op);
writel(SIRFSOC_SPI_FIFO_RESET,
- sspi->base + SIRFSOC_SPI_TXFIFO_OP);
- writel(SIRFSOC_SPI_FIFO_START,
- sspi->base + SIRFSOC_SPI_RXFIFO_OP);
- writel(SIRFSOC_SPI_FIFO_START,
- sspi->base + SIRFSOC_SPI_TXFIFO_OP);
- writel(0, sspi->base + SIRFSOC_SPI_INT_EN);
- writel(SIRFSOC_SPI_INT_MASK_ALL,
- sspi->base + SIRFSOC_SPI_INT_STATUS);
- writel(readl(sspi->base + SIRFSOC_SPI_CTRL) |
- SIRFSOC_SPI_MUL_DAT_MODE | SIRFSOC_SPI_ENA_AUTO_CLR,
- sspi->base + SIRFSOC_SPI_CTRL);
- writel(min(sspi->left_tx_word, (u32)(256 / sspi->word_width))
- - 1, sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN);
- writel(min(sspi->left_rx_word, (u32)(256 / sspi->word_width))
- - 1, sspi->base + SIRFSOC_SPI_RX_DMA_IO_LEN);
- while (!((readl(sspi->base + SIRFSOC_SPI_TXFIFO_STATUS)
- & SIRFSOC_SPI_FIFO_FULL)) && sspi->left_tx_word)
+ sspi->base + sspi->regs->txfifo_op);
+ switch (sspi->type) {
+ case SIRF_USP_SPI_P2:
+ writel(0x0, sspi->base + sspi->regs->rxfifo_op);
+ writel(0x0, sspi->base + sspi->regs->txfifo_op);
+ writel(0, sspi->base + sspi->regs->int_en);
+ writel(readl(sspi->base + sspi->regs->int_st),
+ sspi->base + sspi->regs->int_st);
+ writel(min((sspi->left_tx_word * sspi->word_width),
+ sspi->fifo_size),
+ sspi->base + sspi->regs->tx_dma_io_len);
+ writel(min((sspi->left_rx_word * sspi->word_width),
+ sspi->fifo_size),
+ sspi->base + sspi->regs->rx_dma_io_len);
+ break;
+ case SIRF_USP_SPI_A7:
+ writel(0x0, sspi->base + sspi->regs->rxfifo_op);
+ writel(0x0, sspi->base + sspi->regs->txfifo_op);
+ writel(~0UL, sspi->base + sspi->regs->usp_int_en_clr);
+ writel(readl(sspi->base + sspi->regs->int_st),
+ sspi->base + sspi->regs->int_st);
+ writel(min((sspi->left_tx_word * sspi->word_width),
+ sspi->fifo_size),
+ sspi->base + sspi->regs->tx_dma_io_len);
+ writel(min((sspi->left_rx_word * sspi->word_width),
+ sspi->fifo_size),
+ sspi->base + sspi->regs->rx_dma_io_len);
+ break;
+ case SIRF_REAL_SPI:
+ writel(SIRFSOC_SPI_FIFO_START,
+ sspi->base + sspi->regs->rxfifo_op);
+ writel(SIRFSOC_SPI_FIFO_START,
+ sspi->base + sspi->regs->txfifo_op);
+ writel(0, sspi->base + sspi->regs->int_en);
+ writel(readl(sspi->base + sspi->regs->int_st),
+ sspi->base + sspi->regs->int_st);
+ writel(readl(sspi->base + sspi->regs->spi_ctrl) |
+ SIRFSOC_SPI_MUL_DAT_MODE |
+ SIRFSOC_SPI_ENA_AUTO_CLR,
+ sspi->base + sspi->regs->spi_ctrl);
+ data_units = sspi->fifo_size / sspi->word_width;
+ writel(min(sspi->left_tx_word, data_units) - 1,
+ sspi->base + sspi->regs->tx_dma_io_len);
+ writel(min(sspi->left_rx_word, data_units) - 1,
+ sspi->base + sspi->regs->rx_dma_io_len);
+ break;
+ }
+ while (!((readl(sspi->base + sspi->regs->txfifo_st)
+ & SIRFSOC_SPI_FIFO_FULL_MASK(sspi))) &&
+ sspi->left_tx_word)
sspi->tx_word(sspi);
writel(SIRFSOC_SPI_TXFIFO_EMPTY_INT_EN |
SIRFSOC_SPI_TX_UFLOW_INT_EN |
SIRFSOC_SPI_RX_OFLOW_INT_EN |
SIRFSOC_SPI_RX_IO_DMA_INT_EN,
- sspi->base + SIRFSOC_SPI_INT_EN);
+ sspi->base + sspi->regs->int_en);
writel(SIRFSOC_SPI_RX_EN | SIRFSOC_SPI_TX_EN,
- sspi->base + SIRFSOC_SPI_TX_RX_EN);
+ sspi->base + sspi->regs->tx_rx_en);
+ if (sspi->type == SIRF_USP_SPI_P2 ||
+ sspi->type == SIRF_USP_SPI_A7) {
+ writel(SIRFSOC_SPI_FIFO_START,
+ sspi->base + sspi->regs->rxfifo_op);
+ writel(SIRFSOC_SPI_FIFO_START,
+ sspi->base + sspi->regs->txfifo_op);
+ }
if (!wait_for_completion_timeout(&sspi->tx_done, timeout) ||
!wait_for_completion_timeout(&sspi->rx_done, timeout)) {
dev_err(&spi->dev, "transfer timeout\n");
+ if (sspi->type == SIRF_USP_SPI_P2 ||
+ sspi->type == SIRF_USP_SPI_A7)
+ writel(0, sspi->base + sspi->regs->tx_rx_en);
break;
}
- while (!((readl(sspi->base + SIRFSOC_SPI_RXFIFO_STATUS)
- & SIRFSOC_SPI_FIFO_EMPTY)) && sspi->left_rx_word)
+ while (!((readl(sspi->base + sspi->regs->rxfifo_st)
+ & SIRFSOC_SPI_FIFO_EMPTY_MASK(sspi))) &&
+ sspi->left_rx_word)
sspi->rx_word(sspi);
- writel(0, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
- writel(0, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
+ if (sspi->type == SIRF_USP_SPI_P2 ||
+ sspi->type == SIRF_USP_SPI_A7)
+ writel(0, sspi->base + sspi->regs->tx_rx_en);
+ writel(0, sspi->base + sspi->regs->rxfifo_op);
+ writel(0, sspi->base + sspi->regs->txfifo_op);
} while (sspi->left_tx_word != 0 || sspi->left_rx_word != 0);
}
static int spi_sirfsoc_transfer(struct spi_device *spi, struct spi_transfer *t)
{
struct sirfsoc_spi *sspi;
- sspi = spi_master_get_devdata(spi->master);
- sspi->tx = t->tx_buf ? t->tx_buf : sspi->dummypage;
- sspi->rx = t->rx_buf ? t->rx_buf : sspi->dummypage;
+ sspi = spi_master_get_devdata(spi->master);
+ sspi->tx = t->tx_buf;
+ sspi->rx = t->rx_buf;
sspi->left_tx_word = sspi->left_rx_word = t->len / sspi->word_width;
reinit_completion(&sspi->rx_done);
reinit_completion(&sspi->tx_done);
@@ -473,7 +717,7 @@ static int spi_sirfsoc_transfer(struct spi_device *spi, struct spi_transfer *t)
* null, just fill command data into command register and wait for its
* completion.
*/
- if (sspi->tx_by_cmd)
+ if (sspi->type == SIRF_REAL_SPI && sspi->tx_by_cmd)
spi_sirfsoc_cmd_transfer(spi, t);
else if (IS_DMA_VALID(t))
spi_sirfsoc_dma_transfer(spi, t);
@@ -488,22 +732,49 @@ static void spi_sirfsoc_chipselect(struct spi_device *spi, int value)
struct sirfsoc_spi *sspi = spi_master_get_devdata(spi->master);
if (sspi->hw_cs) {
- u32 regval = readl(sspi->base + SIRFSOC_SPI_CTRL);
- switch (value) {
- case BITBANG_CS_ACTIVE:
- if (spi->mode & SPI_CS_HIGH)
- regval |= SIRFSOC_SPI_CS_IO_OUT;
- else
- regval &= ~SIRFSOC_SPI_CS_IO_OUT;
+ u32 regval;
+
+ switch (sspi->type) {
+ case SIRF_REAL_SPI:
+ regval = readl(sspi->base + sspi->regs->spi_ctrl);
+ switch (value) {
+ case BITBANG_CS_ACTIVE:
+ if (spi->mode & SPI_CS_HIGH)
+ regval |= SIRFSOC_SPI_CS_IO_OUT;
+ else
+ regval &= ~SIRFSOC_SPI_CS_IO_OUT;
+ break;
+ case BITBANG_CS_INACTIVE:
+ if (spi->mode & SPI_CS_HIGH)
+ regval &= ~SIRFSOC_SPI_CS_IO_OUT;
+ else
+ regval |= SIRFSOC_SPI_CS_IO_OUT;
+ break;
+ }
+ writel(regval, sspi->base + sspi->regs->spi_ctrl);
break;
- case BITBANG_CS_INACTIVE:
- if (spi->mode & SPI_CS_HIGH)
- regval &= ~SIRFSOC_SPI_CS_IO_OUT;
- else
- regval |= SIRFSOC_SPI_CS_IO_OUT;
+ case SIRF_USP_SPI_P2:
+ case SIRF_USP_SPI_A7:
+ regval = readl(sspi->base +
+ sspi->regs->usp_pin_io_data);
+ switch (value) {
+ case BITBANG_CS_ACTIVE:
+ if (spi->mode & SPI_CS_HIGH)
+ regval |= SIRFSOC_USP_CS_HIGH_VALUE;
+ else
+ regval &= ~(SIRFSOC_USP_CS_HIGH_VALUE);
+ break;
+ case BITBANG_CS_INACTIVE:
+ if (spi->mode & SPI_CS_HIGH)
+ regval &= ~(SIRFSOC_USP_CS_HIGH_VALUE);
+ else
+ regval |= SIRFSOC_USP_CS_HIGH_VALUE;
+ break;
+ }
+ writel(regval,
+ sspi->base + sspi->regs->usp_pin_io_data);
break;
}
- writel(regval, sspi->base + SIRFSOC_SPI_CTRL);
} else {
switch (value) {
case BITBANG_CS_ACTIVE:
@@ -518,27 +789,102 @@ static void spi_sirfsoc_chipselect(struct spi_device *spi, int value)
}
}
+static int spi_sirfsoc_config_mode(struct spi_device *spi)
+{
+ struct sirfsoc_spi *sspi;
+ u32 regval, usp_mode1;
+
+ sspi = spi_master_get_devdata(spi->master);
+ regval = readl(sspi->base + sspi->regs->spi_ctrl);
+ usp_mode1 = readl(sspi->base + sspi->regs->usp_mode1);
+ if (!(spi->mode & SPI_CS_HIGH)) {
+ regval |= SIRFSOC_SPI_CS_IDLE_STAT;
+ usp_mode1 &= ~SIRFSOC_USP_CS_HIGH_VALID;
+ } else {
+ regval &= ~SIRFSOC_SPI_CS_IDLE_STAT;
+ usp_mode1 |= SIRFSOC_USP_CS_HIGH_VALID;
+ }
+ if (!(spi->mode & SPI_LSB_FIRST)) {
+ regval |= SIRFSOC_SPI_TRAN_MSB;
+ usp_mode1 &= ~SIRFSOC_USP_LSB;
+ } else {
+ regval &= ~SIRFSOC_SPI_TRAN_MSB;
+ usp_mode1 |= SIRFSOC_USP_LSB;
+ }
+ if (spi->mode & SPI_CPOL) {
+ regval |= SIRFSOC_SPI_CLK_IDLE_STAT;
+ usp_mode1 |= SIRFSOC_USP_SCLK_IDLE_STAT;
+ } else {
+ regval &= ~SIRFSOC_SPI_CLK_IDLE_STAT;
+ usp_mode1 &= ~SIRFSOC_USP_SCLK_IDLE_STAT;
+ }
+ /*
+ * Data should be driven at least 1/2 cycle before the fetch edge
+ * to make sure that data gets stable at the fetch edge.
+ */
+ if (((spi->mode & SPI_CPOL) && (spi->mode & SPI_CPHA)) ||
+ (!(spi->mode & SPI_CPOL) && !(spi->mode & SPI_CPHA))) {
+ regval &= ~SIRFSOC_SPI_DRV_POS_EDGE;
+ usp_mode1 |= (SIRFSOC_USP_TXD_FALLING_EDGE |
+ SIRFSOC_USP_RXD_FALLING_EDGE);
+ } else {
+ regval |= SIRFSOC_SPI_DRV_POS_EDGE;
+ usp_mode1 &= ~(SIRFSOC_USP_RXD_FALLING_EDGE |
+ SIRFSOC_USP_TXD_FALLING_EDGE);
+ }
+ writel((SIRFSOC_SPI_FIFO_LEVEL_CHK_MASK(sspi, sspi->fifo_size - 2) <<
+ SIRFSOC_SPI_FIFO_SC_OFFSET) |
+ (SIRFSOC_SPI_FIFO_LEVEL_CHK_MASK(sspi, sspi->fifo_size / 2) <<
+ SIRFSOC_SPI_FIFO_LC_OFFSET) |
+ (SIRFSOC_SPI_FIFO_LEVEL_CHK_MASK(sspi, 2) <<
+ SIRFSOC_SPI_FIFO_HC_OFFSET),
+ sspi->base + sspi->regs->txfifo_level_chk);
+ writel((SIRFSOC_SPI_FIFO_LEVEL_CHK_MASK(sspi, 2) <<
+ SIRFSOC_SPI_FIFO_SC_OFFSET) |
+ (SIRFSOC_SPI_FIFO_LEVEL_CHK_MASK(sspi, sspi->fifo_size / 2) <<
+ SIRFSOC_SPI_FIFO_LC_OFFSET) |
+ (SIRFSOC_SPI_FIFO_LEVEL_CHK_MASK(sspi, sspi->fifo_size - 2) <<
+ SIRFSOC_SPI_FIFO_HC_OFFSET),
+ sspi->base + sspi->regs->rxfifo_level_chk);
+ /*
+ * Never switch to hardware CS mode: in hardware CS mode the CS signal
+ * cannot be controlled by the driver.
+ */
+ switch (sspi->type) {
+ case SIRF_REAL_SPI:
+ regval |= SIRFSOC_SPI_CS_IO_MODE;
+ writel(regval, sspi->base + sspi->regs->spi_ctrl);
+ break;
+ case SIRF_USP_SPI_P2:
+ case SIRF_USP_SPI_A7:
+ usp_mode1 |= SIRFSOC_USP_SYNC_MODE;
+ usp_mode1 |= SIRFSOC_USP_TFS_IO_MODE;
+ usp_mode1 &= ~SIRFSOC_USP_TFS_IO_INPUT;
+ writel(usp_mode1, sspi->base + sspi->regs->usp_mode1);
+ break;
+ }
+
+ return 0;
+}
+
static int
spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
{
struct sirfsoc_spi *sspi;
u8 bits_per_word = 0;
int hz = 0;
- u32 regval;
- u32 txfifo_ctrl, rxfifo_ctrl;
- u32 fifo_size = SIRFSOC_SPI_FIFO_SIZE / 4;
+ u32 regval, txfifo_ctrl, rxfifo_ctrl, tx_frm_ctl, rx_frm_ctl, usp_mode2;
sspi = spi_master_get_devdata(spi->master);
bits_per_word = (t) ? t->bits_per_word : spi->bits_per_word;
hz = t && t->speed_hz ? t->speed_hz : spi->max_speed_hz;
- regval = (sspi->ctrl_freq / (2 * hz)) - 1;
+ usp_mode2 = regval = (sspi->ctrl_freq / (2 * hz)) - 1;
if (regval > 0xFFFF || regval < 0) {
dev_err(&spi->dev, "Speed %d not supported\n", hz);
return -EINVAL;
}
-
switch (bits_per_word) {
case 8:
regval |= SIRFSOC_SPI_TRAN_DAT_FORMAT_8;
@@ -559,94 +905,177 @@ spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
sspi->tx_word = spi_sirfsoc_tx_word_u32;
break;
default:
- BUG();
+ dev_err(&spi->dev, "bpw %d not supported\n", bits_per_word);
+ return -EINVAL;
}
-
sspi->word_width = DIV_ROUND_UP(bits_per_word, 8);
- txfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
- (sspi->word_width >> 1);
- rxfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
- (sspi->word_width >> 1);
-
- if (!(spi->mode & SPI_CS_HIGH))
- regval |= SIRFSOC_SPI_CS_IDLE_STAT;
- if (!(spi->mode & SPI_LSB_FIRST))
- regval |= SIRFSOC_SPI_TRAN_MSB;
- if (spi->mode & SPI_CPOL)
- regval |= SIRFSOC_SPI_CLK_IDLE_STAT;
-
- /*
- * Data should be driven at least 1/2 cycle before the fetch edge
- * to make sure that data gets stable at the fetch edge.
- */
- if (((spi->mode & SPI_CPOL) && (spi->mode & SPI_CPHA)) ||
- (!(spi->mode & SPI_CPOL) && !(spi->mode & SPI_CPHA)))
- regval &= ~SIRFSOC_SPI_DRV_POS_EDGE;
- else
- regval |= SIRFSOC_SPI_DRV_POS_EDGE;
-
- writel(SIRFSOC_SPI_FIFO_SC(fifo_size - 2) |
- SIRFSOC_SPI_FIFO_LC(fifo_size / 2) |
- SIRFSOC_SPI_FIFO_HC(2),
- sspi->base + SIRFSOC_SPI_TXFIFO_LEVEL_CHK);
- writel(SIRFSOC_SPI_FIFO_SC(2) |
- SIRFSOC_SPI_FIFO_LC(fifo_size / 2) |
- SIRFSOC_SPI_FIFO_HC(fifo_size - 2),
- sspi->base + SIRFSOC_SPI_RXFIFO_LEVEL_CHK);
- writel(txfifo_ctrl, sspi->base + SIRFSOC_SPI_TXFIFO_CTRL);
- writel(rxfifo_ctrl, sspi->base + SIRFSOC_SPI_RXFIFO_CTRL);
-
- if (t && t->tx_buf && !t->rx_buf && (t->len <= SIRFSOC_MAX_CMD_BYTES)) {
- regval |= (SIRFSOC_SPI_CMD_BYTE_NUM((t->len - 1)) |
- SIRFSOC_SPI_CMD_MODE);
- sspi->tx_by_cmd = true;
- } else {
- regval &= ~SIRFSOC_SPI_CMD_MODE;
- sspi->tx_by_cmd = false;
+ txfifo_ctrl = (((sspi->fifo_size / 2) &
+ SIRFSOC_SPI_FIFO_THD_MASK(sspi))
+ << SIRFSOC_SPI_FIFO_THD_OFFSET) |
+ (sspi->word_width >> 1);
+ rxfifo_ctrl = (((sspi->fifo_size / 2) &
+ SIRFSOC_SPI_FIFO_THD_MASK(sspi))
+ << SIRFSOC_SPI_FIFO_THD_OFFSET) |
+ (sspi->word_width >> 1);
+ writel(txfifo_ctrl, sspi->base + sspi->regs->txfifo_ctrl);
+ writel(rxfifo_ctrl, sspi->base + sspi->regs->rxfifo_ctrl);
+ if (sspi->type == SIRF_USP_SPI_P2 ||
+ sspi->type == SIRF_USP_SPI_A7) {
+ tx_frm_ctl = 0;
+ tx_frm_ctl |= ((bits_per_word - 1) & SIRFSOC_USP_TX_DATA_MASK)
+ << SIRFSOC_USP_TX_DATA_OFFSET;
+ tx_frm_ctl |= ((bits_per_word + 1 + SIRFSOC_USP_TXD_DELAY_LEN
+ - 1) & SIRFSOC_USP_TX_SYNC_MASK) <<
+ SIRFSOC_USP_TX_SYNC_OFFSET;
+ tx_frm_ctl |= ((bits_per_word + 1 + SIRFSOC_USP_TXD_DELAY_LEN
+ + 2 - 1) & SIRFSOC_USP_TX_FRAME_MASK) <<
+ SIRFSOC_USP_TX_FRAME_OFFSET;
+ tx_frm_ctl |= ((bits_per_word - 1) &
+ SIRFSOC_USP_TX_SHIFTER_MASK) <<
+ SIRFSOC_USP_TX_SHIFTER_OFFSET;
+ rx_frm_ctl = 0;
+ rx_frm_ctl |= ((bits_per_word - 1) & SIRFSOC_USP_RX_DATA_MASK)
+ << SIRFSOC_USP_RX_DATA_OFFSET;
+ rx_frm_ctl |= ((bits_per_word + 1 + SIRFSOC_USP_RXD_DELAY_LEN
+ + 2 - 1) & SIRFSOC_USP_RX_FRAME_MASK) <<
+ SIRFSOC_USP_RX_FRAME_OFFSET;
+ rx_frm_ctl |= ((bits_per_word - 1)
+ & SIRFSOC_USP_RX_SHIFTER_MASK) <<
+ SIRFSOC_USP_RX_SHIFTER_OFFSET;
+ writel(tx_frm_ctl | (((usp_mode2 >> 10) &
+ SIRFSOC_USP_CLK_10_11_MASK) <<
+ SIRFSOC_USP_CLK_10_11_OFFSET),
+ sspi->base + sspi->regs->usp_tx_frame_ctrl);
+ writel(rx_frm_ctl | (((usp_mode2 >> 12) &
+ SIRFSOC_USP_CLK_12_15_MASK) <<
+ SIRFSOC_USP_CLK_12_15_OFFSET),
+ sspi->base + sspi->regs->usp_rx_frame_ctrl);
+ writel(readl(sspi->base + sspi->regs->usp_mode2) |
+ ((usp_mode2 & SIRFSOC_USP_CLK_DIVISOR_MASK) <<
+ SIRFSOC_USP_CLK_DIVISOR_OFFSET) |
+ (SIRFSOC_USP_RXD_DELAY_LEN <<
+ SIRFSOC_USP_RXD_DELAY_OFFSET) |
+ (SIRFSOC_USP_TXD_DELAY_LEN <<
+ SIRFSOC_USP_TXD_DELAY_OFFSET),
+ sspi->base + sspi->regs->usp_mode2);
+ }
+ if (sspi->type == SIRF_REAL_SPI)
+ writel(regval, sspi->base + sspi->regs->spi_ctrl);
+ spi_sirfsoc_config_mode(spi);
+ if (sspi->type == SIRF_REAL_SPI) {
+ if (t && t->tx_buf && !t->rx_buf &&
+ (t->len <= SIRFSOC_MAX_CMD_BYTES)) {
+ sspi->tx_by_cmd = true;
+ writel(readl(sspi->base + sspi->regs->spi_ctrl) |
+ (SIRFSOC_SPI_CMD_BYTE_NUM((t->len - 1)) |
+ SIRFSOC_SPI_CMD_MODE),
+ sspi->base + sspi->regs->spi_ctrl);
+ } else {
+ sspi->tx_by_cmd = false;
+ writel(readl(sspi->base + sspi->regs->spi_ctrl) &
+ ~SIRFSOC_SPI_CMD_MODE,
+ sspi->base + sspi->regs->spi_ctrl);
+ }
}
- /*
- * it should never set to hardware cs mode because in hardware cs mode,
- * cs signal can't controlled by driver.
- */
- regval |= SIRFSOC_SPI_CS_IO_MODE;
- writel(regval, sspi->base + SIRFSOC_SPI_CTRL);
-
if (IS_DMA_VALID(t)) {
/* Enable DMA mode for RX, TX */
- writel(0, sspi->base + SIRFSOC_SPI_TX_DMA_IO_CTRL);
+ writel(0, sspi->base + sspi->regs->tx_dma_io_ctrl);
writel(SIRFSOC_SPI_RX_DMA_FLUSH,
- sspi->base + SIRFSOC_SPI_RX_DMA_IO_CTRL);
+ sspi->base + sspi->regs->rx_dma_io_ctrl);
} else {
/* Enable IO mode for RX, TX */
writel(SIRFSOC_SPI_IO_MODE_SEL,
- sspi->base + SIRFSOC_SPI_TX_DMA_IO_CTRL);
+ sspi->base + sspi->regs->tx_dma_io_ctrl);
writel(SIRFSOC_SPI_IO_MODE_SEL,
- sspi->base + SIRFSOC_SPI_RX_DMA_IO_CTRL);
+ sspi->base + sspi->regs->rx_dma_io_ctrl);
}
-
return 0;
}
static int spi_sirfsoc_setup(struct spi_device *spi)
{
struct sirfsoc_spi *sspi;
+ int ret = 0;
sspi = spi_master_get_devdata(spi->master);
-
if (spi->cs_gpio == -ENOENT)
sspi->hw_cs = true;
- else
+ else {
sspi->hw_cs = false;
- return spi_sirfsoc_setup_transfer(spi, NULL);
+ if (!spi_get_ctldata(spi)) {
+ void *cs = kmalloc(sizeof(int), GFP_KERNEL);
+
+ if (!cs) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+ if (!gpio_is_valid(spi->cs_gpio)) {
+ dev_err(&spi->dev, "no valid gpio\n");
+ kfree(cs);
+ ret = -ENOENT;
+ goto exit;
+ }
+ ret = gpio_request(spi->cs_gpio, DRIVER_NAME);
+ if (ret) {
+ dev_err(&spi->dev, "failed to request gpio\n");
+ kfree(cs);
+ goto exit;
+ }
+ spi_set_ctldata(spi, cs);
+ }
+ }
+ spi_sirfsoc_config_mode(spi);
+ spi_sirfsoc_chipselect(spi, BITBANG_CS_INACTIVE);
+exit:
+ return ret;
+}
+
+static void spi_sirfsoc_cleanup(struct spi_device *spi)
+{
+ if (spi_get_ctldata(spi)) {
+ gpio_free(spi->cs_gpio);
+ kfree(spi_get_ctldata(spi));
+ }
}
+static const struct sirf_spi_comp_data sirf_real_spi = {
+ .regs = &real_spi_register,
+ .type = SIRF_REAL_SPI,
+ .dat_max_frm_len = 64 * 1024,
+ .fifo_size = 256,
+};
+
+static const struct sirf_spi_comp_data sirf_usp_spi_p2 = {
+ .regs = &usp_spi_register,
+ .type = SIRF_USP_SPI_P2,
+ .dat_max_frm_len = 1024 * 1024,
+ .fifo_size = 128,
+ .hwinit = sirfsoc_usp_hwinit,
+};
+
+static const struct sirf_spi_comp_data sirf_usp_spi_a7 = {
+ .regs = &usp_spi_register,
+ .type = SIRF_USP_SPI_A7,
+ .dat_max_frm_len = 1024 * 1024,
+ .fifo_size = 512,
+ .hwinit = sirfsoc_usp_hwinit,
+};
+
+static const struct of_device_id spi_sirfsoc_of_match[] = {
+ { .compatible = "sirf,prima2-spi", .data = &sirf_real_spi},
+ { .compatible = "sirf,prima2-usp-spi", .data = &sirf_usp_spi_p2},
+ { .compatible = "sirf,atlas7-usp-spi", .data = &sirf_usp_spi_a7},
+ {}
+};
+MODULE_DEVICE_TABLE(of, spi_sirfsoc_of_match);
+
static int spi_sirfsoc_probe(struct platform_device *pdev)
{
struct sirfsoc_spi *sspi;
struct spi_master *master;
struct resource *mem_res;
+ struct sirf_spi_comp_data *spi_comp_data;
int irq;
- int i, ret;
+ int ret;
+ const struct of_device_id *match;
ret = device_reset(&pdev->dev);
if (ret) {
@@ -659,16 +1088,22 @@ static int spi_sirfsoc_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "Unable to allocate SPI master\n");
return -ENOMEM;
}
+ match = of_match_node(spi_sirfsoc_of_match, pdev->dev.of_node);
platform_set_drvdata(pdev, master);
sspi = spi_master_get_devdata(master);
-
+ spi_comp_data = (struct sirf_spi_comp_data *)match->data;
+ sspi->regs = spi_comp_data->regs;
+ sspi->type = spi_comp_data->type;
+ sspi->dat_max_frm_len = spi_comp_data->dat_max_frm_len;
+ sspi->fifo_size = spi_comp_data->fifo_size;
+ sspi->fifo_level_chk_mask = (sspi->fifo_size / 4) - 1;
+ sspi->fifo_full_offset = ilog2(sspi->fifo_size);
mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
sspi->base = devm_ioremap_resource(&pdev->dev, mem_res);
if (IS_ERR(sspi->base)) {
ret = PTR_ERR(sspi->base);
goto free_master;
}
-
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = -ENXIO;
@@ -684,11 +1119,13 @@ static int spi_sirfsoc_probe(struct platform_device *pdev)
sspi->bitbang.setup_transfer = spi_sirfsoc_setup_transfer;
sspi->bitbang.txrx_bufs = spi_sirfsoc_transfer;
sspi->bitbang.master->setup = spi_sirfsoc_setup;
+ sspi->bitbang.master->cleanup = spi_sirfsoc_cleanup;
master->bus_num = pdev->id;
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_CS_HIGH;
master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(12) |
SPI_BPW_MASK(16) | SPI_BPW_MASK(32);
master->max_speed_hz = SIRFSOC_SPI_DEFAULT_FRQ;
+ master->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX;
sspi->bitbang.master->dev.of_node = pdev->dev.of_node;
/* request DMA channels */
@@ -711,47 +1148,19 @@ static int spi_sirfsoc_probe(struct platform_device *pdev)
goto free_tx_dma;
}
clk_prepare_enable(sspi->clk);
+ if (spi_comp_data->hwinit)
+ spi_comp_data->hwinit(sspi);
sspi->ctrl_freq = clk_get_rate(sspi->clk);
init_completion(&sspi->rx_done);
init_completion(&sspi->tx_done);
- writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
- writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
- writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
- writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
- /* We are not using dummy delay between command and data */
- writel(0, sspi->base + SIRFSOC_SPI_DUMMY_DELAY_CTL);
-
- sspi->dummypage = kmalloc(2 * PAGE_SIZE, GFP_KERNEL);
- if (!sspi->dummypage) {
- ret = -ENOMEM;
- goto free_clk;
- }
-
ret = spi_bitbang_start(&sspi->bitbang);
if (ret)
- goto free_dummypage;
- for (i = 0; master->cs_gpios && i < master->num_chipselect; i++) {
- if (master->cs_gpios[i] == -ENOENT)
- continue;
- if (!gpio_is_valid(master->cs_gpios[i])) {
- dev_err(&pdev->dev, "no valid gpio\n");
- ret = -EINVAL;
- goto free_dummypage;
- }
- ret = devm_gpio_request(&pdev->dev,
- master->cs_gpios[i], DRIVER_NAME);
- if (ret) {
- dev_err(&pdev->dev, "failed to request gpio\n");
- goto free_dummypage;
- }
- }
+ goto free_clk;
dev_info(&pdev->dev, "registerred, bus number = %d\n", master->bus_num);
return 0;
-free_dummypage:
- kfree(sspi->dummypage);
free_clk:
clk_disable_unprepare(sspi->clk);
clk_put(sspi->clk);
@@ -772,9 +1181,7 @@ static int spi_sirfsoc_remove(struct platform_device *pdev)
master = platform_get_drvdata(pdev);
sspi = spi_master_get_devdata(master);
-
spi_bitbang_stop(&sspi->bitbang);
- kfree(sspi->dummypage);
clk_disable_unprepare(sspi->clk);
clk_put(sspi->clk);
dma_release_channel(sspi->rx_chan);
@@ -804,24 +1211,17 @@ static int spi_sirfsoc_resume(struct device *dev)
struct sirfsoc_spi *sspi = spi_master_get_devdata(master);
clk_enable(sspi->clk);
- writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
- writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
- writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
- writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
-
- return spi_master_resume(master);
+ writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + sspi->regs->txfifo_op);
+ writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + sspi->regs->rxfifo_op);
+ writel(SIRFSOC_SPI_FIFO_START, sspi->base + sspi->regs->txfifo_op);
+ writel(SIRFSOC_SPI_FIFO_START, sspi->base + sspi->regs->rxfifo_op);
+ return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(spi_sirfsoc_pm_ops, spi_sirfsoc_suspend,
spi_sirfsoc_resume);
-static const struct of_device_id spi_sirfsoc_of_match[] = {
- { .compatible = "sirf,prima2-spi", },
- {}
-};
-MODULE_DEVICE_TABLE(of, spi_sirfsoc_of_match);
-
static struct platform_driver spi_sirfsoc_driver = {
.driver = {
.name = DRIVER_NAME,
@@ -835,4 +1235,5 @@ module_platform_driver(spi_sirfsoc_driver);
MODULE_DESCRIPTION("SiRF SoC SPI master driver");
MODULE_AUTHOR("Zhiwu Song <Zhiwu.Song@csr.com>");
MODULE_AUTHOR("Barry Song <Baohua.Song@csr.com>");
+MODULE_AUTHOR("Qipan Li <Qipan.Li@csr.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c
new file mode 100644
index 0000000..87b20a5
--- /dev/null
+++ b/drivers/spi/spi-zynqmp-gqspi.c
@@ -0,0 +1,1122 @@
+/*
+ * Xilinx Zynq UltraScale+ MPSoC Quad-SPI (QSPI) controller driver
+ * (master mode only)
+ *
+ * Copyright (C) 2009 - 2015 Xilinx, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+/* Generic QSPI register offsets */
+#define GQSPI_CONFIG_OFST 0x00000100
+#define GQSPI_ISR_OFST 0x00000104
+#define GQSPI_IDR_OFST 0x0000010C
+#define GQSPI_IER_OFST 0x00000108
+#define GQSPI_IMASK_OFST 0x00000110
+#define GQSPI_EN_OFST 0x00000114
+#define GQSPI_TXD_OFST 0x0000011C
+#define GQSPI_RXD_OFST 0x00000120
+#define GQSPI_TX_THRESHOLD_OFST 0x00000128
+#define GQSPI_RX_THRESHOLD_OFST 0x0000012C
+#define GQSPI_LPBK_DLY_ADJ_OFST 0x00000138
+#define GQSPI_GEN_FIFO_OFST 0x00000140
+#define GQSPI_SEL_OFST 0x00000144
+#define GQSPI_GF_THRESHOLD_OFST 0x00000150
+#define GQSPI_FIFO_CTRL_OFST 0x0000014C
+#define GQSPI_QSPIDMA_DST_CTRL_OFST 0x0000080C
+#define GQSPI_QSPIDMA_DST_SIZE_OFST 0x00000804
+#define GQSPI_QSPIDMA_DST_STS_OFST 0x00000808
+#define GQSPI_QSPIDMA_DST_I_STS_OFST 0x00000814
+#define GQSPI_QSPIDMA_DST_I_EN_OFST 0x00000818
+#define GQSPI_QSPIDMA_DST_I_DIS_OFST 0x0000081C
+#define GQSPI_QSPIDMA_DST_I_MASK_OFST 0x00000820
+#define GQSPI_QSPIDMA_DST_ADDR_OFST 0x00000800
+#define GQSPI_QSPIDMA_DST_ADDR_MSB_OFST 0x00000828
+
+/* GQSPI register bit masks */
+#define GQSPI_SEL_MASK 0x00000001
+#define GQSPI_EN_MASK 0x00000001
+#define GQSPI_LPBK_DLY_ADJ_USE_LPBK_MASK 0x00000020
+#define GQSPI_ISR_WR_TO_CLR_MASK 0x00000002
+#define GQSPI_IDR_ALL_MASK 0x00000FBE
+#define GQSPI_CFG_MODE_EN_MASK 0xC0000000
+#define GQSPI_CFG_GEN_FIFO_START_MODE_MASK 0x20000000
+#define GQSPI_CFG_ENDIAN_MASK 0x04000000
+#define GQSPI_CFG_EN_POLL_TO_MASK 0x00100000
+#define GQSPI_CFG_WP_HOLD_MASK 0x00080000
+#define GQSPI_CFG_BAUD_RATE_DIV_MASK 0x00000038
+#define GQSPI_CFG_CLK_PHA_MASK 0x00000004
+#define GQSPI_CFG_CLK_POL_MASK 0x00000002
+#define GQSPI_CFG_START_GEN_FIFO_MASK 0x10000000
+#define GQSPI_GENFIFO_IMM_DATA_MASK 0x000000FF
+#define GQSPI_GENFIFO_DATA_XFER 0x00000100
+#define GQSPI_GENFIFO_EXP 0x00000200
+#define GQSPI_GENFIFO_MODE_SPI 0x00000400
+#define GQSPI_GENFIFO_MODE_DUALSPI 0x00000800
+#define GQSPI_GENFIFO_MODE_QUADSPI 0x00000C00
+#define GQSPI_GENFIFO_MODE_MASK 0x00000C00
+#define GQSPI_GENFIFO_CS_LOWER 0x00001000
+#define GQSPI_GENFIFO_CS_UPPER 0x00002000
+#define GQSPI_GENFIFO_BUS_LOWER 0x00004000
+#define GQSPI_GENFIFO_BUS_UPPER 0x00008000
+#define GQSPI_GENFIFO_BUS_BOTH 0x0000C000
+#define GQSPI_GENFIFO_BUS_MASK 0x0000C000
+#define GQSPI_GENFIFO_TX 0x00010000
+#define GQSPI_GENFIFO_RX 0x00020000
+#define GQSPI_GENFIFO_STRIPE 0x00040000
+#define GQSPI_GENFIFO_POLL 0x00080000
+#define GQSPI_GENFIFO_EXP_START 0x00000100
+#define GQSPI_FIFO_CTRL_RST_RX_FIFO_MASK 0x00000004
+#define GQSPI_FIFO_CTRL_RST_TX_FIFO_MASK 0x00000002
+#define GQSPI_FIFO_CTRL_RST_GEN_FIFO_MASK 0x00000001
+#define GQSPI_ISR_RXEMPTY_MASK 0x00000800
+#define GQSPI_ISR_GENFIFOFULL_MASK 0x00000400
+#define GQSPI_ISR_GENFIFONOT_FULL_MASK 0x00000200
+#define GQSPI_ISR_TXEMPTY_MASK 0x00000100
+#define GQSPI_ISR_GENFIFOEMPTY_MASK 0x00000080
+#define GQSPI_ISR_RXFULL_MASK 0x00000020
+#define GQSPI_ISR_RXNEMPTY_MASK 0x00000010
+#define GQSPI_ISR_TXFULL_MASK 0x00000008
+#define GQSPI_ISR_TXNOT_FULL_MASK 0x00000004
+#define GQSPI_ISR_POLL_TIME_EXPIRE_MASK 0x00000002
+#define GQSPI_IER_TXNOT_FULL_MASK 0x00000004
+#define GQSPI_IER_RXEMPTY_MASK 0x00000800
+#define GQSPI_IER_POLL_TIME_EXPIRE_MASK 0x00000002
+#define GQSPI_IER_RXNEMPTY_MASK 0x00000010
+#define GQSPI_IER_GENFIFOEMPTY_MASK 0x00000080
+#define GQSPI_IER_TXEMPTY_MASK 0x00000100
+#define GQSPI_QSPIDMA_DST_INTR_ALL_MASK 0x000000FE
+#define GQSPI_QSPIDMA_DST_STS_WTC 0x0000E000
+#define GQSPI_CFG_MODE_EN_DMA_MASK 0x80000000
+#define GQSPI_ISR_IDR_MASK 0x00000994
+#define GQSPI_QSPIDMA_DST_I_EN_DONE_MASK 0x00000002
+#define GQSPI_QSPIDMA_DST_I_STS_DONE_MASK 0x00000002
+#define GQSPI_IRQ_MASK 0x00000980
+
+#define GQSPI_CFG_BAUD_RATE_DIV_SHIFT 3
+#define GQSPI_GENFIFO_CS_SETUP 0x4
+#define GQSPI_GENFIFO_CS_HOLD 0x3
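+/* CS setup/hold counts, passed in the GENFIFO immediate-data field */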
+#define GQSPI_TXD_DEPTH 64
+#define GQSPI_RX_FIFO_THRESHOLD 32
+#define GQSPI_RX_FIFO_FILL (GQSPI_RX_FIFO_THRESHOLD * 4)
+#define GQSPI_TX_FIFO_THRESHOLD_RESET_VAL 32
+#define GQSPI_TX_FIFO_FILL (GQSPI_TXD_DEPTH -\
+ GQSPI_TX_FIFO_THRESHOLD_RESET_VAL)
+#define GQSPI_GEN_FIFO_THRESHOLD_RESET_VAL 0x10
+#define GQSPI_QSPIDMA_DST_CTRL_RESET_VAL 0x803FFA00
+#define GQSPI_SELECT_FLASH_CS_LOWER 0x1
+#define GQSPI_SELECT_FLASH_CS_UPPER 0x2
+#define GQSPI_SELECT_FLASH_CS_BOTH 0x3
+#define GQSPI_SELECT_FLASH_BUS_LOWER 0x1
+#define GQSPI_SELECT_FLASH_BUS_UPPER 0x2
+#define GQSPI_SELECT_FLASH_BUS_BOTH 0x3
+#define GQSPI_BAUD_DIV_MAX 7 /* Baud rate divisor maximum */
+#define GQSPI_BAUD_DIV_SHIFT 2 /* Baud rate divisor shift */
+#define GQSPI_SELECT_MODE_SPI 0x1
+#define GQSPI_SELECT_MODE_DUALSPI 0x2
+#define GQSPI_SELECT_MODE_QUADSPI 0x4
+#define GQSPI_DMA_UNALIGN 0x3
+#define GQSPI_DEFAULT_NUM_CS 1 /* Default number of chip selects */
+
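+/*
+ * Transfers run either in IO (PIO) mode or, for receive data, through the
+ * controller's built-in destination DMA.
+ */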
+enum mode_type {GQSPI_MODE_IO, GQSPI_MODE_DMA};
+
+/**
+ * struct zynqmp_qspi - Defines qspi driver instance
+ * @regs: Virtual address of the QSPI controller registers
+ * @refclk: Pointer to the peripheral clock
+ * @pclk: Pointer to the APB clock
+ * @irq: IRQ number
+ * @dev: Pointer to struct device
+ * @txbuf: Pointer to the TX buffer
+ * @rxbuf: Pointer to the RX buffer
+ * @bytes_to_transfer: Number of bytes left to transfer
+ * @bytes_to_receive: Number of bytes left to receive
+ * @genfifocs: Used for chip select
+ * @genfifobus: Used to select the upper or lower bus
+ * @dma_rx_bytes: Remaining bytes to receive by DMA mode
+ * @dma_addr: DMA address after mapping the kernel buffer
+ * @genfifoentry: Used for storing the genfifoentry instruction.
+ * @mode: Defines the mode in which QSPI is operating
+ */
+struct zynqmp_qspi {
+ void __iomem *regs;
+ struct clk *refclk;
+ struct clk *pclk;
+ int irq;
+ struct device *dev;
+ const void *txbuf;
+ void *rxbuf;
+ int bytes_to_transfer;
+ int bytes_to_receive;
+ u32 genfifocs;
+ u32 genfifobus;
+ u32 dma_rx_bytes;
+ dma_addr_t dma_addr;
+ u32 genfifoentry;
+ enum mode_type mode;
+};
+
+/**
+ * zynqmp_gqspi_read: For GQSPI controller read operation
+ * @xqspi: Pointer to the zynqmp_qspi structure
+ * @offset: Offset from where to read
+ */
+static u32 zynqmp_gqspi_read(struct zynqmp_qspi *xqspi, u32 offset)
+{
+ return readl_relaxed(xqspi->regs + offset);
+}
+
+/**
+ * zynqmp_gqspi_write: For GQSPI controller write operation
+ * @xqspi: Pointer to the zynqmp_qspi structure
+ * @offset: Offset where to write
+ * @val: Value to be written
+ */
+static inline void zynqmp_gqspi_write(struct zynqmp_qspi *xqspi, u32 offset,
+ u32 val)
+{
+ writel_relaxed(val, (xqspi->regs + offset));
+}
+
+/**
+ * zynqmp_gqspi_selectslave: For selection of slave device
+ * @instanceptr: Pointer to the zynqmp_qspi structure
+ * @slavecs: Chip select to use (lower, upper or both)
+ * @slavebus: Bus to use (lower, upper or both)
+ */
+static void zynqmp_gqspi_selectslave(struct zynqmp_qspi *instanceptr,
+ u8 slavecs, u8 slavebus)
+{
+ /*
+ * Bus and CS lines selected here will be updated in the instance and
+ * used for subsequent GENFIFO entries during transfer.
+ */
+
+ /* Choose slave select line */
+ switch (slavecs) {
+ case GQSPI_SELECT_FLASH_CS_BOTH:
+ instanceptr->genfifocs = GQSPI_GENFIFO_CS_LOWER |
+ GQSPI_GENFIFO_CS_UPPER;
+ break;
+ case GQSPI_SELECT_FLASH_CS_UPPER:
+ instanceptr->genfifocs = GQSPI_GENFIFO_CS_UPPER;
+ break;
+ case GQSPI_SELECT_FLASH_CS_LOWER:
+ instanceptr->genfifocs = GQSPI_GENFIFO_CS_LOWER;
+ break;
+ default:
+ dev_warn(instanceptr->dev, "Invalid slave select\n");
+ }
+
+ /* Choose the bus */
+ switch (slavebus) {
+ case GQSPI_SELECT_FLASH_BUS_BOTH:
+ instanceptr->genfifobus = GQSPI_GENFIFO_BUS_LOWER |
+ GQSPI_GENFIFO_BUS_UPPER;
+ break;
+ case GQSPI_SELECT_FLASH_BUS_UPPER:
+ instanceptr->genfifobus = GQSPI_GENFIFO_BUS_UPPER;
+ break;
+ case GQSPI_SELECT_FLASH_BUS_LOWER:
+ instanceptr->genfifobus = GQSPI_GENFIFO_BUS_LOWER;
+ break;
+ default:
+ dev_warn(instanceptr->dev, "Invalid slave bus\n");
+ }
+}
+
+/**
+ * zynqmp_qspi_init_hw: Initialize the hardware
+ * @xqspi: Pointer to the zynqmp_qspi structure
+ *
+ * The default settings of the QSPI controller's configurable parameters on
+ * reset are
+ * - Master mode
+ * - TX threshold set to 1
+ * - RX threshold set to 1
+ * - Flash memory interface mode enabled
+ * This function performs the following actions
+ * - Disable and clear all the interrupts
+ * - Enable manual slave select
+ * - Enable manual start
+ * - Deselect all the chip select lines
+ * - Set the little endian mode of TX FIFO and
+ * - Enable the QSPI controller
+ */
+static void zynqmp_qspi_init_hw(struct zynqmp_qspi *xqspi)
+{
+ u32 config_reg;
+
+ /* Select the GQSPI mode */
+ zynqmp_gqspi_write(xqspi, GQSPI_SEL_OFST, GQSPI_SEL_MASK);
+ /* Clear and disable interrupts */
+ zynqmp_gqspi_write(xqspi, GQSPI_ISR_OFST,
+ zynqmp_gqspi_read(xqspi, GQSPI_ISR_OFST) |
+ GQSPI_ISR_WR_TO_CLR_MASK);
+ /* Clear the DMA STS */
+ zynqmp_gqspi_write(xqspi, GQSPI_QSPIDMA_DST_I_STS_OFST,
+ zynqmp_gqspi_read(xqspi,
+ GQSPI_QSPIDMA_DST_I_STS_OFST));
+ zynqmp_gqspi_write(xqspi, GQSPI_QSPIDMA_DST_STS_OFST,
+ zynqmp_gqspi_read(xqspi,
+ GQSPI_QSPIDMA_DST_STS_OFST) |
+ GQSPI_QSPIDMA_DST_STS_WTC);
+ zynqmp_gqspi_write(xqspi, GQSPI_IDR_OFST, GQSPI_IDR_ALL_MASK);
+ zynqmp_gqspi_write(xqspi,
+ GQSPI_QSPIDMA_DST_I_DIS_OFST,
+ GQSPI_QSPIDMA_DST_INTR_ALL_MASK);
+ /* Disable the GQSPI */
+ zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, 0x0);
+ config_reg = zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST);
+ config_reg &= ~GQSPI_CFG_MODE_EN_MASK;
+ /* Manual start */
+ config_reg |= GQSPI_CFG_GEN_FIFO_START_MODE_MASK;
+ /* Little endian by default */
+ config_reg &= ~GQSPI_CFG_ENDIAN_MASK;
+ /* Disable poll time out */
+ config_reg &= ~GQSPI_CFG_EN_POLL_TO_MASK;
+ /* Set hold bit */
+ config_reg |= GQSPI_CFG_WP_HOLD_MASK;
+ /* Clear pre-scalar by default */
+ config_reg &= ~GQSPI_CFG_BAUD_RATE_DIV_MASK;
+ /* CPHA 0 */
+ config_reg &= ~GQSPI_CFG_CLK_PHA_MASK;
+ /* CPOL 0 */
+ config_reg &= ~GQSPI_CFG_CLK_POL_MASK;
+ zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg);
+
+ /* Clear the TX and RX FIFO */
+ zynqmp_gqspi_write(xqspi, GQSPI_FIFO_CTRL_OFST,
+ GQSPI_FIFO_CTRL_RST_RX_FIFO_MASK |
+ GQSPI_FIFO_CTRL_RST_TX_FIFO_MASK |
+ GQSPI_FIFO_CTRL_RST_GEN_FIFO_MASK);
+ /* Set by default to allow for high frequencies */
+ zynqmp_gqspi_write(xqspi, GQSPI_LPBK_DLY_ADJ_OFST,
+ zynqmp_gqspi_read(xqspi, GQSPI_LPBK_DLY_ADJ_OFST) |
+ GQSPI_LPBK_DLY_ADJ_USE_LPBK_MASK);
+ /* Reset thresholds */
+ zynqmp_gqspi_write(xqspi, GQSPI_TX_THRESHOLD_OFST,
+ GQSPI_TX_FIFO_THRESHOLD_RESET_VAL);
+ zynqmp_gqspi_write(xqspi, GQSPI_RX_THRESHOLD_OFST,
+ GQSPI_RX_FIFO_THRESHOLD);
+ zynqmp_gqspi_write(xqspi, GQSPI_GF_THRESHOLD_OFST,
+ GQSPI_GEN_FIFO_THRESHOLD_RESET_VAL);
+ zynqmp_gqspi_selectslave(xqspi,
+ GQSPI_SELECT_FLASH_CS_LOWER,
+ GQSPI_SELECT_FLASH_BUS_LOWER);
+ /* Initialize DMA */
+ zynqmp_gqspi_write(xqspi,
+ GQSPI_QSPIDMA_DST_CTRL_OFST,
+ GQSPI_QSPIDMA_DST_CTRL_RESET_VAL);
+
+ /* Enable the GQSPI */
+ zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, GQSPI_EN_MASK);
+}
+
+/**
+ * zynqmp_qspi_copy_read_data: Copy data to RX buffer
+ * @xqspi: Pointer to the zynqmp_qspi structure
+ * @data: The variable where data is stored
+ * @size: Number of bytes to be copied from data to RX buffer
+ */
+static void zynqmp_qspi_copy_read_data(struct zynqmp_qspi *xqspi,
+ ulong data, u8 size)
+{
+ memcpy(xqspi->rxbuf, &data, size);
+ xqspi->rxbuf += size;
+ xqspi->bytes_to_receive -= size;
+}
+
+/**
+ * zynqmp_prepare_transfer_hardware: Prepares hardware for transfer.
+ * @master: Pointer to the spi_master structure which provides
+ * information about the controller.
+ *
+ * This function enables SPI master controller.
+ *
+ * Return: 0 on success; error value otherwise
+ */
+static int zynqmp_prepare_transfer_hardware(struct spi_master *master)
+{
+ struct zynqmp_qspi *xqspi = spi_master_get_devdata(master);
+ int ret;
+
+ ret = clk_enable(xqspi->refclk);
+ if (ret)
+ return ret;
+
+ ret = clk_enable(xqspi->pclk);
+ if (ret) {
+ clk_disable(xqspi->refclk);
+ return ret;
+ }
+
+ zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, GQSPI_EN_MASK);
+ return 0;
+}
+
+/**
+ * zynqmp_unprepare_transfer_hardware: Relaxes hardware after transfer
+ * @master: Pointer to the spi_master structure which provides
+ * information about the controller.
+ *
+ * This function disables the SPI master controller.
+ *
+ * Return: Always 0
+ */
+static int zynqmp_unprepare_transfer_hardware(struct spi_master *master)
+{
+ struct zynqmp_qspi *xqspi = spi_master_get_devdata(master);
+
+ zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, 0x0);
+ clk_disable(xqspi->refclk);
+ clk_disable(xqspi->pclk);
+ return 0;
+}
+
+/**
+ * zynqmp_qspi_chipselect: Select or deselect the chip select line
+ * @qspi: Pointer to the spi_device structure
+ * @is_high: Select (0) or deselect (1) the chip select line
+ */
+static void zynqmp_qspi_chipselect(struct spi_device *qspi, bool is_high)
+{
+ struct zynqmp_qspi *xqspi = spi_master_get_devdata(qspi->master);
+ ulong timeout;
+ u32 genfifoentry = 0x0, statusreg;
+
+ genfifoentry |= GQSPI_GENFIFO_MODE_SPI;
+ genfifoentry |= xqspi->genfifobus;
+
+ if (!is_high) {
+ genfifoentry |= xqspi->genfifocs;
+ genfifoentry |= GQSPI_GENFIFO_CS_SETUP;
+ } else {
+ genfifoentry |= GQSPI_GENFIFO_CS_HOLD;
+ }
+
+ zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, genfifoentry);
+
+ /* Dummy generic FIFO entry */
+ zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, 0x0);
+
+ /* Manually start the generic FIFO command */
+ zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST,
+ zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST) |
+ GQSPI_CFG_START_GEN_FIFO_MASK);
+
+ timeout = jiffies + msecs_to_jiffies(1000);
+
+ /* Wait until the generic FIFO command is empty */
+ do {
+ statusreg = zynqmp_gqspi_read(xqspi, GQSPI_ISR_OFST);
+
+ if ((statusreg & GQSPI_ISR_GENFIFOEMPTY_MASK) &&
+ (statusreg & GQSPI_ISR_TXEMPTY_MASK))
+ break;
+ else
+ cpu_relax();
+ } while (!time_after_eq(jiffies, timeout));
+
+ if (time_after_eq(jiffies, timeout))
+ dev_err(xqspi->dev, "Chip select timed out\n");
+}
+
+/**
+ * zynqmp_qspi_setup_transfer: Configure QSPI controller for specified
+ * transfer
+ * @qspi: Pointer to the spi_device structure
+ * @transfer: Pointer to the spi_transfer structure which provides
+ * information about next transfer setup parameters
+ *
+ * Sets the operational mode of QSPI controller for the next QSPI transfer and
+ * sets the requested clock frequency.
+ *
+ * Return: Always 0
+ *
+ * Note:
+ * If the requested frequency cannot be produced exactly by the
+ * pre-scaler, the driver picks the closest available frequency below
+ * the requested one for the transfer.
+ *
+ * If the requested frequency lies outside the range supported by the
+ * QSPI controller, the driver clamps it to the highest or lowest
+ * frequency the controller supports.
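+ *
+ * For example, assuming a 300 MHz reference clock and a requested
+ * 50 MHz, the divisor search below settles on baud_rate_val = 2
+ * (divide by 8), giving an actual SCK of 37.5 MHz.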
+ */
+static int zynqmp_qspi_setup_transfer(struct spi_device *qspi,
+ struct spi_transfer *transfer)
+{
+ struct zynqmp_qspi *xqspi = spi_master_get_devdata(qspi->master);
+ ulong clk_rate;
+ u32 config_reg, req_hz, baud_rate_val = 0;
+
+ if (transfer)
+ req_hz = transfer->speed_hz;
+ else
+ req_hz = qspi->max_speed_hz;
+
+ /* Set the clock frequency */
+ /* If req_hz == 0, default to lowest speed */
+ clk_rate = clk_get_rate(xqspi->refclk);
+
+ while ((baud_rate_val < GQSPI_BAUD_DIV_MAX) &&
+ (clk_rate /
+ (GQSPI_BAUD_DIV_SHIFT << baud_rate_val)) > req_hz)
+ baud_rate_val++;
+
+ config_reg = zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST);
+
+ /* Set the QSPI clock phase and clock polarity */
+ config_reg &= (~GQSPI_CFG_CLK_PHA_MASK) & (~GQSPI_CFG_CLK_POL_MASK);
+
+ if (qspi->mode & SPI_CPHA)
+ config_reg |= GQSPI_CFG_CLK_PHA_MASK;
+ if (qspi->mode & SPI_CPOL)
+ config_reg |= GQSPI_CFG_CLK_POL_MASK;
+
+ config_reg &= ~GQSPI_CFG_BAUD_RATE_DIV_MASK;
+ config_reg |= (baud_rate_val << GQSPI_CFG_BAUD_RATE_DIV_SHIFT);
+ zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg);
+ return 0;
+}
+
+/**
+ * zynqmp_qspi_setup: Configure the QSPI controller
+ * @qspi: Pointer to the spi_device structure
+ *
+ * The per-transfer mode and clock settings are programmed in
+ * zynqmp_qspi_setup_transfer(); this callback only rejects setup requests
+ * while the controller is busy.
+ *
+ * Return: -EBUSY if the controller is busy; 0 otherwise.
+ */
+static int zynqmp_qspi_setup(struct spi_device *qspi)
+{
+ if (qspi->master->busy)
+ return -EBUSY;
+ return 0;
+}
+
+/**
+ * zynqmp_qspi_filltxfifo: Fills the TX FIFO as long as there is room in
+ *                         the FIFO and there are bytes remaining to be
+ *                         transmitted.
+ * @xqspi: Pointer to the zynqmp_qspi structure
+ * @size: Maximum number of 32-bit words to copy from the TX buffer to the
+ *        TX FIFO
+ */
+static void zynqmp_qspi_filltxfifo(struct zynqmp_qspi *xqspi, int size)
+{
+ u32 count = 0, intermediate;
+
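+	/*
+	 * The TX FIFO is written one 32-bit word at a time; for a trailing
+	 * partial word a full word is still copied from the buffer, but the
+	 * transfer length programmed in the GENFIFO entry bounds how many of
+	 * those bytes actually go out on the bus.
+	 */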
+ while ((xqspi->bytes_to_transfer > 0) && (count < size)) {
+ memcpy(&intermediate, xqspi->txbuf, 4);
+ zynqmp_gqspi_write(xqspi, GQSPI_TXD_OFST, intermediate);
+
+ if (xqspi->bytes_to_transfer >= 4) {
+ xqspi->txbuf += 4;
+ xqspi->bytes_to_transfer -= 4;
+ } else {
+ xqspi->txbuf += xqspi->bytes_to_transfer;
+ xqspi->bytes_to_transfer = 0;
+ }
+ count++;
+ }
+}
+
+/**
+ * zynqmp_qspi_readrxfifo: Drains the RX FIFO into the RX buffer as long as
+ *                         data is available and bytes remain to be
+ *                         received.
+ * @xqspi: Pointer to the zynqmp_qspi structure
+ * @size: Maximum number of bytes to copy from the RX FIFO to the RX buffer
+ */
+static void zynqmp_qspi_readrxfifo(struct zynqmp_qspi *xqspi, u32 size)
+{
+ ulong data;
+ int count = 0;
+
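+	/*
+	 * Whole 32-bit words are copied straight into the RX buffer; a
+	 * trailing partial word is read into a temporary and only the
+	 * remaining byte count is copied out via
+	 * zynqmp_qspi_copy_read_data().
+	 */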
+ while ((count < size) && (xqspi->bytes_to_receive > 0)) {
+ if (xqspi->bytes_to_receive >= 4) {
+ (*(u32 *) xqspi->rxbuf) =
+ zynqmp_gqspi_read(xqspi, GQSPI_RXD_OFST);
+ xqspi->rxbuf += 4;
+ xqspi->bytes_to_receive -= 4;
+ count += 4;
+ } else {
+ data = zynqmp_gqspi_read(xqspi, GQSPI_RXD_OFST);
+ count += xqspi->bytes_to_receive;
+ zynqmp_qspi_copy_read_data(xqspi, data,
+ xqspi->bytes_to_receive);
+ xqspi->bytes_to_receive = 0;
+ }
+ }
+}
+
+/**
+ * zynqmp_process_dma_irq: Handler for DMA done interrupt of QSPI
+ * controller
+ * @xqspi: zynqmp_qspi instance pointer
+ *
+ * This function handles the DMA-done interrupt: it unmaps the DMA receive
+ * buffer and, if any tail bytes remain, switches to IO mode to collect them.
+ */
+static void zynqmp_process_dma_irq(struct zynqmp_qspi *xqspi)
+{
+ u32 config_reg, genfifoentry;
+
+ dma_unmap_single(xqspi->dev, xqspi->dma_addr,
+ xqspi->dma_rx_bytes, DMA_FROM_DEVICE);
+ xqspi->rxbuf += xqspi->dma_rx_bytes;
+ xqspi->bytes_to_receive -= xqspi->dma_rx_bytes;
+ xqspi->dma_rx_bytes = 0;
+
+ /* Disabling the DMA interrupts */
+ zynqmp_gqspi_write(xqspi, GQSPI_QSPIDMA_DST_I_DIS_OFST,
+ GQSPI_QSPIDMA_DST_I_EN_DONE_MASK);
+
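+	/*
+	 * DMA covers only the word-aligned part of the receive buffer (see
+	 * zynqmp_qspi_setuprxdma()); any remaining tail bytes are collected
+	 * in IO mode through the RX FIFO interrupts enabled below.
+	 */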
+ if (xqspi->bytes_to_receive > 0) {
+		/* Switch to IO mode for the remaining bytes to receive */
+ config_reg = zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST);
+ config_reg &= ~GQSPI_CFG_MODE_EN_MASK;
+ zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg);
+
+ /* Initiate the transfer of remaining bytes */
+ genfifoentry = xqspi->genfifoentry;
+ genfifoentry |= xqspi->bytes_to_receive;
+ zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, genfifoentry);
+
+ /* Dummy generic FIFO entry */
+ zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, 0x0);
+
+ /* Manual start */
+ zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST,
+ (zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST) |
+ GQSPI_CFG_START_GEN_FIFO_MASK));
+
+ /* Enable the RX interrupts for IO mode */
+ zynqmp_gqspi_write(xqspi, GQSPI_IER_OFST,
+ GQSPI_IER_GENFIFOEMPTY_MASK |
+ GQSPI_IER_RXNEMPTY_MASK |
+ GQSPI_IER_RXEMPTY_MASK);
+ }
+}
+
+/**
+ * zynqmp_qspi_irq: Interrupt service routine of the QSPI controller
+ * @irq: IRQ number
+ * @dev_id: Pointer to the spi_master structure of the controller
+ *
+ * This function handles the TX, RX and DMA-done interrupts of the
+ * controller: it refills the TX FIFO while data remains to be transmitted,
+ * drains the RX FIFO (or completes the DMA receive), and finalizes the
+ * transfer once both directions are done.
+ *
+ * Return: IRQ_HANDLED when interrupt is handled
+ * IRQ_NONE otherwise.
+ */
+static irqreturn_t zynqmp_qspi_irq(int irq, void *dev_id)
+{
+ struct spi_master *master = dev_id;
+ struct zynqmp_qspi *xqspi = spi_master_get_devdata(master);
+	irqreturn_t ret = IRQ_NONE;
+ u32 status, mask, dma_status = 0;
+
+ status = zynqmp_gqspi_read(xqspi, GQSPI_ISR_OFST);
+ zynqmp_gqspi_write(xqspi, GQSPI_ISR_OFST, status);
+ mask = (status & ~(zynqmp_gqspi_read(xqspi, GQSPI_IMASK_OFST)));
+
+ /* Read and clear DMA status */
+ if (xqspi->mode == GQSPI_MODE_DMA) {
+ dma_status =
+ zynqmp_gqspi_read(xqspi, GQSPI_QSPIDMA_DST_I_STS_OFST);
+ zynqmp_gqspi_write(xqspi, GQSPI_QSPIDMA_DST_I_STS_OFST,
+ dma_status);
+ }
+
+ if (mask & GQSPI_ISR_TXNOT_FULL_MASK) {
+ zynqmp_qspi_filltxfifo(xqspi, GQSPI_TX_FIFO_FILL);
+ ret = IRQ_HANDLED;
+ }
+
+ if (dma_status & GQSPI_QSPIDMA_DST_I_STS_DONE_MASK) {
+ zynqmp_process_dma_irq(xqspi);
+ ret = IRQ_HANDLED;
+ } else if (!(mask & GQSPI_IER_RXEMPTY_MASK) &&
+ (mask & GQSPI_IER_GENFIFOEMPTY_MASK)) {
+ zynqmp_qspi_readrxfifo(xqspi, GQSPI_RX_FIFO_FILL);
+ ret = IRQ_HANDLED;
+ }
+
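+	/*
+	 * Once both directions are fully drained and the controller reports
+	 * the expected idle/empty status, mask the interrupts and tell the
+	 * SPI core that the transfer has finished.
+	 */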
+	if ((xqspi->bytes_to_receive == 0) &&
+	    (xqspi->bytes_to_transfer == 0) &&
+	    ((status & GQSPI_IRQ_MASK) == GQSPI_IRQ_MASK)) {
+ zynqmp_gqspi_write(xqspi, GQSPI_IDR_OFST, GQSPI_ISR_IDR_MASK);
+ spi_finalize_current_transfer(master);
+ ret = IRQ_HANDLED;
+ }
+ return ret;
+}
+
+/**
+ * zynqmp_qspi_selectspimode: Selects SPI mode - x1 or x2 or x4.
+ * @xqspi: Pointer to the GQSPI instance
+ * @spimode: SPI mode to use: SPI (x1), DUAL (x2) or QUAD (x4)
+ *
+ * Return: Mask to set desired SPI mode in GENFIFO entry.
+ */
+static inline u32 zynqmp_qspi_selectspimode(struct zynqmp_qspi *xqspi,
+ u8 spimode)
+{
+ u32 mask = 0;
+
+ switch (spimode) {
+ case GQSPI_SELECT_MODE_DUALSPI:
+ mask = GQSPI_GENFIFO_MODE_DUALSPI;
+ break;
+ case GQSPI_SELECT_MODE_QUADSPI:
+ mask = GQSPI_GENFIFO_MODE_QUADSPI;
+ break;
+ case GQSPI_SELECT_MODE_SPI:
+ mask = GQSPI_GENFIFO_MODE_SPI;
+ break;
+ default:
+ dev_warn(xqspi->dev, "Invalid SPI mode\n");
+ }
+
+ return mask;
+}
+
+/**
+ * zynqmp_qspi_setuprxdma: Set up the RX DMA operation
+ * @xqspi: Pointer to the GQSPI instance
+ */
+static void zynqmp_qspi_setuprxdma(struct zynqmp_qspi *xqspi)
+{
+ u32 rx_bytes, rx_rem, config_reg;
+ dma_addr_t addr;
+ u64 dma_align = (u64)(uintptr_t)xqspi->rxbuf;
+
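+	/*
+	 * Fall back to IO mode for short (< 8 byte) receives or buffers that
+	 * fail the GQSPI_DMA_UNALIGN check; DMA handles only the aligned,
+	 * word-multiple part of a buffer.
+	 */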
+ if ((xqspi->bytes_to_receive < 8) ||
+ ((dma_align & GQSPI_DMA_UNALIGN) != 0x0)) {
+ /* Setting to IO mode */
+ config_reg = zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST);
+ config_reg &= ~GQSPI_CFG_MODE_EN_MASK;
+ zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg);
+ xqspi->mode = GQSPI_MODE_IO;
+ xqspi->dma_rx_bytes = 0;
+ return;
+ }
+
+ rx_rem = xqspi->bytes_to_receive % 4;
+ rx_bytes = (xqspi->bytes_to_receive - rx_rem);
+
+ addr = dma_map_single(xqspi->dev, (void *)xqspi->rxbuf,
+ rx_bytes, DMA_FROM_DEVICE);
+ if (dma_mapping_error(xqspi->dev, addr))
+ dev_err(xqspi->dev, "ERR:rxdma:memory not mapped\n");
+
+ xqspi->dma_rx_bytes = rx_bytes;
+ xqspi->dma_addr = addr;
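+	/*
+	 * Program the DMA destination address: the low 32 bits go into
+	 * DST_ADDR and the upper bits, masked to 12 bits, into DST_ADDR_MSB.
+	 */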
+ zynqmp_gqspi_write(xqspi, GQSPI_QSPIDMA_DST_ADDR_OFST,
+ (u32)(addr & 0xffffffff));
+ addr = ((addr >> 16) >> 16);
+ zynqmp_gqspi_write(xqspi, GQSPI_QSPIDMA_DST_ADDR_MSB_OFST,
+ ((u32)addr) & 0xfff);
+
+ /* Enabling the DMA mode */
+ config_reg = zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST);
+ config_reg &= ~GQSPI_CFG_MODE_EN_MASK;
+ config_reg |= GQSPI_CFG_MODE_EN_DMA_MASK;
+ zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg);
+
+ /* Switch to DMA mode */
+ xqspi->mode = GQSPI_MODE_DMA;
+
+ /* Write the number of bytes to transfer */
+ zynqmp_gqspi_write(xqspi, GQSPI_QSPIDMA_DST_SIZE_OFST, rx_bytes);
+}
+
+/**
+ * zynqmp_qspi_txrxsetup: Checks the TX/RX buffers of the transfer and sets
+ *                        up the GENFIFO entry and the TX FIFO as required.
+ * @xqspi: Pointer to the GQSPI instance
+ * @transfer: Pointer to the structure containing the transfer data
+ * @genfifoentry: Pointer to the variable in which the GENFIFO mask is
+ *                returned to the caller
+ */
+static void zynqmp_qspi_txrxsetup(struct zynqmp_qspi *xqspi,
+ struct spi_transfer *transfer,
+ u32 *genfifoentry)
+{
+ u32 config_reg;
+
+ /* Transmit */
+ if ((xqspi->txbuf != NULL) && (xqspi->rxbuf == NULL)) {
+ /* Setup data to be TXed */
+ *genfifoentry &= ~GQSPI_GENFIFO_RX;
+ *genfifoentry |= GQSPI_GENFIFO_DATA_XFER;
+ *genfifoentry |= GQSPI_GENFIFO_TX;
+ *genfifoentry |=
+ zynqmp_qspi_selectspimode(xqspi, transfer->tx_nbits);
+ xqspi->bytes_to_transfer = transfer->len;
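+		/*
+		 * DMA is used only on the receive path, so make sure a
+		 * transmit-only transfer runs in IO mode.
+		 */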
+ if (xqspi->mode == GQSPI_MODE_DMA) {
+ config_reg = zynqmp_gqspi_read(xqspi,
+ GQSPI_CONFIG_OFST);
+ config_reg &= ~GQSPI_CFG_MODE_EN_MASK;
+ zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST,
+ config_reg);
+ xqspi->mode = GQSPI_MODE_IO;
+ }
+ zynqmp_qspi_filltxfifo(xqspi, GQSPI_TXD_DEPTH);
+ /* Discard RX data */
+ xqspi->bytes_to_receive = 0;
+ } else if ((xqspi->txbuf == NULL) && (xqspi->rxbuf != NULL)) {
+ /* Receive */
+
+ /* TX auto fill */
+ *genfifoentry &= ~GQSPI_GENFIFO_TX;
+ /* Setup RX */
+ *genfifoentry |= GQSPI_GENFIFO_DATA_XFER;
+ *genfifoentry |= GQSPI_GENFIFO_RX;
+ *genfifoentry |=
+ zynqmp_qspi_selectspimode(xqspi, transfer->rx_nbits);
+ xqspi->bytes_to_transfer = 0;
+ xqspi->bytes_to_receive = transfer->len;
+		zynqmp_qspi_setuprxdma(xqspi);
+ }
+}
+
+/**
+ * zynqmp_qspi_start_transfer: Initiates the QSPI transfer
+ * @master: Pointer to the spi_master structure which provides
+ * information about the controller.
+ * @qspi: Pointer to the spi_device structure
+ * @transfer: Pointer to the spi_transfer structure which provides
+ * information about the next transfer parameters
+ *
+ * This function fills the TX FIFO and starts the QSPI transfer; completion
+ * is signalled asynchronously from the interrupt handler via
+ * spi_finalize_current_transfer().
+ *
+ * Return: Length of the transfer in bytes
+ */
+static int zynqmp_qspi_start_transfer(struct spi_master *master,
+ struct spi_device *qspi,
+ struct spi_transfer *transfer)
+{
+ struct zynqmp_qspi *xqspi = spi_master_get_devdata(master);
+ u32 genfifoentry = 0x0, transfer_len;
+
+ xqspi->txbuf = transfer->tx_buf;
+ xqspi->rxbuf = transfer->rx_buf;
+
+ zynqmp_qspi_setup_transfer(qspi, transfer);
+
+ genfifoentry |= xqspi->genfifocs;
+ genfifoentry |= xqspi->genfifobus;
+
+ zynqmp_qspi_txrxsetup(xqspi, transfer, &genfifoentry);
+
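+	/*
+	 * In DMA mode only the word-aligned portion set up by
+	 * zynqmp_qspi_setuprxdma() is requested here; any tail bytes are
+	 * collected in IO mode from the DMA-done interrupt handler.
+	 */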
+ if (xqspi->mode == GQSPI_MODE_DMA)
+ transfer_len = xqspi->dma_rx_bytes;
+ else
+ transfer_len = transfer->len;
+
+ xqspi->genfifoentry = genfifoentry;
+	if (transfer_len < GQSPI_GENFIFO_IMM_DATA_MASK) {
+ genfifoentry &= ~GQSPI_GENFIFO_IMM_DATA_MASK;
+ genfifoentry |= transfer_len;
+ zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, genfifoentry);
+ } else {
+ int tempcount = transfer_len;
+ u32 exponent = 8; /* 2^8 = 256 */
+ u8 imm_data = tempcount & 0xFF;
+
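+		/*
+		 * Transfers too long for a single immediate entry are split
+		 * into power-of-two "exponent" GENFIFO entries plus one
+		 * immediate entry for the low byte.  For example, a 300-byte
+		 * transfer becomes one 2^8 = 256-byte exponent entry followed
+		 * by a 44-byte immediate entry.
+		 */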
+		tempcount &= ~(tempcount & 0xFF);
+ if (tempcount != 0) {
+ /* Exponent entries */
+ genfifoentry |= GQSPI_GENFIFO_EXP;
+ while (tempcount != 0) {
+ if (tempcount & GQSPI_GENFIFO_EXP_START) {
+ genfifoentry &=
+ ~GQSPI_GENFIFO_IMM_DATA_MASK;
+ genfifoentry |= exponent;
+ zynqmp_gqspi_write(xqspi,
+ GQSPI_GEN_FIFO_OFST,
+ genfifoentry);
+ }
+ tempcount = tempcount >> 1;
+ exponent++;
+ }
+ }
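+		/* Immediate entry for the residual byte count */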
+ if (imm_data != 0) {
+ genfifoentry &= ~GQSPI_GENFIFO_EXP;
+ genfifoentry &= ~GQSPI_GENFIFO_IMM_DATA_MASK;
+ genfifoentry |= (u8) (imm_data & 0xFF);
+ zynqmp_gqspi_write(xqspi,
+ GQSPI_GEN_FIFO_OFST, genfifoentry);
+ }
+ }
+
+ if ((xqspi->mode == GQSPI_MODE_IO) &&
+ (xqspi->rxbuf != NULL)) {
+ /* Dummy generic FIFO entry */
+ zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, 0x0);
+ }
+
+	/* Manual start: trigger execution of the queued GENFIFO entries */
+ zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST,
+ zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST) |
+ GQSPI_CFG_START_GEN_FIFO_MASK);
+
+ if (xqspi->txbuf != NULL)
+ /* Enable interrupts for TX */
+ zynqmp_gqspi_write(xqspi, GQSPI_IER_OFST,
+ GQSPI_IER_TXEMPTY_MASK |
+ GQSPI_IER_GENFIFOEMPTY_MASK |
+ GQSPI_IER_TXNOT_FULL_MASK);
+
+ if (xqspi->rxbuf != NULL) {
+ /* Enable interrupts for RX */
+ if (xqspi->mode == GQSPI_MODE_DMA) {
+ /* Enable DMA interrupts */
+ zynqmp_gqspi_write(xqspi,
+ GQSPI_QSPIDMA_DST_I_EN_OFST,
+ GQSPI_QSPIDMA_DST_I_EN_DONE_MASK);
+ } else {
+ zynqmp_gqspi_write(xqspi, GQSPI_IER_OFST,
+ GQSPI_IER_GENFIFOEMPTY_MASK |
+ GQSPI_IER_RXNEMPTY_MASK |
+ GQSPI_IER_RXEMPTY_MASK);
+ }
+ }
+
+ return transfer->len;
+}
+
+/**
+ * zynqmp_qspi_suspend: Suspend method for the QSPI driver
+ * @dev: Address of the device structure
+ *
+ * This function stops the QSPI driver queue and disables the QSPI controller
+ *
+ * Return: Always 0
+ */
+static int __maybe_unused zynqmp_qspi_suspend(struct device *dev)
+{
+	struct spi_master *master = dev_get_drvdata(dev);
+
+ spi_master_suspend(master);
+
+ zynqmp_unprepare_transfer_hardware(master);
+
+ return 0;
+}
+
+/**
+ * zynqmp_qspi_resume: Resume method for the QSPI driver
+ * @dev: Address of the device structure
+ *
+ * This function enables the controller clocks and restarts the QSPI driver
+ * queue.
+ *
+ * Return: 0 on success; error value otherwise
+ */
+static int __maybe_unused zynqmp_qspi_resume(struct device *dev)
+{
+	struct spi_master *master = dev_get_drvdata(dev);
+ struct zynqmp_qspi *xqspi = spi_master_get_devdata(master);
+ int ret = 0;
+
+ ret = clk_enable(xqspi->pclk);
+ if (ret) {
+ dev_err(dev, "Cannot enable APB clock.\n");
+ return ret;
+ }
+
+ ret = clk_enable(xqspi->refclk);
+ if (ret) {
+ dev_err(dev, "Cannot enable device clock.\n");
+ clk_disable(xqspi->pclk);
+ return ret;
+ }
+
+ spi_master_resume(master);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(zynqmp_qspi_dev_pm_ops, zynqmp_qspi_suspend,
+ zynqmp_qspi_resume);
+
+/**
+ * zynqmp_qspi_probe: Probe method for the QSPI driver
+ * @pdev: Pointer to the platform_device structure
+ *
+ * This function initializes the driver data structures and the hardware.
+ *
+ * Return: 0 on success; error value otherwise
+ */
+static int zynqmp_qspi_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct spi_master *master;
+ struct zynqmp_qspi *xqspi;
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*xqspi));
+ if (!master)
+ return -ENOMEM;
+
+ xqspi = spi_master_get_devdata(master);
+ master->dev.of_node = pdev->dev.of_node;
+ platform_set_drvdata(pdev, master);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ xqspi->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(xqspi->regs)) {
+ ret = PTR_ERR(xqspi->regs);
+ goto remove_master;
+ }
+
+ xqspi->dev = dev;
+ xqspi->pclk = devm_clk_get(&pdev->dev, "pclk");
+ if (IS_ERR(xqspi->pclk)) {
+ dev_err(dev, "pclk clock not found.\n");
+ ret = PTR_ERR(xqspi->pclk);
+ goto remove_master;
+ }
+
+ ret = clk_prepare_enable(xqspi->pclk);
+ if (ret) {
+ dev_err(dev, "Unable to enable APB clock.\n");
+ goto remove_master;
+ }
+
+ xqspi->refclk = devm_clk_get(&pdev->dev, "ref_clk");
+ if (IS_ERR(xqspi->refclk)) {
+ dev_err(dev, "ref_clk clock not found.\n");
+ ret = PTR_ERR(xqspi->refclk);
+ goto clk_dis_pclk;
+ }
+
+ ret = clk_prepare_enable(xqspi->refclk);
+ if (ret) {
+ dev_err(dev, "Unable to enable device clock.\n");
+ goto clk_dis_pclk;
+ }
+
+ /* QSPI controller initializations */
+ zynqmp_qspi_init_hw(xqspi);
+
+ xqspi->irq = platform_get_irq(pdev, 0);
+ if (xqspi->irq <= 0) {
+ ret = -ENXIO;
+ dev_err(dev, "irq resource not found\n");
+ goto clk_dis_all;
+ }
+ ret = devm_request_irq(&pdev->dev, xqspi->irq, zynqmp_qspi_irq,
+ 0, pdev->name, master);
+ if (ret != 0) {
+ ret = -ENXIO;
+ dev_err(dev, "request_irq failed\n");
+ goto clk_dis_all;
+ }
+
+ master->num_chipselect = GQSPI_DEFAULT_NUM_CS;
+
+ master->setup = zynqmp_qspi_setup;
+ master->set_cs = zynqmp_qspi_chipselect;
+ master->transfer_one = zynqmp_qspi_start_transfer;
+ master->prepare_transfer_hardware = zynqmp_prepare_transfer_hardware;
+ master->unprepare_transfer_hardware =
+ zynqmp_unprepare_transfer_hardware;
+ master->max_speed_hz = clk_get_rate(xqspi->refclk) / 2;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_RX_DUAL | SPI_RX_QUAD |
+ SPI_TX_DUAL | SPI_TX_QUAD;
+
+ if (master->dev.parent == NULL)
+ master->dev.parent = &master->dev;
+
+ ret = spi_register_master(master);
+ if (ret)
+ goto clk_dis_all;
+
+ return 0;
+
+clk_dis_all:
+ clk_disable_unprepare(xqspi->refclk);
+clk_dis_pclk:
+ clk_disable_unprepare(xqspi->pclk);
+remove_master:
+ spi_master_put(master);
+
+ return ret;
+}
+
+/**
+ * zynqmp_qspi_remove: Remove method for the QSPI driver
+ * @pdev: Pointer to the platform_device structure
+ *
+ * This function is called if a device is physically removed from the system or
+ * if the driver module is being unloaded. It frees all resources allocated to
+ * the device.
+ *
+ * Return: Always 0
+ */
+static int zynqmp_qspi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct zynqmp_qspi *xqspi = spi_master_get_devdata(master);
+
+ zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, 0x0);
+ clk_disable_unprepare(xqspi->refclk);
+ clk_disable_unprepare(xqspi->pclk);
+
+ spi_unregister_master(master);
+
+ return 0;
+}
+
+static const struct of_device_id zynqmp_qspi_of_match[] = {
+ { .compatible = "xlnx,zynqmp-qspi-1.0", },
+ { /* End of table */ }
+};
+
+MODULE_DEVICE_TABLE(of, zynqmp_qspi_of_match);
+
+static struct platform_driver zynqmp_qspi_driver = {
+ .probe = zynqmp_qspi_probe,
+ .remove = zynqmp_qspi_remove,
+ .driver = {
+ .name = "zynqmp-qspi",
+ .of_match_table = zynqmp_qspi_of_match,
+ .pm = &zynqmp_qspi_dev_pm_ops,
+ },
+};
+
+module_platform_driver(zynqmp_qspi_driver);
+
+MODULE_AUTHOR("Xilinx, Inc.");
+MODULE_DESCRIPTION("Xilinx ZynqMP GQSPI driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 92c909e..dd616ff 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -95,37 +95,25 @@ MODULE_PARM_DESC(bufsiz, "data bytes in biggest supported SPI message");
/*-------------------------------------------------------------------------*/
-/*
- * We can't use the standard synchronous wrappers for file I/O; we
- * need to protect against async removal of the underlying spi_device.
- */
-static void spidev_complete(void *arg)
-{
- complete(arg);
-}
-
static ssize_t
spidev_sync(struct spidev_data *spidev, struct spi_message *message)
{
DECLARE_COMPLETION_ONSTACK(done);
int status;
-
- message->complete = spidev_complete;
- message->context = &done;
+ struct spi_device *spi;
spin_lock_irq(&spidev->spi_lock);
- if (spidev->spi == NULL)
+ spi = spidev->spi;
+ spin_unlock_irq(&spidev->spi_lock);
+
+ if (spi == NULL)
status = -ESHUTDOWN;
else
- status = spi_async(spidev->spi, message);
- spin_unlock_irq(&spidev->spi_lock);
+ status = spi_sync(spi, message);
+
+ if (status == 0)
+ status = message->actual_length;
- if (status == 0) {
- wait_for_completion(&done);
- status = message->status;
- if (status == 0)
- status = message->actual_length;
- }
return status;
}
@@ -647,7 +635,6 @@ err_find_dev:
static int spidev_release(struct inode *inode, struct file *filp)
{
struct spidev_data *spidev;
- int status = 0;
mutex_lock(&device_list_lock);
spidev = filp->private_data;
@@ -676,7 +663,7 @@ static int spidev_release(struct inode *inode, struct file *filp)
}
mutex_unlock(&device_list_lock);
- return status;
+ return 0;
}
static const struct file_operations spidev_fops = {