Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/Kconfig                          |   13
-rw-r--r--  drivers/dma/Makefile                         |    3
-rw-r--r--  drivers/dma/amba-pl08x.c                     |   10
-rw-r--r--  drivers/dma/at_hdmac.c                       |   10
-rw-r--r--  drivers/dma/at_hdmac_regs.h                  |    8
-rw-r--r--  drivers/dma/bestcomm/Kconfig                 |   36
-rw-r--r--  drivers/dma/bestcomm/Makefile                |   14
-rw-r--r--  drivers/dma/bestcomm/ata.c                   |  157
-rw-r--r--  drivers/dma/bestcomm/bcom_ata_task.c         |   67
-rw-r--r--  drivers/dma/bestcomm/bcom_fec_rx_task.c      |   78
-rw-r--r--  drivers/dma/bestcomm/bcom_fec_tx_task.c      |   91
-rw-r--r--  drivers/dma/bestcomm/bcom_gen_bd_rx_task.c   |   63
-rw-r--r--  drivers/dma/bestcomm/bcom_gen_bd_tx_task.c   |   69
-rw-r--r--  drivers/dma/bestcomm/bestcomm.c              |  531
-rw-r--r--  drivers/dma/bestcomm/fec.c                   |  270
-rw-r--r--  drivers/dma/bestcomm/gen_bd.c                |  354
-rw-r--r--  drivers/dma/bestcomm/sram.c                  |  178
-rw-r--r--  drivers/dma/coh901318.c                      | 1306
-rw-r--r--  drivers/dma/coh901318.h (renamed from drivers/dma/coh901318_lli.h) | 35
-rw-r--r--  drivers/dma/coh901318_lli.c                  |    6
-rw-r--r--  drivers/dma/dmaengine.c                      |   37
-rw-r--r--  drivers/dma/dmatest.c                        |   22
-rw-r--r--  drivers/dma/dw_dmac.c                        |  531
-rw-r--r--  drivers/dma/dw_dmac_regs.h                   |   26
-rw-r--r--  drivers/dma/edma.c                           |   61
-rw-r--r--  drivers/dma/ep93xx_dma.c                     |    3
-rw-r--r--  drivers/dma/imx-dma.c                        |    7
-rw-r--r--  drivers/dma/ioat/dma.c                       |   11
-rw-r--r--  drivers/dma/ioat/dma.h                       |    1
-rw-r--r--  drivers/dma/ioat/dma_v2.c                    |  113
-rw-r--r--  drivers/dma/ioat/dma_v3.c                    |  216
-rw-r--r--  drivers/dma/ioat/hw.h                        |   11
-rw-r--r--  drivers/dma/ioat/pci.c                       |   11
-rw-r--r--  drivers/dma/iop-adma.c                       |   45
-rw-r--r--  drivers/dma/ipu/ipu_idmac.c                  |    2
-rw-r--r--  drivers/dma/ipu/ipu_irq.c                    |    1
-rw-r--r--  drivers/dma/mmp_pdma.c                       |   13
-rw-r--r--  drivers/dma/mmp_tdma.c                       |    7
-rw-r--r--  drivers/dma/mv_xor.c                         |   40
-rw-r--r--  drivers/dma/mxs-dma.c                        |    8
-rw-r--r--  drivers/dma/of-dma.c                         |  267
-rw-r--r--  drivers/dma/omap-dma.c                       |   20
-rw-r--r--  drivers/dma/pch_dma.c                        |   13
-rw-r--r--  drivers/dma/pl330.c                          |  101
-rw-r--r--  drivers/dma/sh/shdma-base.c                  |    3
-rw-r--r--  drivers/dma/sh/shdma.c                       |    2
-rw-r--r--  drivers/dma/sirf-dma.c                       |   73
-rw-r--r--  drivers/dma/ste_dma40.c                      |  491
-rw-r--r--  drivers/dma/ste_dma40_ll.c                   |   29
-rw-r--r--  drivers/dma/ste_dma40_ll.h                   |  130
-rw-r--r--  drivers/dma/tegra20-apb-dma.c                |   67
51 files changed, 4919 insertions(+), 742 deletions(-)
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index d4c1218..80b6997 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -51,7 +51,7 @@ config ASYNC_TX_ENABLE_CHANNEL_SWITCH
config AMBA_PL08X
bool "ARM PrimeCell PL080 or PL081 support"
- depends on ARM_AMBA && EXPERIMENTAL
+ depends on ARM_AMBA
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
@@ -83,7 +83,6 @@ config INTEL_IOP_ADMA
config DW_DMAC
tristate "Synopsys DesignWare AHB DMA support"
- depends on HAVE_CLK
select DMA_ENGINE
default y if CPU_AT32AP7000
help
@@ -125,6 +124,8 @@ config MPC512X_DMA
---help---
Enable support for the Freescale MPC512x built-in DMA engine.
+source "drivers/dma/bestcomm/Kconfig"
+
config MV_XOR
bool "Marvell XOR engine support"
depends on PLAT_ORION
@@ -213,8 +214,8 @@ config TIMB_DMA
Enable support for the Timberdale FPGA DMA engine.
config SIRF_DMA
- tristate "CSR SiRFprimaII DMA support"
- depends on ARCH_PRIMA2
+ tristate "CSR SiRFprimaII/SiRFmarco DMA support"
+ depends on ARCH_SIRF
select DMA_ENGINE
help
Enable support for the CSR SiRFprimaII DMA engine.
@@ -326,6 +327,10 @@ config DMA_ENGINE
config DMA_VIRTUAL_CHANNELS
tristate
+config DMA_OF
+ def_bool y
+ depends on OF
+
comment "DMA Clients"
depends on DMA_ENGINE
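
The new DMA_OF option builds of-dma.c (267 lines in the diffstat above), which gives dmaengine a device-tree lookup layer. As a minimal sketch of how a controller driver might hook into it — the of_dma_controller_register()/of_dma_simple_xlate() calls and struct of_dma_filter_info are assumed from the of-dma.c API this series introduces, and all foo_* names are hypothetical:

#include <linux/dmaengine.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>

/* Hypothetical filter: of_dma_simple_xlate() hands us the first cell
 * of the consumer's "dmas" specifier; match it against the channel
 * index. */
static bool foo_dma_filter(struct dma_chan *chan, void *param)
{
	return chan->chan_id == *(u32 *)param;
}

static struct of_dma_filter_info foo_dma_info;

/* Called from probe() after dma_async_device_register(): DT consumers
 * can then resolve their "dmas" phandles to one of our channels. */
static int foo_dma_of_register(struct platform_device *pdev)
{
	dma_cap_zero(foo_dma_info.dma_cap);
	dma_cap_set(DMA_SLAVE, foo_dma_info.dma_cap);
	foo_dma_info.filter_fn = foo_dma_filter;

	return of_dma_controller_register(pdev->dev.of_node,
					  of_dma_simple_xlate,
					  &foo_dma_info);
}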
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 7428fea..488e3ff 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -3,6 +3,8 @@ ccflags-$(CONFIG_DMADEVICES_VDEBUG) += -DVERBOSE_DEBUG
obj-$(CONFIG_DMA_ENGINE) += dmaengine.o
obj-$(CONFIG_DMA_VIRTUAL_CHANNELS) += virt-dma.o
+obj-$(CONFIG_DMA_OF) += of-dma.o
+
obj-$(CONFIG_NET_DMA) += iovlock.o
obj-$(CONFIG_INTEL_MID_DMAC) += intel_mid_dma.o
obj-$(CONFIG_DMATEST) += dmatest.o
@@ -10,6 +12,7 @@ obj-$(CONFIG_INTEL_IOATDMA) += ioat/
obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
obj-$(CONFIG_FSL_DMA) += fsldma.o
obj-$(CONFIG_MPC512X_DMA) += mpc512x_dma.o
+obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/
obj-$(CONFIG_MV_XOR) += mv_xor.o
obj-$(CONFIG_DW_DMAC) += dw_dmac.o
obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index d1cc579..8bad254 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -83,7 +83,7 @@
#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
-#include <asm/hardware/pl080.h>
+#include <linux/amba/pl080.h>
#include "dmaengine.h"
#include "virt-dma.h"
@@ -1096,15 +1096,9 @@ static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
struct pl08x_dma_chan *plchan)
{
LIST_HEAD(head);
- struct pl08x_txd *txd;
vchan_get_all_descriptors(&plchan->vc, &head);
-
- while (!list_empty(&head)) {
- txd = list_first_entry(&head, struct pl08x_txd, vd.node);
- list_del(&txd->vd.node);
- pl08x_desc_free(&txd->vd);
- }
+ vchan_dma_desc_free_list(&plchan->vc, &head);
}
/*
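
The loop deleted above is exactly what the generic helper does; a sketch of vchan_dma_desc_free_list(), assumed from drivers/dma/virt-dma.c of the same vintage:

/* Assumed shape of the virt-dma helper this hunk switches to: drain
 * the list harvested by vchan_get_all_descriptors() and hand each
 * descriptor to the channel's desc_free() callback (here,
 * pl08x_desc_free, matching the removed open-coded loop). */
void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head)
{
	while (!list_empty(head)) {
		struct virt_dma_desc *vd = list_first_entry(head,
				struct virt_dma_desc, node);

		list_del(&vd->node);
		vc->desc_free(vd);
	}
}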
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 13a02f4..6e13f26 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -778,7 +778,7 @@ err:
*/
static int
atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
- size_t period_len, enum dma_transfer_direction direction)
+ size_t period_len)
{
if (period_len > (ATC_BTSIZE_MAX << reg_width))
goto err_out;
@@ -786,8 +786,6 @@ atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
goto err_out;
if (unlikely(buf_addr & ((1 << reg_width) - 1)))
goto err_out;
- if (unlikely(!(direction & (DMA_DEV_TO_MEM | DMA_MEM_TO_DEV))))
- goto err_out;
return 0;
@@ -886,14 +884,16 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
return NULL;
}
+ if (unlikely(!is_slave_direction(direction)))
+ goto err_out;
+
if (sconfig->direction == DMA_MEM_TO_DEV)
reg_width = convert_buswidth(sconfig->dst_addr_width);
else
reg_width = convert_buswidth(sconfig->src_addr_width);
/* Check for too big/unaligned periods and unaligned DMA buffer */
- if (atc_dma_cyclic_check_values(reg_width, buf_addr,
- period_len, direction))
+ if (atc_dma_cyclic_check_values(reg_width, buf_addr, period_len))
goto err_out;
/* build cyclic linked list */
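
The dropped open-coded direction test had a subtle flaw: enum dma_transfer_direction values are consecutive integers, not flag bits, so `direction & (DMA_DEV_TO_MEM | DMA_MEM_TO_DEV)` would also accept DMA_DEV_TO_DEV. The is_slave_direction() helper it moves to compares exact values; its assumed shape from include/linux/dmaengine.h:

/* Assumed from include/linux/dmaengine.h: true only for the two
 * slave (device <-> memory) transfer directions. */
static inline bool is_slave_direction(enum dma_transfer_direction direction)
{
	return (direction == DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM);
}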
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index 116e4ad..0eb3c13 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -369,10 +369,10 @@ static void vdbg_dump_regs(struct at_dma_chan *atchan) {}
static void atc_dump_lli(struct at_dma_chan *atchan, struct at_lli *lli)
{
- dev_printk(KERN_CRIT, chan2dev(&atchan->chan_common),
- " desc: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
- lli->saddr, lli->daddr,
- lli->ctrla, lli->ctrlb, lli->dscr);
+ dev_crit(chan2dev(&atchan->chan_common),
+ " desc: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
+ lli->saddr, lli->daddr,
+ lli->ctrla, lli->ctrlb, lli->dscr);
}
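
dev_crit() is just the level-specific shorthand for the dev_printk() call being replaced; conceptually (the exact definition in include/linux/device.h may differ):

#define dev_crit(dev, fmt, ...) \
	dev_printk(KERN_CRIT, dev, fmt, ##__VA_ARGS__)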
diff --git a/drivers/dma/bestcomm/Kconfig b/drivers/dma/bestcomm/Kconfig
new file mode 100644
index 0000000..29e4270
--- /dev/null
+++ b/drivers/dma/bestcomm/Kconfig
@@ -0,0 +1,36 @@
+#
+# Kconfig options for Bestcomm
+#
+
+config PPC_BESTCOMM
+ tristate "Bestcomm DMA engine support"
+ depends on PPC_MPC52xx
+ default n
+ select PPC_LIB_RHEAP
+ help
+ BestComm is the name of the communication coprocessor found
+ on the Freescale MPC5200 family of processors. Its usage is
+ optional for some drivers (like ATA), but required for
+ others (like FEC).
+
+ If you want to use drivers that require DMA operations,
+ answer Y or M. Otherwise say N.
+
+config PPC_BESTCOMM_ATA
+ tristate
+ depends on PPC_BESTCOMM
+ help
+ This option enables the support for the ATA task.
+
+config PPC_BESTCOMM_FEC
+ tristate
+ depends on PPC_BESTCOMM
+ help
+ This option enables the support for the FEC tasks.
+
+config PPC_BESTCOMM_GEN_BD
+ tristate
+ depends on PPC_BESTCOMM
+ help
+ This option enables the support for the GenBD tasks.
+
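
Note that PPC_BESTCOMM_ATA, PPC_BESTCOMM_FEC and PPC_BESTCOMM_GEN_BD carry no prompt string, so they can only be enabled via select from a client driver. A hedged sketch of such a client entry, modeled on the MPC5200 FEC Ethernet driver (the FEC_MPC52xx symbol and its wording are assumptions, not part of this diff):

config FEC_MPC52xx
	tristate "FEC Ethernet controller on MPC52xx"
	depends on PPC_MPC52xx && PPC_BESTCOMM
	select PPC_BESTCOMM_FEC
	help
	  Enables the MPC5200 FEC driver, which pulls in the BestComm
	  FEC tasks it needs through select.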
diff --git a/drivers/dma/bestcomm/Makefile b/drivers/dma/bestcomm/Makefile
new file mode 100644
index 0000000..aed2df2
--- /dev/null
+++ b/drivers/dma/bestcomm/Makefile
@@ -0,0 +1,14 @@
+#
+# Makefile for BestComm & co
+#
+
+bestcomm-core-objs := bestcomm.o sram.o
+bestcomm-ata-objs := ata.o bcom_ata_task.o
+bestcomm-fec-objs := fec.o bcom_fec_rx_task.o bcom_fec_tx_task.o
+bestcomm-gen-bd-objs := gen_bd.o bcom_gen_bd_rx_task.o bcom_gen_bd_tx_task.o
+
+obj-$(CONFIG_PPC_BESTCOMM) += bestcomm-core.o
+obj-$(CONFIG_PPC_BESTCOMM_ATA) += bestcomm-ata.o
+obj-$(CONFIG_PPC_BESTCOMM_FEC) += bestcomm-fec.o
+obj-$(CONFIG_PPC_BESTCOMM_GEN_BD) += bestcomm-gen-bd.o
+
diff --git a/drivers/dma/bestcomm/ata.c b/drivers/dma/bestcomm/ata.c
new file mode 100644
index 0000000..2fd87f8
--- /dev/null
+++ b/drivers/dma/bestcomm/ata.c
@@ -0,0 +1,157 @@
+/*
+ * Bestcomm ATA task driver
+ *
+ *
+ * Patterned after bestcomm/fec.c by Dale Farnsworth <dfarnsworth@mvista.com>
+ * 2003-2004 (c) MontaVista, Software, Inc.
+ *
+ * Copyright (C) 2006-2007 Sylvain Munaut <tnt@246tNt.com>
+ * Copyright (C) 2006 Freescale - John Rigby
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <asm/io.h>
+
+#include <linux/fsl/bestcomm/bestcomm.h>
+#include <linux/fsl/bestcomm/bestcomm_priv.h>
+#include <linux/fsl/bestcomm/ata.h>
+
+
+/* ======================================================================== */
+/* Task image/var/inc */
+/* ======================================================================== */
+
+/* ata task image */
+extern u32 bcom_ata_task[];
+
+/* ata task vars that need to be set before enabling the task */
+struct bcom_ata_var {
+ u32 enable; /* (u16*) address of task's control register */
+ u32 bd_base; /* (struct bcom_bd*) beginning of ring buffer */
+ u32 bd_last; /* (struct bcom_bd*) end of ring buffer */
+ u32 bd_start; /* (struct bcom_bd*) current bd */
+ u32 buffer_size; /* size of receive buffer */
+};
+
+/* ata task incs that need to be set before enabling the task */
+struct bcom_ata_inc {
+ u16 pad0;
+ s16 incr_bytes;
+ u16 pad1;
+ s16 incr_dst;
+ u16 pad2;
+ s16 incr_src;
+};
+
+
+/* ======================================================================== */
+/* Task support code */
+/* ======================================================================== */
+
+struct bcom_task *
+bcom_ata_init(int queue_len, int maxbufsize)
+{
+ struct bcom_task *tsk;
+ struct bcom_ata_var *var;
+ struct bcom_ata_inc *inc;
+
+ /* Prefetch breaks ATA DMA. Turn it off for ATA DMA */
+ bcom_disable_prefetch();
+
+ tsk = bcom_task_alloc(queue_len, sizeof(struct bcom_ata_bd), 0);
+ if (!tsk)
+ return NULL;
+
+ tsk->flags = BCOM_FLAGS_NONE;
+
+ bcom_ata_reset_bd(tsk);
+
+ var = (struct bcom_ata_var *) bcom_task_var(tsk->tasknum);
+ inc = (struct bcom_ata_inc *) bcom_task_inc(tsk->tasknum);
+
+ if (bcom_load_image(tsk->tasknum, bcom_ata_task)) {
+ bcom_task_free(tsk);
+ return NULL;
+ }
+
+ var->enable = bcom_eng->regs_base +
+ offsetof(struct mpc52xx_sdma, tcr[tsk->tasknum]);
+ var->bd_base = tsk->bd_pa;
+ var->bd_last = tsk->bd_pa + ((tsk->num_bd-1) * tsk->bd_size);
+ var->bd_start = tsk->bd_pa;
+ var->buffer_size = maxbufsize;
+
+ /* Configure some stuff */
+ bcom_set_task_pragma(tsk->tasknum, BCOM_ATA_PRAGMA);
+ bcom_set_task_auto_start(tsk->tasknum, tsk->tasknum);
+
+ out_8(&bcom_eng->regs->ipr[BCOM_INITIATOR_ATA_RX], BCOM_IPR_ATA_RX);
+ out_8(&bcom_eng->regs->ipr[BCOM_INITIATOR_ATA_TX], BCOM_IPR_ATA_TX);
+
+ out_be32(&bcom_eng->regs->IntPend, 1<<tsk->tasknum); /* Clear ints */
+
+ return tsk;
+}
+EXPORT_SYMBOL_GPL(bcom_ata_init);
+
+void bcom_ata_rx_prepare(struct bcom_task *tsk)
+{
+ struct bcom_ata_inc *inc;
+
+ inc = (struct bcom_ata_inc *) bcom_task_inc(tsk->tasknum);
+
+ inc->incr_bytes = -(s16)sizeof(u32);
+ inc->incr_src = 0;
+ inc->incr_dst = sizeof(u32);
+
+ bcom_set_initiator(tsk->tasknum, BCOM_INITIATOR_ATA_RX);
+}
+EXPORT_SYMBOL_GPL(bcom_ata_rx_prepare);
+
+void bcom_ata_tx_prepare(struct bcom_task *tsk)
+{
+ struct bcom_ata_inc *inc;
+
+ inc = (struct bcom_ata_inc *) bcom_task_inc(tsk->tasknum);
+
+ inc->incr_bytes = -(s16)sizeof(u32);
+ inc->incr_src = sizeof(u32);
+ inc->incr_dst = 0;
+
+ bcom_set_initiator(tsk->tasknum, BCOM_INITIATOR_ATA_TX);
+}
+EXPORT_SYMBOL_GPL(bcom_ata_tx_prepare);
+
+void bcom_ata_reset_bd(struct bcom_task *tsk)
+{
+ struct bcom_ata_var *var;
+
+ /* Reset all BD */
+ memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size);
+
+ tsk->index = 0;
+ tsk->outdex = 0;
+
+ var = (struct bcom_ata_var *) bcom_task_var(tsk->tasknum);
+ var->bd_start = var->bd_base;
+}
+EXPORT_SYMBOL_GPL(bcom_ata_reset_bd);
+
+void bcom_ata_release(struct bcom_task *tsk)
+{
+ /* Nothing special for the ATA tasks */
+ bcom_task_free(tsk);
+}
+EXPORT_SYMBOL_GPL(bcom_ata_release);
+
+
+MODULE_DESCRIPTION("BestComm ATA task driver");
+MODULE_AUTHOR("John Rigby");
+MODULE_LICENSE("GPL v2");
+
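
Putting the pieces together, a client such as the MPC5200 ATA host driver would use this API roughly as follows — a hedged sketch; bcom_prepare_next_buffer()/bcom_submit_next_buffer() and the struct bcom_ata_bd layout are assumed from bestcomm.h/ata.h:

#include <linux/fsl/bestcomm/bestcomm.h>
#include <linux/fsl/bestcomm/ata.h>

/* Hypothetical fragment: set up a BestComm ATA task and queue one
 * device-to-memory transfer.  Error handling trimmed. */
static int foo_ata_dma_read(dma_addr_t buf_pa, size_t len)
{
	struct bcom_task *tsk;
	struct bcom_ata_bd *bd;

	tsk = bcom_ata_init(16, 4096);	/* 16 BDs, 4 KiB max buffer */
	if (!tsk)
		return -ENODEV;

	bcom_ata_rx_prepare(tsk);	/* program device -> memory incs */

	bd = (struct bcom_ata_bd *)bcom_prepare_next_buffer(tsk);
	bd->status = len;		/* transfer size */
	bd->dst_pa = buf_pa;		/* DMA address of the buffer */
	bcom_submit_next_buffer(tsk, NULL);

	bcom_enable(tsk);		/* kick the task */
	return 0;
}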
diff --git a/drivers/dma/bestcomm/bcom_ata_task.c b/drivers/dma/bestcomm/bcom_ata_task.c
new file mode 100644
index 0000000..cc6049a
--- /dev/null
+++ b/drivers/dma/bestcomm/bcom_ata_task.c
@@ -0,0 +1,67 @@
+/*
+ * Bestcomm ATA task microcode
+ *
+ * Copyright (c) 2004 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Created based on bestcom/code_dma/image_rtos1/dma_image.hex
+ */
+
+#include <asm/types.h>
+
+/*
+ * The header consists of the following fields:
+ * u32 magic;
+ * u8 desc_size;
+ * u8 var_size;
+ * u8 inc_size;
+ * u8 first_var;
+ * u8 reserved[8];
+ *
+ * The size fields contain the number of 32-bit words.
+ */
+
+u32 bcom_ata_task[] = {
+ /* header */
+ 0x4243544b,
+ 0x0e060709,
+ 0x00000000,
+ 0x00000000,
+
+ /* Task descriptors */
+ 0x8198009b, /* LCD: idx0 = var3; idx0 <= var2; idx0 += inc3 */
+ 0x13e00c08, /* DRD1A: var3 = var1; FN=0 MORE init=31 WS=0 RS=0 */
+ 0xb8000264, /* LCD: idx1 = *idx0, idx2 = var0; idx1 < var9; idx1 += inc4, idx2 += inc4 */
+ 0x10000f00, /* DRD1A: var3 = idx0; FN=0 MORE init=0 WS=0 RS=0 */
+ 0x60140002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT init=0 WS=2 RS=2 */
+ 0x0c8cfc8a, /* DRD2B1: *idx2 = EU3(); EU3(*idx2,var10) */
+ 0xd8988240, /* LCDEXT: idx1 = idx1; idx1 > var9; idx1 += inc0 */
+ 0xf845e011, /* LCDEXT: idx2 = *(idx0 + var00000015); ; idx2 += inc2 */
+ 0xb845e00a, /* LCD: idx3 = *(idx0 + var00000019); ; idx3 += inc1 */
+ 0x0bfecf90, /* DRD1A: *idx3 = *idx2; FN=0 TFD init=31 WS=3 RS=3 */
+ 0x9898802d, /* LCD: idx1 = idx1; idx1 once var0; idx1 += inc5 */
+ 0x64000005, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=5 INT EXT init=0 WS=0 RS=0 */
+ 0x0c0cf849, /* DRD2B1: *idx0 = EU3(); EU3(idx1,var9) */
+ 0x000001f8, /* NOP */
+
+ /* VAR[9]-VAR[14] */
+ 0x40000000,
+ 0x7fff7fff,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+
+ /* INC[0]-INC[6] */
+ 0x40000000,
+ 0xe0000000,
+ 0xe0000000,
+ 0xa000000c,
+ 0x20000000,
+ 0x00000000,
+ 0x00000000,
+};
+
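
The first four words of the array are the header described in the comment block; viewed as C (the struct name is assumed from bestcomm_priv.h), and decoded for this image:

/* Assumed from bestcomm_priv.h: C view of the image header. */
struct bcom_task_header {
	u32	magic;		/* 0x4243544b == ASCII "BCTK" */
	u8	desc_size;	/* descriptor words */
	u8	var_size;	/* variable words */
	u8	inc_size;	/* increment words */
	u8	first_var;	/* index of the first variable */
	u8	reserved[8];
};

/* For bcom_ata_task above: 0x0e060709 unpacks (big-endian) to
 * desc_size=14, var_size=6, inc_size=7, first_var=9 -- matching the
 * 14 task descriptors, VAR[9]-VAR[14] and INC[0]-INC[6]. */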
diff --git a/drivers/dma/bestcomm/bcom_fec_rx_task.c b/drivers/dma/bestcomm/bcom_fec_rx_task.c
new file mode 100644
index 0000000..a1ad6a0
--- /dev/null
+++ b/drivers/dma/bestcomm/bcom_fec_rx_task.c
@@ -0,0 +1,78 @@
+/*
+ * Bestcomm FEC RX task microcode
+ *
+ * Copyright (c) 2004 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Automatically created based on BestCommAPI-2.2/code_dma/image_rtos1/dma_image.hex
+ * on Tue Mar 22 11:19:38 2005 GMT
+ */
+
+#include <asm/types.h>
+
+/*
+ * The header consists of the following fields:
+ * u32 magic;
+ * u8 desc_size;
+ * u8 var_size;
+ * u8 inc_size;
+ * u8 first_var;
+ * u8 reserved[8];
+ *
+ * The size fields contain the number of 32-bit words.
+ */
+
+u32 bcom_fec_rx_task[] = {
+ /* header */
+ 0x4243544b,
+ 0x18060709,
+ 0x00000000,
+ 0x00000000,
+
+ /* Task descriptors */
+ 0x808220e3, /* LCD: idx0 = var1, idx1 = var4; idx1 <= var3; idx0 += inc4, idx1 += inc3 */
+ 0x10601010, /* DRD1A: var4 = var2; FN=0 MORE init=3 WS=0 RS=0 */
+ 0xb8800264, /* LCD: idx2 = *idx1, idx3 = var0; idx2 < var9; idx2 += inc4, idx3 += inc4 */
+ 0x10001308, /* DRD1A: var4 = idx1; FN=0 MORE init=0 WS=0 RS=0 */
+ 0x60140002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT init=0 WS=2 RS=2 */
+ 0x0cccfcca, /* DRD2B1: *idx3 = EU3(); EU3(*idx3,var10) */
+ 0x80004000, /* LCDEXT: idx2 = 0x00000000; ; */
+ 0xb8c58029, /* LCD: idx3 = *(idx1 + var00000015); idx3 once var0; idx3 += inc5 */
+ 0x60000002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT init=0 WS=0 RS=0 */
+ 0x088cf8cc, /* DRD2B1: idx2 = EU3(); EU3(idx3,var12) */
+ 0x991982f2, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var11; idx2 += inc6, idx3 += inc2 */
+ 0x006acf80, /* DRD1A: *idx3 = *idx0; FN=0 init=3 WS=1 RS=1 */
+ 0x80004000, /* LCDEXT: idx2 = 0x00000000; ; */
+ 0x9999802d, /* LCD: idx3 = idx3; idx3 once var0; idx3 += inc5 */
+ 0x70000002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT MORE init=0 WS=0 RS=0 */
+ 0x034cfc4e, /* DRD2B1: var13 = EU3(); EU3(*idx1,var14) */
+ 0x00008868, /* DRD1A: idx2 = var13; FN=0 init=0 WS=0 RS=0 */
+ 0x99198341, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var13; idx2 += inc0, idx3 += inc1 */
+ 0x007ecf80, /* DRD1A: *idx3 = *idx0; FN=0 init=3 WS=3 RS=3 */
+ 0x99198272, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var9; idx2 += inc6, idx3 += inc2 */
+ 0x046acf80, /* DRD1A: *idx3 = *idx0; FN=0 INT init=3 WS=1 RS=1 */
+ 0x9819002d, /* LCD: idx2 = idx0; idx2 once var0; idx2 += inc5 */
+ 0x0060c790, /* DRD1A: *idx1 = *idx2; FN=0 init=3 WS=0 RS=0 */
+ 0x000001f8, /* NOP */
+
+ /* VAR[9]-VAR[14] */
+ 0x40000000,
+ 0x7fff7fff,
+ 0x00000000,
+ 0x00000003,
+ 0x40000008,
+ 0x43ffffff,
+
+ /* INC[0]-INC[6] */
+ 0x40000000,
+ 0xe0000000,
+ 0xe0000000,
+ 0xa0000008,
+ 0x20000000,
+ 0x00000000,
+ 0x4000ffff,
+};
+
diff --git a/drivers/dma/bestcomm/bcom_fec_tx_task.c b/drivers/dma/bestcomm/bcom_fec_tx_task.c
new file mode 100644
index 0000000..b1c495c
--- /dev/null
+++ b/drivers/dma/bestcomm/bcom_fec_tx_task.c
@@ -0,0 +1,91 @@
+/*
+ * Bestcomm FEC TX task microcode
+ *
+ * Copyright (c) 2004 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Automatically created based on BestCommAPI-2.2/code_dma/image_rtos1/dma_image.hex
+ * on Tue Mar 22 11:19:29 2005 GMT
+ */
+
+#include <asm/types.h>
+
+/*
+ * The header consists of the following fields:
+ * u32 magic;
+ * u8 desc_size;
+ * u8 var_size;
+ * u8 inc_size;
+ * u8 first_var;
+ * u8 reserved[8];
+ *
+ * The size fields contain the number of 32-bit words.
+ */
+
+u32 bcom_fec_tx_task[] = {
+ /* header */
+ 0x4243544b,
+ 0x2407070d,
+ 0x00000000,
+ 0x00000000,
+
+ /* Task descriptors */
+ 0x8018001b, /* LCD: idx0 = var0; idx0 <= var0; idx0 += inc3 */
+ 0x60000005, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=5 EXT init=0 WS=0 RS=0 */
+ 0x01ccfc0d, /* DRD2B1: var7 = EU3(); EU3(*idx0,var13) */
+ 0x8082a123, /* LCD: idx0 = var1, idx1 = var5; idx1 <= var4; idx0 += inc4, idx1 += inc3 */
+ 0x10801418, /* DRD1A: var5 = var3; FN=0 MORE init=4 WS=0 RS=0 */
+ 0xf88103a4, /* LCDEXT: idx2 = *idx1, idx3 = var2; idx2 < var14; idx2 += inc4, idx3 += inc4 */
+ 0x801a6024, /* LCD: idx4 = var0; ; idx4 += inc4 */
+ 0x10001708, /* DRD1A: var5 = idx1; FN=0 MORE init=0 WS=0 RS=0 */
+ 0x60140002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT init=0 WS=2 RS=2 */
+ 0x0cccfccf, /* DRD2B1: *idx3 = EU3(); EU3(*idx3,var15) */
+ 0x991a002c, /* LCD: idx2 = idx2, idx3 = idx4; idx2 once var0; idx2 += inc5, idx3 += inc4 */
+ 0x70000002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT MORE init=0 WS=0 RS=0 */
+ 0x024cfc4d, /* DRD2B1: var9 = EU3(); EU3(*idx1,var13) */
+ 0x60000003, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=3 EXT init=0 WS=0 RS=0 */
+ 0x0cccf247, /* DRD2B1: *idx3 = EU3(); EU3(var9,var7) */
+ 0x80004000, /* LCDEXT: idx2 = 0x00000000; ; */
+ 0xb8c80029, /* LCD: idx3 = *(idx1 + var0000001a); idx3 once var0; idx3 += inc5 */
+ 0x70000002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT MORE init=0 WS=0 RS=0 */
+ 0x088cf8d1, /* DRD2B1: idx2 = EU3(); EU3(idx3,var17) */
+ 0x00002f10, /* DRD1A: var11 = idx2; FN=0 init=0 WS=0 RS=0 */
+ 0x99198432, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var16; idx2 += inc6, idx3 += inc2 */
+ 0x008ac398, /* DRD1A: *idx0 = *idx3; FN=0 init=4 WS=1 RS=1 */
+ 0x80004000, /* LCDEXT: idx2 = 0x00000000; ; */
+ 0x9999802d, /* LCD: idx3 = idx3; idx3 once var0; idx3 += inc5 */
+ 0x70000002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT MORE init=0 WS=0 RS=0 */
+ 0x048cfc53, /* DRD2B1: var18 = EU3(); EU3(*idx1,var19) */
+ 0x60000008, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=8 EXT init=0 WS=0 RS=0 */
+ 0x088cf48b, /* DRD2B1: idx2 = EU3(); EU3(var18,var11) */
+ 0x99198481, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var18; idx2 += inc0, idx3 += inc1 */
+ 0x009ec398, /* DRD1A: *idx0 = *idx3; FN=0 init=4 WS=3 RS=3 */
+ 0x991983b2, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var14; idx2 += inc6, idx3 += inc2 */
+ 0x088ac398, /* DRD1A: *idx0 = *idx3; FN=0 TFD init=4 WS=1 RS=1 */
+ 0x9919002d, /* LCD: idx2 = idx2; idx2 once var0; idx2 += inc5 */
+ 0x60000005, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=5 EXT init=0 WS=0 RS=0 */
+ 0x0c4cf88e, /* DRD2B1: *idx1 = EU3(); EU3(idx2,var14) */
+ 0x000001f8, /* NOP */
+
+ /* VAR[13]-VAR[19] */
+ 0x0c000000,
+ 0x40000000,
+ 0x7fff7fff,
+ 0x00000000,
+ 0x00000003,
+ 0x40000004,
+ 0x43ffffff,
+
+ /* INC[0]-INC[6] */
+ 0x40000000,
+ 0xe0000000,
+ 0xe0000000,
+ 0xa0000008,
+ 0x20000000,
+ 0x00000000,
+ 0x4000ffff,
+};
+
diff --git a/drivers/dma/bestcomm/bcom_gen_bd_rx_task.c b/drivers/dma/bestcomm/bcom_gen_bd_rx_task.c
new file mode 100644
index 0000000..efee022
--- /dev/null
+++ b/drivers/dma/bestcomm/bcom_gen_bd_rx_task.c
@@ -0,0 +1,63 @@
+/*
+ * Bestcomm GenBD RX task microcode
+ *
+ * Copyright (C) 2006 AppSpec Computer Technologies Corp.
+ * Jeff Gibbons <jeff.gibbons@appspec.com>
+ * Copyright (c) 2004 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Based on BestCommAPI-2.2/code_dma/image_rtos1/dma_image.hex
+ * on Tue Mar 4 10:14:12 2006 GMT
+ *
+ */
+
+#include <asm/types.h>
+
+/*
+ * The header consists of the following fields:
+ * u32 magic;
+ * u8 desc_size;
+ * u8 var_size;
+ * u8 inc_size;
+ * u8 first_var;
+ * u8 reserved[8];
+ *
+ * The size fields contain the number of 32-bit words.
+ */
+
+u32 bcom_gen_bd_rx_task[] = {
+ /* header */
+ 0x4243544b,
+ 0x0d020409,
+ 0x00000000,
+ 0x00000000,
+
+ /* Task descriptors */
+ 0x808220da, /* LCD: idx0 = var1, idx1 = var4; idx1 <= var3; idx0 += inc3, idx1 += inc2 */
+ 0x13e01010, /* DRD1A: var4 = var2; FN=0 MORE init=31 WS=0 RS=0 */
+ 0xb880025b, /* LCD: idx2 = *idx1, idx3 = var0; idx2 < var9; idx2 += inc3, idx3 += inc3 */
+ 0x10001308, /* DRD1A: var4 = idx1; FN=0 MORE init=0 WS=0 RS=0 */
+ 0x60140002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT init=0 WS=2 RS=2 */
+ 0x0cccfcca, /* DRD2B1: *idx3 = EU3(); EU3(*idx3,var10) */
+ 0xd9190240, /* LCDEXT: idx2 = idx2; idx2 > var9; idx2 += inc0 */
+ 0xb8c5e009, /* LCD: idx3 = *(idx1 + var00000015); ; idx3 += inc1 */
+ 0x07fecf80, /* DRD1A: *idx3 = *idx0; FN=0 INT init=31 WS=3 RS=3 */
+ 0x99190024, /* LCD: idx2 = idx2; idx2 once var0; idx2 += inc4 */
+ 0x60000005, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=5 EXT init=0 WS=0 RS=0 */
+ 0x0c4cf889, /* DRD2B1: *idx1 = EU3(); EU3(idx2,var9) */
+ 0x000001f8, /* NOP */
+
+ /* VAR[9]-VAR[10] */
+ 0x40000000,
+ 0x7fff7fff,
+
+ /* INC[0]-INC[3] */
+ 0x40000000,
+ 0xe0000000,
+ 0xa0000008,
+ 0x20000000,
+};
+
diff --git a/drivers/dma/bestcomm/bcom_gen_bd_tx_task.c b/drivers/dma/bestcomm/bcom_gen_bd_tx_task.c
new file mode 100644
index 0000000..c605aa4
--- /dev/null
+++ b/drivers/dma/bestcomm/bcom_gen_bd_tx_task.c
@@ -0,0 +1,69 @@
+/*
+ * Bestcomm GenBD TX task microcode
+ *
+ * Copyright (C) 2006 AppSpec Computer Technologies Corp.
+ * Jeff Gibbons <jeff.gibbons@appspec.com>
+ * Copyright (c) 2004 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Based on BestCommAPI-2.2/code_dma/image_rtos1/dma_image.hex
+ * on Tue Mar 4 10:14:12 2006 GMT
+ *
+ */
+
+#include <asm/types.h>
+
+/*
+ * The header consists of the following fields:
+ * u32 magic;
+ * u8 desc_size;
+ * u8 var_size;
+ * u8 inc_size;
+ * u8 first_var;
+ * u8 reserved[8];
+ *
+ * The size fields contain the number of 32-bit words.
+ */
+
+u32 bcom_gen_bd_tx_task[] = {
+ /* header */
+ 0x4243544b,
+ 0x0f040609,
+ 0x00000000,
+ 0x00000000,
+
+ /* Task descriptors */
+ 0x800220e3, /* LCD: idx0 = var0, idx1 = var4; idx1 <= var3; idx0 += inc4, idx1 += inc3 */
+ 0x13e01010, /* DRD1A: var4 = var2; FN=0 MORE init=31 WS=0 RS=0 */
+ 0xb8808264, /* LCD: idx2 = *idx1, idx3 = var1; idx2 < var9; idx2 += inc4, idx3 += inc4 */
+ 0x10001308, /* DRD1A: var4 = idx1; FN=0 MORE init=0 WS=0 RS=0 */
+ 0x60140002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT init=0 WS=2 RS=2 */
+ 0x0cccfcca, /* DRD2B1: *idx3 = EU3(); EU3(*idx3,var10) */
+ 0xd9190300, /* LCDEXT: idx2 = idx2; idx2 > var12; idx2 += inc0 */
+ 0xb8c5e009, /* LCD: idx3 = *(idx1 + var00000015); ; idx3 += inc1 */
+ 0x03fec398, /* DRD1A: *idx0 = *idx3; FN=0 init=31 WS=3 RS=3 */
+ 0x9919826a, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var9; idx2 += inc5, idx3 += inc2 */
+ 0x0feac398, /* DRD1A: *idx0 = *idx3; FN=0 TFD INT init=31 WS=1 RS=1 */
+ 0x99190036, /* LCD: idx2 = idx2; idx2 once var0; idx2 += inc6 */
+ 0x60000005, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=5 EXT init=0 WS=0 RS=0 */
+ 0x0c4cf889, /* DRD2B1: *idx1 = EU3(); EU3(idx2,var9) */
+ 0x000001f8, /* NOP */
+
+ /* VAR[9]-VAR[12] */
+ 0x40000000,
+ 0x7fff7fff,
+ 0x00000000,
+ 0x40000004,
+
+ /* INC[0]-INC[5] */
+ 0x40000000,
+ 0xe0000000,
+ 0xe0000000,
+ 0xa0000008,
+ 0x20000000,
+ 0x4000ffff,
+};
+
diff --git a/drivers/dma/bestcomm/bestcomm.c b/drivers/dma/bestcomm/bestcomm.c
new file mode 100644
index 0000000..a8c2e29
--- /dev/null
+++ b/drivers/dma/bestcomm/bestcomm.c
@@ -0,0 +1,531 @@
+/*
+ * Driver for MPC52xx processor BestComm peripheral controller
+ *
+ *
+ * Copyright (C) 2006-2007 Sylvain Munaut <tnt@246tNt.com>
+ * Copyright (C) 2005 Varma Electronics Oy,
+ * ( by Andrey Volkov <avolkov@varma-el.com> )
+ * Copyright (C) 2003-2004 MontaVista, Software, Inc.
+ * ( by Dale Farnsworth <dfarnsworth@mvista.com> )
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/mpc52xx.h>
+
+#include <linux/fsl/bestcomm/sram.h>
+#include <linux/fsl/bestcomm/bestcomm_priv.h>
+#include "linux/fsl/bestcomm/bestcomm.h"
+
+#define DRIVER_NAME "bestcomm-core"
+
+/* MPC5200 device tree match tables */
+static struct of_device_id mpc52xx_sram_ids[] = {
+ { .compatible = "fsl,mpc5200-sram", },
+ { .compatible = "mpc5200-sram", },
+ {}
+};
+
+
+struct bcom_engine *bcom_eng = NULL;
+EXPORT_SYMBOL_GPL(bcom_eng); /* needed for inline functions */
+
+/* ======================================================================== */
+/* Public and private API */
+/* ======================================================================== */
+
+/* Private API */
+
+struct bcom_task *
+bcom_task_alloc(int bd_count, int bd_size, int priv_size)
+{
+ int i, tasknum = -1;
+ struct bcom_task *tsk;
+
+ /* Don't try to do anything if bestcomm init failed */
+ if (!bcom_eng)
+ return NULL;
+
+ /* Get and reserve a task num */
+ spin_lock(&bcom_eng->lock);
+
+ for (i=0; i<BCOM_MAX_TASKS; i++)
+ if (!bcom_eng->tdt[i].stop) { /* we use stop as a marker */
+ bcom_eng->tdt[i].stop = 0xfffffffful; /* dummy addr */
+ tasknum = i;
+ break;
+ }
+
+ spin_unlock(&bcom_eng->lock);
+
+ if (tasknum < 0)
+ return NULL;
+
+ /* Allocate our structure */
+ tsk = kzalloc(sizeof(struct bcom_task) + priv_size, GFP_KERNEL);
+ if (!tsk)
+ goto error;
+
+ tsk->tasknum = tasknum;
+ if (priv_size)
+ tsk->priv = (void*)tsk + sizeof(struct bcom_task);
+
+ /* Get IRQ of that task */
+ tsk->irq = irq_of_parse_and_map(bcom_eng->ofnode, tsk->tasknum);
+ if (tsk->irq == NO_IRQ)
+ goto error;
+
+ /* Init the BDs, if needed */
+ if (bd_count) {
+ tsk->cookie = kmalloc(sizeof(void*) * bd_count, GFP_KERNEL);
+ if (!tsk->cookie)
+ goto error;
+
+ tsk->bd = bcom_sram_alloc(bd_count * bd_size, 4, &tsk->bd_pa);
+ if (!tsk->bd)
+ goto error;
+ memset(tsk->bd, 0x00, bd_count * bd_size);
+
+ tsk->num_bd = bd_count;
+ tsk->bd_size = bd_size;
+ }
+
+ return tsk;
+
+error:
+ if (tsk) {
+ if (tsk->irq != NO_IRQ)
+ irq_dispose_mapping(tsk->irq);
+ bcom_sram_free(tsk->bd);
+ kfree(tsk->cookie);
+ kfree(tsk);
+ }
+
+ bcom_eng->tdt[tasknum].stop = 0;
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(bcom_task_alloc);
+
+void
+bcom_task_free(struct bcom_task *tsk)
+{
+ /* Stop the task */
+ bcom_disable_task(tsk->tasknum);
+
+ /* Clear TDT */
+ bcom_eng->tdt[tsk->tasknum].start = 0;
+ bcom_eng->tdt[tsk->tasknum].stop = 0;
+
+ /* Free everything */
+ irq_dispose_mapping(tsk->irq);
+ bcom_sram_free(tsk->bd);
+ kfree(tsk->cookie);
+ kfree(tsk);
+}
+EXPORT_SYMBOL_GPL(bcom_task_free);
+
+int
+bcom_load_image(int task, u32 *task_image)
+{
+ struct bcom_task_header *hdr = (struct bcom_task_header *)task_image;
+ struct bcom_tdt *tdt;
+ u32 *desc, *var, *inc;
+ u32 *desc_src, *var_src, *inc_src;
+
+ /* Safety checks */
+ if (hdr->magic != BCOM_TASK_MAGIC) {
+ printk(KERN_ERR DRIVER_NAME
+ ": Trying to load invalid microcode\n");
+ return -EINVAL;
+ }
+
+ if ((task < 0) || (task >= BCOM_MAX_TASKS)) {
+ printk(KERN_ERR DRIVER_NAME
+ ": Trying to load invalid task %d\n", task);
+ return -EINVAL;
+ }
+
+ /* Initial load or reload */
+ tdt = &bcom_eng->tdt[task];
+
+ if (tdt->start) {
+ desc = bcom_task_desc(task);
+ if (hdr->desc_size != bcom_task_num_descs(task)) {
+ printk(KERN_ERR DRIVER_NAME
+ ": Trying to reload wrong task image "
+ "(%d size %d/%d)!\n",
+ task,
+ hdr->desc_size,
+ bcom_task_num_descs(task));
+ return -EINVAL;
+ }
+ } else {
+ phys_addr_t start_pa;
+
+ desc = bcom_sram_alloc(hdr->desc_size * sizeof(u32), 4, &start_pa);
+ if (!desc)
+ return -ENOMEM;
+
+ tdt->start = start_pa;
+ tdt->stop = start_pa + ((hdr->desc_size-1) * sizeof(u32));
+ }
+
+ var = bcom_task_var(task);
+ inc = bcom_task_inc(task);
+
+ /* Clear & copy */
+ memset(var, 0x00, BCOM_VAR_SIZE);
+ memset(inc, 0x00, BCOM_INC_SIZE);
+
+ desc_src = (u32 *)(hdr + 1);
+ var_src = desc_src + hdr->desc_size;
+ inc_src = var_src + hdr->var_size;
+
+ memcpy(desc, desc_src, hdr->desc_size * sizeof(u32));
+ memcpy(var + hdr->first_var, var_src, hdr->var_size * sizeof(u32));
+ memcpy(inc, inc_src, hdr->inc_size * sizeof(u32));
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(bcom_load_image);
+
+void
+bcom_set_initiator(int task, int initiator)
+{
+ int i;
+ int num_descs;
+ u32 *desc;
+ int next_drd_has_initiator;
+
+ bcom_set_tcr_initiator(task, initiator);
+
+ /*
+  * Just setting tcr is apparently not enough due to some problem
+  * with it, so we also go through all the microcode and replace the
+  * initiator in the DRDs directly.
+  */
+
+ desc = bcom_task_desc(task);
+ next_drd_has_initiator = 1;
+ num_descs = bcom_task_num_descs(task);
+
+ for (i=0; i<num_descs; i++, desc++) {
+ if (!bcom_desc_is_drd(*desc))
+ continue;
+ if (next_drd_has_initiator)
+ if (bcom_desc_initiator(*desc) != BCOM_INITIATOR_ALWAYS)
+ bcom_set_desc_initiator(desc, initiator);
+ next_drd_has_initiator = !bcom_drd_is_extended(*desc);
+ }
+}
+EXPORT_SYMBOL_GPL(bcom_set_initiator);
+
+
+/* Public API */
+
+void
+bcom_enable(struct bcom_task *tsk)
+{
+ bcom_enable_task(tsk->tasknum);
+}
+EXPORT_SYMBOL_GPL(bcom_enable);
+
+void
+bcom_disable(struct bcom_task *tsk)
+{
+ bcom_disable_task(tsk->tasknum);
+}
+EXPORT_SYMBOL_GPL(bcom_disable);
+
+
+/* ======================================================================== */
+/* Engine init/cleanup */
+/* ======================================================================== */
+
+/* Function Descriptor table */
+/* this will need to be updated if Freescale changes their task code FDT */
+static u32 fdt_ops[] = {
+ 0xa0045670, /* FDT[48] - load_acc() */
+ 0x80045670, /* FDT[49] - unload_acc() */
+ 0x21800000, /* FDT[50] - and() */
+ 0x21e00000, /* FDT[51] - or() */
+ 0x21500000, /* FDT[52] - xor() */
+ 0x21400000, /* FDT[53] - andn() */
+ 0x21500000, /* FDT[54] - not() */
+ 0x20400000, /* FDT[55] - add() */
+ 0x20500000, /* FDT[56] - sub() */
+ 0x20800000, /* FDT[57] - lsh() */
+ 0x20a00000, /* FDT[58] - rsh() */
+ 0xc0170000, /* FDT[59] - crc8() */
+ 0xc0145670, /* FDT[60] - crc16() */
+ 0xc0345670, /* FDT[61] - crc32() */
+ 0xa0076540, /* FDT[62] - endian32() */
+ 0xa0000760, /* FDT[63] - endian16() */
+};
+
+
+static int bcom_engine_init(void)
+{
+ int task;
+ phys_addr_t tdt_pa, ctx_pa, var_pa, fdt_pa;
+ unsigned int tdt_size, ctx_size, var_size, fdt_size;
+
+ /* Allocate & clear SRAM zones for FDT, TDTs, contexts and vars/incs */
+ tdt_size = BCOM_MAX_TASKS * sizeof(struct bcom_tdt);
+ ctx_size = BCOM_MAX_TASKS * BCOM_CTX_SIZE;
+ var_size = BCOM_MAX_TASKS * (BCOM_VAR_SIZE + BCOM_INC_SIZE);
+ fdt_size = BCOM_FDT_SIZE;
+
+ bcom_eng->tdt = bcom_sram_alloc(tdt_size, sizeof(u32), &tdt_pa);
+ bcom_eng->ctx = bcom_sram_alloc(ctx_size, BCOM_CTX_ALIGN, &ctx_pa);
+ bcom_eng->var = bcom_sram_alloc(var_size, BCOM_VAR_ALIGN, &var_pa);
+ bcom_eng->fdt = bcom_sram_alloc(fdt_size, BCOM_FDT_ALIGN, &fdt_pa);
+
+ if (!bcom_eng->tdt || !bcom_eng->ctx || !bcom_eng->var || !bcom_eng->fdt) {
+ printk(KERN_ERR "DMA: SRAM alloc failed in engine init !\n");
+
+ bcom_sram_free(bcom_eng->tdt);
+ bcom_sram_free(bcom_eng->ctx);
+ bcom_sram_free(bcom_eng->var);
+ bcom_sram_free(bcom_eng->fdt);
+
+ return -ENOMEM;
+ }
+
+ memset(bcom_eng->tdt, 0x00, tdt_size);
+ memset(bcom_eng->ctx, 0x00, ctx_size);
+ memset(bcom_eng->var, 0x00, var_size);
+ memset(bcom_eng->fdt, 0x00, fdt_size);
+
+ /* Copy the FDT for the EU#3 */
+ memcpy(&bcom_eng->fdt[48], fdt_ops, sizeof(fdt_ops));
+
+ /* Initialize Task base structure */
+ for (task=0; task<BCOM_MAX_TASKS; task++)
+ {
+ out_be16(&bcom_eng->regs->tcr[task], 0);
+ out_8(&bcom_eng->regs->ipr[task], 0);
+
+ bcom_eng->tdt[task].context = ctx_pa;
+ bcom_eng->tdt[task].var = var_pa;
+ bcom_eng->tdt[task].fdt = fdt_pa;
+
+ var_pa += BCOM_VAR_SIZE + BCOM_INC_SIZE;
+ ctx_pa += BCOM_CTX_SIZE;
+ }
+
+ out_be32(&bcom_eng->regs->taskBar, tdt_pa);
+
+ /* Init 'always' initiator */
+ out_8(&bcom_eng->regs->ipr[BCOM_INITIATOR_ALWAYS], BCOM_IPR_ALWAYS);
+
+ /* Disable COMM Bus Prefetch on the original 5200; it's broken */
+ if ((mfspr(SPRN_SVR) & MPC5200_SVR_MASK) == MPC5200_SVR)
+ bcom_disable_prefetch();
+
+ /* Init lock */
+ spin_lock_init(&bcom_eng->lock);
+
+ return 0;
+}
+
+static void
+bcom_engine_cleanup(void)
+{
+ int task;
+
+ /* Stop all tasks */
+ for (task=0; task<BCOM_MAX_TASKS; task++)
+ {
+ out_be16(&bcom_eng->regs->tcr[task], 0);
+ out_8(&bcom_eng->regs->ipr[task], 0);
+ }
+
+ out_be32(&bcom_eng->regs->taskBar, 0ul);
+
+ /* Release the SRAM zones */
+ bcom_sram_free(bcom_eng->tdt);
+ bcom_sram_free(bcom_eng->ctx);
+ bcom_sram_free(bcom_eng->var);
+ bcom_sram_free(bcom_eng->fdt);
+}
+
+
+/* ======================================================================== */
+/* OF platform driver */
+/* ======================================================================== */
+
+static int mpc52xx_bcom_probe(struct platform_device *op)
+{
+ struct device_node *ofn_sram;
+ struct resource res_bcom;
+
+ int rv;
+
+ /* Inform user we're ok so far */
+ printk(KERN_INFO "DMA: MPC52xx BestComm driver\n");
+
+ /* Get the bestcomm node */
+ of_node_get(op->dev.of_node);
+
+ /* Prepare SRAM */
+ ofn_sram = of_find_matching_node(NULL, mpc52xx_sram_ids);
+ if (!ofn_sram) {
+ printk(KERN_ERR DRIVER_NAME ": "
+ "No SRAM found in device tree\n");
+ rv = -ENODEV;
+ goto error_ofput;
+ }
+ rv = bcom_sram_init(ofn_sram, DRIVER_NAME);
+ of_node_put(ofn_sram);
+
+ if (rv) {
+ printk(KERN_ERR DRIVER_NAME ": "
+ "Error in SRAM init\n");
+ goto error_ofput;
+ }
+
+ /* Get a clean struct */
+ bcom_eng = kzalloc(sizeof(struct bcom_engine), GFP_KERNEL);
+ if (!bcom_eng) {
+ printk(KERN_ERR DRIVER_NAME ": "
+ "Can't allocate state structure\n");
+ rv = -ENOMEM;
+ goto error_sramclean;
+ }
+
+ /* Save the node */
+ bcom_eng->ofnode = op->dev.of_node;
+
+ /* Get, reserve & map io */
+ if (of_address_to_resource(op->dev.of_node, 0, &res_bcom)) {
+ printk(KERN_ERR DRIVER_NAME ": "
+ "Can't get resource\n");
+ rv = -EINVAL;
+ goto error_sramclean;
+ }
+
+ if (!request_mem_region(res_bcom.start, resource_size(&res_bcom),
+ DRIVER_NAME)) {
+ printk(KERN_ERR DRIVER_NAME ": "
+ "Can't request registers region\n");
+ rv = -EBUSY;
+ goto error_sramclean;
+ }
+
+ bcom_eng->regs_base = res_bcom.start;
+ bcom_eng->regs = ioremap(res_bcom.start, sizeof(struct mpc52xx_sdma));
+ if (!bcom_eng->regs) {
+ printk(KERN_ERR DRIVER_NAME ": "
+ "Can't map registers\n");
+ rv = -ENOMEM;
+ goto error_release;
+ }
+
+ /* Now, do the real init */
+ rv = bcom_engine_init();
+ if (rv)
+ goto error_unmap;
+
+ /* Done ! */
+ printk(KERN_INFO "DMA: MPC52xx BestComm engine @%08lx ok !\n",
+ (long)bcom_eng->regs_base);
+
+ return 0;
+
+ /* Error path */
+error_unmap:
+ iounmap(bcom_eng->regs);
+error_release:
+ release_mem_region(res_bcom.start, sizeof(struct mpc52xx_sdma));
+error_sramclean:
+ kfree(bcom_eng);
+ bcom_sram_cleanup();
+error_ofput:
+ of_node_put(op->dev.of_node);
+
+ printk(KERN_ERR "DMA: MPC52xx BestComm init failed !\n");
+
+ return rv;
+}
+
+
+static int mpc52xx_bcom_remove(struct platform_device *op)
+{
+ /* Clean up the engine */
+ bcom_engine_cleanup();
+
+ /* Cleanup SRAM */
+ bcom_sram_cleanup();
+
+ /* Release regs */
+ iounmap(bcom_eng->regs);
+ release_mem_region(bcom_eng->regs_base, sizeof(struct mpc52xx_sdma));
+
+ /* Release the node */
+ of_node_put(bcom_eng->ofnode);
+
+ /* Release memory */
+ kfree(bcom_eng);
+ bcom_eng = NULL;
+
+ return 0;
+}
+
+static struct of_device_id mpc52xx_bcom_of_match[] = {
+ { .compatible = "fsl,mpc5200-bestcomm", },
+ { .compatible = "mpc5200-bestcomm", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, mpc52xx_bcom_of_match);
+
+
+static struct platform_driver mpc52xx_bcom_of_platform_driver = {
+ .probe = mpc52xx_bcom_probe,
+ .remove = mpc52xx_bcom_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = mpc52xx_bcom_of_match,
+ },
+};
+
+
+/* ======================================================================== */
+/* Module */
+/* ======================================================================== */
+
+static int __init
+mpc52xx_bcom_init(void)
+{
+ return platform_driver_register(&mpc52xx_bcom_of_platform_driver);
+}
+
+static void __exit
+mpc52xx_bcom_exit(void)
+{
+ platform_driver_unregister(&mpc52xx_bcom_of_platform_driver);
+}
+
+/* If we're not a module, we must make sure everything is setup before */
+/* anyone tries to use us ... that's why we use subsys_initcall instead */
+/* of module_init. */
+subsys_initcall(mpc52xx_bcom_init);
+module_exit(mpc52xx_bcom_exit);
+
+MODULE_DESCRIPTION("Freescale MPC52xx BestComm DMA");
+MODULE_AUTHOR("Sylvain Munaut <tnt@246tNt.com>");
+MODULE_AUTHOR("Andrey Volkov <avolkov@varma-el.com>");
+MODULE_AUTHOR("Dale Farnsworth <dfarnsworth@mvista.com>");
+MODULE_LICENSE("GPL v2");
+
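
bcom_enable() and bcom_disable() above defer to inline helpers kept in the private header; a sketch of their assumed shape (TASK_ENABLE and the tcr[] layout come from bestcomm_priv.h, so treat the details as illustrative):

#define TASK_ENABLE	0x8000	/* assumed: enable bit in a task's TCR */

/* Assumed shape of the bestcomm_priv.h inlines: set or clear the
 * enable bit in the per-task control register. */
static inline void bcom_enable_task(int task)
{
	u16 reg = in_be16(&bcom_eng->regs->tcr[task]);

	out_be16(&bcom_eng->regs->tcr[task], reg | TASK_ENABLE);
}

static inline void bcom_disable_task(int task)
{
	u16 reg = in_be16(&bcom_eng->regs->tcr[task]);

	out_be16(&bcom_eng->regs->tcr[task], reg & ~TASK_ENABLE);
}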
diff --git a/drivers/dma/bestcomm/fec.c b/drivers/dma/bestcomm/fec.c
new file mode 100644
index 0000000..7f1fb1c
--- /dev/null
+++ b/drivers/dma/bestcomm/fec.c
@@ -0,0 +1,270 @@
+/*
+ * Bestcomm FEC tasks driver
+ *
+ *
+ * Copyright (C) 2006-2007 Sylvain Munaut <tnt@246tNt.com>
+ * Copyright (C) 2003-2004 MontaVista, Software, Inc.
+ * ( by Dale Farnsworth <dfarnsworth@mvista.com> )
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <asm/io.h>
+
+#include <linux/fsl/bestcomm/bestcomm.h>
+#include <linux/fsl/bestcomm/bestcomm_priv.h>
+#include <linux/fsl/bestcomm/fec.h>
+
+
+/* ======================================================================== */
+/* Task image/var/inc */
+/* ======================================================================== */
+
+/* fec tasks images */
+extern u32 bcom_fec_rx_task[];
+extern u32 bcom_fec_tx_task[];
+
+/* rx task vars that need to be set before enabling the task */
+struct bcom_fec_rx_var {
+ u32 enable; /* (u16*) address of task's control register */
+ u32 fifo; /* (u32*) address of fec's fifo */
+ u32 bd_base; /* (struct bcom_bd*) beginning of ring buffer */
+ u32 bd_last; /* (struct bcom_bd*) end of ring buffer */
+ u32 bd_start; /* (struct bcom_bd*) current bd */
+ u32 buffer_size; /* size of receive buffer */
+};
+
+/* rx task incs that need to be set before enabling the task */
+struct bcom_fec_rx_inc {
+ u16 pad0;
+ s16 incr_bytes;
+ u16 pad1;
+ s16 incr_dst;
+ u16 pad2;
+ s16 incr_dst_ma;
+};
+
+/* tx task vars that need to be set before enabling the task */
+struct bcom_fec_tx_var {
+ u32 DRD; /* (u32*) address of self-modified DRD */
+ u32 fifo; /* (u32*) address of fec's fifo */
+ u32 enable; /* (u16*) address of task's control register */
+ u32 bd_base; /* (struct bcom_bd*) beginning of ring buffer */
+ u32 bd_last; /* (struct bcom_bd*) end of ring buffer */
+ u32 bd_start; /* (struct bcom_bd*) current bd */
+ u32 buffer_size; /* set by uCode for each packet */
+};
+
+/* tx task incs that need to be set before enabling the task */
+struct bcom_fec_tx_inc {
+ u16 pad0;
+ s16 incr_bytes;
+ u16 pad1;
+ s16 incr_src;
+ u16 pad2;
+ s16 incr_src_ma;
+};
+
+/* private structure in the task */
+struct bcom_fec_priv {
+ phys_addr_t fifo;
+ int maxbufsize;
+};
+
+
+/* ======================================================================== */
+/* Task support code */
+/* ======================================================================== */
+
+struct bcom_task *
+bcom_fec_rx_init(int queue_len, phys_addr_t fifo, int maxbufsize)
+{
+ struct bcom_task *tsk;
+ struct bcom_fec_priv *priv;
+
+ tsk = bcom_task_alloc(queue_len, sizeof(struct bcom_fec_bd),
+ sizeof(struct bcom_fec_priv));
+ if (!tsk)
+ return NULL;
+
+ tsk->flags = BCOM_FLAGS_NONE;
+
+ priv = tsk->priv;
+ priv->fifo = fifo;
+ priv->maxbufsize = maxbufsize;
+
+ if (bcom_fec_rx_reset(tsk)) {
+ bcom_task_free(tsk);
+ return NULL;
+ }
+
+ return tsk;
+}
+EXPORT_SYMBOL_GPL(bcom_fec_rx_init);
+
+int
+bcom_fec_rx_reset(struct bcom_task *tsk)
+{
+ struct bcom_fec_priv *priv = tsk->priv;
+ struct bcom_fec_rx_var *var;
+ struct bcom_fec_rx_inc *inc;
+
+ /* Shutdown the task */
+ bcom_disable_task(tsk->tasknum);
+
+ /* Reset the microcode */
+ var = (struct bcom_fec_rx_var *) bcom_task_var(tsk->tasknum);
+ inc = (struct bcom_fec_rx_inc *) bcom_task_inc(tsk->tasknum);
+
+ if (bcom_load_image(tsk->tasknum, bcom_fec_rx_task))
+ return -1;
+
+ var->enable = bcom_eng->regs_base +
+ offsetof(struct mpc52xx_sdma, tcr[tsk->tasknum]);
+ var->fifo = (u32) priv->fifo;
+ var->bd_base = tsk->bd_pa;
+ var->bd_last = tsk->bd_pa + ((tsk->num_bd-1) * tsk->bd_size);
+ var->bd_start = tsk->bd_pa;
+ var->buffer_size = priv->maxbufsize;
+
+ inc->incr_bytes = -(s16)sizeof(u32); /* These should be in the */
+ inc->incr_dst = sizeof(u32); /* task image, but we stick */
+ inc->incr_dst_ma= sizeof(u8); /* to the official ones */
+
+ /* Reset the BDs */
+ tsk->index = 0;
+ tsk->outdex = 0;
+
+ memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size);
+
+ /* Configure some stuff */
+ bcom_set_task_pragma(tsk->tasknum, BCOM_FEC_RX_BD_PRAGMA);
+ bcom_set_task_auto_start(tsk->tasknum, tsk->tasknum);
+
+ out_8(&bcom_eng->regs->ipr[BCOM_INITIATOR_FEC_RX], BCOM_IPR_FEC_RX);
+
+ out_be32(&bcom_eng->regs->IntPend, 1<<tsk->tasknum); /* Clear ints */
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(bcom_fec_rx_reset);
+
+void
+bcom_fec_rx_release(struct bcom_task *tsk)
+{
+ /* Nothing special for the FEC tasks */
+ bcom_task_free(tsk);
+}
+EXPORT_SYMBOL_GPL(bcom_fec_rx_release);
+
+
+
+/*
+ * Return the 2nd to last DRD.
+ * This is an ugly hack, but at least it's only done once at
+ * initialization.
+ */
+static u32 *self_modified_drd(int tasknum)
+{
+ u32 *desc;
+ int num_descs;
+ int drd_count;
+ int i;
+
+ num_descs = bcom_task_num_descs(tasknum);
+ desc = bcom_task_desc(tasknum) + num_descs - 1;
+ drd_count = 0;
+ for (i=0; i<num_descs; i++, desc--)
+ if (bcom_desc_is_drd(*desc) && ++drd_count == 3)
+ break;
+ return desc;
+}
+
+struct bcom_task *
+bcom_fec_tx_init(int queue_len, phys_addr_t fifo)
+{
+ struct bcom_task *tsk;
+ struct bcom_fec_priv *priv;
+
+ tsk = bcom_task_alloc(queue_len, sizeof(struct bcom_fec_bd),
+ sizeof(struct bcom_fec_priv));
+ if (!tsk)
+ return NULL;
+
+ tsk->flags = BCOM_FLAGS_ENABLE_TASK;
+
+ priv = tsk->priv;
+ priv->fifo = fifo;
+
+ if (bcom_fec_tx_reset(tsk)) {
+ bcom_task_free(tsk);
+ return NULL;
+ }
+
+ return tsk;
+}
+EXPORT_SYMBOL_GPL(bcom_fec_tx_init);
+
+int
+bcom_fec_tx_reset(struct bcom_task *tsk)
+{
+ struct bcom_fec_priv *priv = tsk->priv;
+ struct bcom_fec_tx_var *var;
+ struct bcom_fec_tx_inc *inc;
+
+ /* Shutdown the task */
+ bcom_disable_task(tsk->tasknum);
+
+ /* Reset the microcode */
+ var = (struct bcom_fec_tx_var *) bcom_task_var(tsk->tasknum);
+ inc = (struct bcom_fec_tx_inc *) bcom_task_inc(tsk->tasknum);
+
+ if (bcom_load_image(tsk->tasknum, bcom_fec_tx_task))
+ return -1;
+
+ var->enable = bcom_eng->regs_base +
+ offsetof(struct mpc52xx_sdma, tcr[tsk->tasknum]);
+ var->fifo = (u32) priv->fifo;
+ var->DRD = bcom_sram_va2pa(self_modified_drd(tsk->tasknum));
+ var->bd_base = tsk->bd_pa;
+ var->bd_last = tsk->bd_pa + ((tsk->num_bd-1) * tsk->bd_size);
+ var->bd_start = tsk->bd_pa;
+
+ inc->incr_bytes = -(s16)sizeof(u32); /* These should be in the */
+ inc->incr_src = sizeof(u32); /* task image, but we stick */
+ inc->incr_src_ma= sizeof(u8); /* to the official ones */
+
+ /* Reset the BDs */
+ tsk->index = 0;
+ tsk->outdex = 0;
+
+ memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size);
+
+ /* Configure some stuff */
+ bcom_set_task_pragma(tsk->tasknum, BCOM_FEC_TX_BD_PRAGMA);
+ bcom_set_task_auto_start(tsk->tasknum, tsk->tasknum);
+
+ out_8(&bcom_eng->regs->ipr[BCOM_INITIATOR_FEC_TX], BCOM_IPR_FEC_TX);
+
+ out_be32(&bcom_eng->regs->IntPend, 1<<tsk->tasknum); /* Clear ints */
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(bcom_fec_tx_reset);
+
+void
+bcom_fec_tx_release(struct bcom_task *tsk)
+{
+ /* Nothing special for the FEC tasks */
+ bcom_task_free(tsk);
+}
+EXPORT_SYMBOL_GPL(bcom_fec_tx_release);
+
+
+MODULE_DESCRIPTION("BestComm FEC tasks driver");
+MODULE_AUTHOR("Dale Farnsworth <dfarnsworth@mvista.com>");
+MODULE_LICENSE("GPL v2");
+
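
A consumer such as the fec_mpc52xx Ethernet driver runs these tasks through the generic BD-ring helpers; a hedged sketch of the RX side, where bcom_prepare_next_buffer()/bcom_submit_next_buffer()/bcom_retrieve_buffer()/bcom_buffer_done() are assumed from bestcomm.h and FEC_RX_BUFFER_SIZE is a hypothetical driver constant:

#include <linux/fsl/bestcomm/bestcomm.h>
#include <linux/fsl/bestcomm/fec.h>

#define FEC_RX_BUFFER_SIZE	1536	/* hypothetical */

/* Hypothetical: queue an empty receive buffer on the ring. */
static void foo_fec_rx_queue(struct bcom_task *rx, dma_addr_t buf_pa)
{
	struct bcom_fec_bd *bd;

	bd = (struct bcom_fec_bd *)bcom_prepare_next_buffer(rx);
	bd->status = FEC_RX_BUFFER_SIZE;
	bd->skb_pa = buf_pa;
	bcom_submit_next_buffer(rx, NULL);	/* cookie is driver-private */
}

/* Hypothetical ISR tail: collect all completed buffers. */
static void foo_fec_rx_complete(struct bcom_task *rx)
{
	struct bcom_fec_bd *bd;
	u32 status;

	while (bcom_buffer_done(rx)) {
		bcom_retrieve_buffer(rx, &status, (struct bcom_bd **)&bd);
		/* ... hand the filled buffer to the network stack ... */
	}
}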
diff --git a/drivers/dma/bestcomm/gen_bd.c b/drivers/dma/bestcomm/gen_bd.c
new file mode 100644
index 0000000..1a5b22d
--- /dev/null
+++ b/drivers/dma/bestcomm/gen_bd.c
@@ -0,0 +1,354 @@
+/*
+ * Driver for MPC52xx processor BestComm General Buffer Descriptor
+ *
+ * Copyright (C) 2007 Sylvain Munaut <tnt@246tNt.com>
+ * Copyright (C) 2006 AppSpec Computer Technologies Corp.
+ * Jeff Gibbons <jeff.gibbons@appspec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <asm/errno.h>
+#include <asm/io.h>
+
+#include <asm/mpc52xx.h>
+#include <asm/mpc52xx_psc.h>
+
+#include <linux/fsl/bestcomm/bestcomm.h>
+#include <linux/fsl/bestcomm/bestcomm_priv.h>
+#include <linux/fsl/bestcomm/gen_bd.h>
+
+
+/* ======================================================================== */
+/* Task image/var/inc */
+/* ======================================================================== */
+
+/* gen_bd tasks images */
+extern u32 bcom_gen_bd_rx_task[];
+extern u32 bcom_gen_bd_tx_task[];
+
+/* rx task vars that need to be set before enabling the task */
+struct bcom_gen_bd_rx_var {
+ u32 enable; /* (u16*) address of task's control register */
+ u32 fifo; /* (u32*) address of gen_bd's fifo */
+ u32 bd_base; /* (struct bcom_bd*) beginning of ring buffer */
+ u32 bd_last; /* (struct bcom_bd*) end of ring buffer */
+ u32 bd_start; /* (struct bcom_bd*) current bd */
+ u32 buffer_size; /* size of receive buffer */
+};
+
+/* rx task incs that need to be set before enabling the task */
+struct bcom_gen_bd_rx_inc {
+ u16 pad0;
+ s16 incr_bytes;
+ u16 pad1;
+ s16 incr_dst;
+};
+
+/* tx task vars that need to be set before enabling the task */
+struct bcom_gen_bd_tx_var {
+ u32 fifo; /* (u32*) address of gen_bd's fifo */
+ u32 enable; /* (u16*) address of task's control register */
+ u32 bd_base; /* (struct bcom_bd*) beginning of ring buffer */
+ u32 bd_last; /* (struct bcom_bd*) end of ring buffer */
+ u32 bd_start; /* (struct bcom_bd*) current bd */
+ u32 buffer_size; /* set by uCode for each packet */
+};
+
+/* tx task incs that need to be set before enabling the task */
+struct bcom_gen_bd_tx_inc {
+ u16 pad0;
+ s16 incr_bytes;
+ u16 pad1;
+ s16 incr_src;
+ u16 pad2;
+ s16 incr_src_ma;
+};
+
+/* private structure */
+struct bcom_gen_bd_priv {
+ phys_addr_t fifo;
+ int initiator;
+ int ipr;
+ int maxbufsize;
+};
+
+
+/* ======================================================================== */
+/* Task support code */
+/* ======================================================================== */
+
+struct bcom_task *
+bcom_gen_bd_rx_init(int queue_len, phys_addr_t fifo,
+ int initiator, int ipr, int maxbufsize)
+{
+ struct bcom_task *tsk;
+ struct bcom_gen_bd_priv *priv;
+
+ tsk = bcom_task_alloc(queue_len, sizeof(struct bcom_gen_bd),
+ sizeof(struct bcom_gen_bd_priv));
+ if (!tsk)
+ return NULL;
+
+ tsk->flags = BCOM_FLAGS_NONE;
+
+ priv = tsk->priv;
+ priv->fifo = fifo;
+ priv->initiator = initiator;
+ priv->ipr = ipr;
+ priv->maxbufsize = maxbufsize;
+
+ if (bcom_gen_bd_rx_reset(tsk)) {
+ bcom_task_free(tsk);
+ return NULL;
+ }
+
+ return tsk;
+}
+EXPORT_SYMBOL_GPL(bcom_gen_bd_rx_init);
+
+int
+bcom_gen_bd_rx_reset(struct bcom_task *tsk)
+{
+ struct bcom_gen_bd_priv *priv = tsk->priv;
+ struct bcom_gen_bd_rx_var *var;
+ struct bcom_gen_bd_rx_inc *inc;
+
+ /* Shutdown the task */
+ bcom_disable_task(tsk->tasknum);
+
+ /* Reset the microcode */
+ var = (struct bcom_gen_bd_rx_var *) bcom_task_var(tsk->tasknum);
+ inc = (struct bcom_gen_bd_rx_inc *) bcom_task_inc(tsk->tasknum);
+
+ if (bcom_load_image(tsk->tasknum, bcom_gen_bd_rx_task))
+ return -1;
+
+ var->enable = bcom_eng->regs_base +
+ offsetof(struct mpc52xx_sdma, tcr[tsk->tasknum]);
+ var->fifo = (u32) priv->fifo;
+ var->bd_base = tsk->bd_pa;
+ var->bd_last = tsk->bd_pa + ((tsk->num_bd-1) * tsk->bd_size);
+ var->bd_start = tsk->bd_pa;
+ var->buffer_size = priv->maxbufsize;
+
+ inc->incr_bytes = -(s16)sizeof(u32);
+ inc->incr_dst = sizeof(u32);
+
+ /* Reset the BDs */
+ tsk->index = 0;
+ tsk->outdex = 0;
+
+ memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size);
+
+ /* Configure some stuff */
+ bcom_set_task_pragma(tsk->tasknum, BCOM_GEN_RX_BD_PRAGMA);
+ bcom_set_task_auto_start(tsk->tasknum, tsk->tasknum);
+
+ out_8(&bcom_eng->regs->ipr[priv->initiator], priv->ipr);
+ bcom_set_initiator(tsk->tasknum, priv->initiator);
+
+ out_be32(&bcom_eng->regs->IntPend, 1<<tsk->tasknum); /* Clear ints */
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(bcom_gen_bd_rx_reset);
+
+void
+bcom_gen_bd_rx_release(struct bcom_task *tsk)
+{
+ /* Nothing special for the GenBD tasks */
+ bcom_task_free(tsk);
+}
+EXPORT_SYMBOL_GPL(bcom_gen_bd_rx_release);
+
+
+struct bcom_task *
+bcom_gen_bd_tx_init(int queue_len, phys_addr_t fifo,
+ int initiator, int ipr)
+{
+ struct bcom_task *tsk;
+ struct bcom_gen_bd_priv *priv;
+
+ tsk = bcom_task_alloc(queue_len, sizeof(struct bcom_gen_bd),
+ sizeof(struct bcom_gen_bd_priv));
+ if (!tsk)
+ return NULL;
+
+ tsk->flags = BCOM_FLAGS_NONE;
+
+ priv = tsk->priv;
+ priv->fifo = fifo;
+ priv->initiator = initiator;
+ priv->ipr = ipr;
+
+ if (bcom_gen_bd_tx_reset(tsk)) {
+ bcom_task_free(tsk);
+ return NULL;
+ }
+
+ return tsk;
+}
+EXPORT_SYMBOL_GPL(bcom_gen_bd_tx_init);
+
+int
+bcom_gen_bd_tx_reset(struct bcom_task *tsk)
+{
+ struct bcom_gen_bd_priv *priv = tsk->priv;
+ struct bcom_gen_bd_tx_var *var;
+ struct bcom_gen_bd_tx_inc *inc;
+
+ /* Shutdown the task */
+ bcom_disable_task(tsk->tasknum);
+
+ /* Reset the microcode */
+ var = (struct bcom_gen_bd_tx_var *) bcom_task_var(tsk->tasknum);
+ inc = (struct bcom_gen_bd_tx_inc *) bcom_task_inc(tsk->tasknum);
+
+ if (bcom_load_image(tsk->tasknum, bcom_gen_bd_tx_task))
+ return -1;
+
+ var->enable = bcom_eng->regs_base +
+ offsetof(struct mpc52xx_sdma, tcr[tsk->tasknum]);
+ var->fifo = (u32) priv->fifo;
+ var->bd_base = tsk->bd_pa;
+ var->bd_last = tsk->bd_pa + ((tsk->num_bd-1) * tsk->bd_size);
+ var->bd_start = tsk->bd_pa;
+
+ inc->incr_bytes = -(s16)sizeof(u32);
+ inc->incr_src = sizeof(u32);
+ inc->incr_src_ma = sizeof(u8);
+
+ /* Reset the BDs */
+ tsk->index = 0;
+ tsk->outdex = 0;
+
+ memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size);
+
+ /* Configure some stuff */
+ bcom_set_task_pragma(tsk->tasknum, BCOM_GEN_TX_BD_PRAGMA);
+ bcom_set_task_auto_start(tsk->tasknum, tsk->tasknum);
+
+ out_8(&bcom_eng->regs->ipr[priv->initiator], priv->ipr);
+ bcom_set_initiator(tsk->tasknum, priv->initiator);
+
+ out_be32(&bcom_eng->regs->IntPend, 1<<tsk->tasknum); /* Clear ints */
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(bcom_gen_bd_tx_reset);
+
+void
+bcom_gen_bd_tx_release(struct bcom_task *tsk)
+{
+ /* Nothing special for the GenBD tasks */
+ bcom_task_free(tsk);
+}
+EXPORT_SYMBOL_GPL(bcom_gen_bd_tx_release);
+
+/* ---------------------------------------------------------------------
+ * PSC support code
+ */
+
+/**
+ * bcom_psc_parameters - Bestcomm initialization value table for PSC devices
+ *
+ * This structure is only used internally. It is a lookup table for PSC
+ * specific parameters to bestcomm tasks.
+ */
+static struct bcom_psc_params {
+ int rx_initiator;
+ int rx_ipr;
+ int tx_initiator;
+ int tx_ipr;
+} bcom_psc_params[] = {
+ [0] = {
+ .rx_initiator = BCOM_INITIATOR_PSC1_RX,
+ .rx_ipr = BCOM_IPR_PSC1_RX,
+ .tx_initiator = BCOM_INITIATOR_PSC1_TX,
+ .tx_ipr = BCOM_IPR_PSC1_TX,
+ },
+ [1] = {
+ .rx_initiator = BCOM_INITIATOR_PSC2_RX,
+ .rx_ipr = BCOM_IPR_PSC2_RX,
+ .tx_initiator = BCOM_INITIATOR_PSC2_TX,
+ .tx_ipr = BCOM_IPR_PSC2_TX,
+ },
+ [2] = {
+ .rx_initiator = BCOM_INITIATOR_PSC3_RX,
+ .rx_ipr = BCOM_IPR_PSC3_RX,
+ .tx_initiator = BCOM_INITIATOR_PSC3_TX,
+ .tx_ipr = BCOM_IPR_PSC3_TX,
+ },
+ [3] = {
+ .rx_initiator = BCOM_INITIATOR_PSC4_RX,
+ .rx_ipr = BCOM_IPR_PSC4_RX,
+ .tx_initiator = BCOM_INITIATOR_PSC4_TX,
+ .tx_ipr = BCOM_IPR_PSC4_TX,
+ },
+ [4] = {
+ .rx_initiator = BCOM_INITIATOR_PSC5_RX,
+ .rx_ipr = BCOM_IPR_PSC5_RX,
+ .tx_initiator = BCOM_INITIATOR_PSC5_TX,
+ .tx_ipr = BCOM_IPR_PSC5_TX,
+ },
+ [5] = {
+ .rx_initiator = BCOM_INITIATOR_PSC6_RX,
+ .rx_ipr = BCOM_IPR_PSC6_RX,
+ .tx_initiator = BCOM_INITIATOR_PSC6_TX,
+ .tx_ipr = BCOM_IPR_PSC6_TX,
+ },
+};
+
+/**
+ * bcom_psc_gen_bd_rx_init - Allocate a receive bcom_task for a PSC port
+ * @psc_num: Number of the PSC to allocate a task for
+ * @queue_len: number of buffer descriptors to allocate for the task
+ * @fifo: physical address of FIFO register
+ * @maxbufsize: Maximum receive data size in bytes.
+ *
+ * Allocate a bestcomm task structure for receiving data from a PSC.
+ */
+struct bcom_task * bcom_psc_gen_bd_rx_init(unsigned psc_num, int queue_len,
+ phys_addr_t fifo, int maxbufsize)
+{
+ if (psc_num >= MPC52xx_PSC_MAXNUM)
+ return NULL;
+
+ return bcom_gen_bd_rx_init(queue_len, fifo,
+ bcom_psc_params[psc_num].rx_initiator,
+ bcom_psc_params[psc_num].rx_ipr,
+ maxbufsize);
+}
+EXPORT_SYMBOL_GPL(bcom_psc_gen_bd_rx_init);
+
+/**
+ * bcom_psc_gen_bd_tx_init - Allocate a transmit bcom_task for a PSC port
+ * @psc_num: Number of the PSC to allocate a task for
+ * @queue_len: number of buffer descriptors to allocate for the task
+ * @fifo: physical address of FIFO register
+ *
+ * Allocate a bestcomm task structure for transmitting data to a PSC.
+ */
+struct bcom_task *
+bcom_psc_gen_bd_tx_init(unsigned psc_num, int queue_len, phys_addr_t fifo)
+{
+ if (psc_num >= MPC52xx_PSC_MAXNUM)
+ return NULL;
+
+ return bcom_gen_bd_tx_init(queue_len, fifo,
+ bcom_psc_params[psc_num].tx_initiator,
+ bcom_psc_params[psc_num].tx_ipr);
+}
+EXPORT_SYMBOL_GPL(bcom_psc_gen_bd_tx_init);
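+
+/*
+ * Usage sketch (illustrative only, not part of this patch): a PSC
+ * client such as a UART or I2S driver would typically allocate one
+ * task per direction, assuming it already knows its PSC index and
+ * FIFO physical address (psc_num and fifo_phys below are made-up
+ * names):
+ *
+ *   struct bcom_task *rx, *tx;
+ *
+ *   rx = bcom_psc_gen_bd_rx_init(psc_num, 32, fifo_phys, 512);
+ *   tx = bcom_psc_gen_bd_tx_init(psc_num, 32, fifo_phys);
+ *   if (!rx || !tx)
+ *           goto err;  -- release whichever of the two succeeded
+ */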
+
+
+MODULE_DESCRIPTION("BestComm General Buffer Descriptor tasks driver");
+MODULE_AUTHOR("Jeff Gibbons <jeff.gibbons@appspec.com>");
+MODULE_LICENSE("GPL v2");
+
diff --git a/drivers/dma/bestcomm/sram.c b/drivers/dma/bestcomm/sram.c
new file mode 100644
index 0000000..5e2ed30
--- /dev/null
+++ b/drivers/dma/bestcomm/sram.c
@@ -0,0 +1,178 @@
+/*
+ * Simple memory allocator for on-board SRAM
+ *
+ * Maintainer : Sylvain Munaut <tnt@246tNt.com>
+ *
+ * Copyright (C) 2005 Sylvain Munaut <tnt@246tNt.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/of.h>
+
+#include <asm/io.h>
+#include <asm/mmu.h>
+
+#include <linux/fsl/bestcomm/sram.h>
+
+
+/* Struct keeping our 'state' */
+struct bcom_sram *bcom_sram = NULL;
+EXPORT_SYMBOL_GPL(bcom_sram); /* needed for inline functions */
+
+
+/* ======================================================================== */
+/* Public API */
+/* ======================================================================== */
+/* DO NOT USE in interrupt context. If these helpers are ever needed
+   from an irq handler, the plain spin_locks must become the _irqsave
+   variants. */
+
+int bcom_sram_init(struct device_node *sram_node, char *owner)
+{
+ int rv;
+ const u32 *regaddr_p;
+ u64 regaddr64, size64;
+ unsigned int psize;
+
+ /* Create our state struct */
+ if (bcom_sram) {
+ printk(KERN_ERR "%s: bcom_sram_init: "
+ "Already initialized !\n", owner);
+ return -EBUSY;
+ }
+
+ bcom_sram = kmalloc(sizeof(struct bcom_sram), GFP_KERNEL);
+ if (!bcom_sram) {
+ printk(KERN_ERR "%s: bcom_sram_init: "
+ "Couldn't allocate internal state !\n", owner);
+ return -ENOMEM;
+ }
+
+ /* Get address and size of the sram */
+ regaddr_p = of_get_address(sram_node, 0, &size64, NULL);
+ if (!regaddr_p) {
+ printk(KERN_ERR "%s: bcom_sram_init: "
+ "Invalid device node !\n", owner);
+ rv = -EINVAL;
+ goto error_free;
+ }
+
+ regaddr64 = of_translate_address(sram_node, regaddr_p);
+
+ bcom_sram->base_phys = (phys_addr_t) regaddr64;
+ bcom_sram->size = (unsigned int) size64;
+
+ /* Request region */
+ if (!request_mem_region(bcom_sram->base_phys, bcom_sram->size, owner)) {
+ printk(KERN_ERR "%s: bcom_sram_init: "
+ "Couldn't request region !\n", owner);
+ rv = -EBUSY;
+ goto error_free;
+ }
+
+ /* Map SRAM */
+ /* sram is not really __iomem */
+ bcom_sram->base_virt = (void*) ioremap(bcom_sram->base_phys, bcom_sram->size);
+
+ if (!bcom_sram->base_virt) {
+ printk(KERN_ERR "%s: bcom_sram_init: "
+ "Map error SRAM zone 0x%08lx (0x%0x)!\n",
+ owner, (long)bcom_sram->base_phys, bcom_sram->size );
+ rv = -ENOMEM;
+ goto error_release;
+ }
+
+ /* Create an rheap (defaults to 32 bits word alignment) */
+ bcom_sram->rh = rh_create(4);
+
+ /* Attach the free zones */
+#if 0
+ /* Currently disabled ... for future use only */
+ regaddr_p = of_get_property(sram_node, "available", &psize);
+#else
+ regaddr_p = NULL;
+ psize = 0;
+#endif
+
+ if (!regaddr_p || !psize) {
+ /* Attach the whole zone */
+ rh_attach_region(bcom_sram->rh, 0, bcom_sram->size);
+ } else {
+ /* Attach each zone independently */
+ while (psize >= 2 * sizeof(u32)) {
+ phys_addr_t zbase = of_translate_address(sram_node, regaddr_p);
+ rh_attach_region(bcom_sram->rh, zbase - bcom_sram->base_phys, regaddr_p[1]);
+ regaddr_p += 2;
+ psize -= 2 * sizeof(u32);
+ }
+ }
+
+ /* Init our spinlock */
+ spin_lock_init(&bcom_sram->lock);
+
+ return 0;
+
+error_release:
+ release_mem_region(bcom_sram->base_phys, bcom_sram->size);
+error_free:
+ kfree(bcom_sram);
+ bcom_sram = NULL;
+
+ return rv;
+}
+EXPORT_SYMBOL_GPL(bcom_sram_init);
+
+void bcom_sram_cleanup(void)
+{
+ /* Free resources */
+ if (bcom_sram) {
+ rh_destroy(bcom_sram->rh);
+ iounmap((void __iomem *)bcom_sram->base_virt);
+ release_mem_region(bcom_sram->base_phys, bcom_sram->size);
+ kfree(bcom_sram);
+ bcom_sram = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(bcom_sram_cleanup);
+
+void* bcom_sram_alloc(int size, int align, phys_addr_t *phys)
+{
+ unsigned long offset;
+
+ spin_lock(&bcom_sram->lock);
+ offset = rh_alloc_align(bcom_sram->rh, size, align, NULL);
+ spin_unlock(&bcom_sram->lock);
+
+ if (IS_ERR_VALUE(offset))
+ return NULL;
+
+ *phys = bcom_sram->base_phys + offset;
+ return bcom_sram->base_virt + offset;
+}
+EXPORT_SYMBOL_GPL(bcom_sram_alloc);
+
+void bcom_sram_free(void *ptr)
+{
+ unsigned long offset;
+
+ if (!ptr)
+ return;
+
+ offset = ptr - bcom_sram->base_virt;
+
+ spin_lock(&bcom_sram->lock);
+ rh_free(bcom_sram->rh, offset);
+ spin_unlock(&bcom_sram->lock);
+}
+EXPORT_SYMBOL_GPL(bcom_sram_free);
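+
+/*
+ * Minimal usage sketch (illustrative; names are made up): once
+ * bcom_sram_init() has attached the SRAM, allocate a 16-byte aligned
+ * block, hand the physical address to the hardware and keep the
+ * virtual pointer for CPU access:
+ *
+ *   phys_addr_t phys;
+ *   void *virt = bcom_sram_alloc(64, 16, &phys);
+ *
+ *   if (virt) {
+ *           -- point a task's BD at phys, touch the data via virt
+ *           bcom_sram_free(virt);
+ *   }
+ */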
+
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index aa384e5..797940e 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -21,11 +21,1241 @@
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
-#include <mach/coh901318.h>
+#include <linux/platform_data/dma-coh901318.h>
-#include "coh901318_lli.h"
+#include "coh901318.h"
#include "dmaengine.h"
+#define COH901318_MOD32_MASK (0x1F)
+#define COH901318_WORD_MASK (0xFFFFFFFF)
+/* INT_STATUS - Interrupt Status Registers 32bit (R/-) */
+#define COH901318_INT_STATUS1 (0x0000)
+#define COH901318_INT_STATUS2 (0x0004)
+/* TC_INT_STATUS - Terminal Count Interrupt Status Registers 32bit (R/-) */
+#define COH901318_TC_INT_STATUS1 (0x0008)
+#define COH901318_TC_INT_STATUS2 (0x000C)
+/* TC_INT_CLEAR - Terminal Count Interrupt Clear Registers 32bit (-/W) */
+#define COH901318_TC_INT_CLEAR1 (0x0010)
+#define COH901318_TC_INT_CLEAR2 (0x0014)
+/* RAW_TC_INT_STATUS - Raw Term Count Interrupt Status Registers 32bit (R/-) */
+#define COH901318_RAW_TC_INT_STATUS1 (0x0018)
+#define COH901318_RAW_TC_INT_STATUS2 (0x001C)
+/* BE_INT_STATUS - Bus Error Interrupt Status Registers 32bit (R/-) */
+#define COH901318_BE_INT_STATUS1 (0x0020)
+#define COH901318_BE_INT_STATUS2 (0x0024)
+/* BE_INT_CLEAR - Bus Error Interrupt Clear Registers 32bit (-/W) */
+#define COH901318_BE_INT_CLEAR1 (0x0028)
+#define COH901318_BE_INT_CLEAR2 (0x002C)
+/* RAW_BE_INT_STATUS - Raw Term Count Interrupt Status Registers 32bit (R/-) */
+#define COH901318_RAW_BE_INT_STATUS1 (0x0030)
+#define COH901318_RAW_BE_INT_STATUS2 (0x0034)
+
+/*
+ * CX_CFG - Channel Configuration Registers 32bit (R/W)
+ */
+#define COH901318_CX_CFG (0x0100)
+#define COH901318_CX_CFG_SPACING (0x04)
+/* Channel enable activates the dma job */
+#define COH901318_CX_CFG_CH_ENABLE (0x00000001)
+#define COH901318_CX_CFG_CH_DISABLE (0x00000000)
+/* Request Mode */
+#define COH901318_CX_CFG_RM_MASK (0x00000006)
+#define COH901318_CX_CFG_RM_MEMORY_TO_MEMORY (0x0 << 1)
+#define COH901318_CX_CFG_RM_PRIMARY_TO_MEMORY (0x1 << 1)
+#define COH901318_CX_CFG_RM_MEMORY_TO_PRIMARY (0x1 << 1)
+#define COH901318_CX_CFG_RM_PRIMARY_TO_SECONDARY (0x3 << 1)
+#define COH901318_CX_CFG_RM_SECONDARY_TO_PRIMARY (0x3 << 1)
+/* Linked channel request field. RM must == 11 */
+#define COH901318_CX_CFG_LCRF_SHIFT 3
+#define COH901318_CX_CFG_LCRF_MASK (0x000001F8)
+#define COH901318_CX_CFG_LCR_DISABLE (0x00000000)
+/* Terminal Counter Interrupt Request Mask */
+#define COH901318_CX_CFG_TC_IRQ_ENABLE (0x00000200)
+#define COH901318_CX_CFG_TC_IRQ_DISABLE (0x00000000)
+/* Bus Error interrupt Mask */
+#define COH901318_CX_CFG_BE_IRQ_ENABLE (0x00000400)
+#define COH901318_CX_CFG_BE_IRQ_DISABLE (0x00000000)
+
+/*
+ * CX_STAT - Channel Status Registers 32bit (R/-)
+ */
+#define COH901318_CX_STAT (0x0200)
+#define COH901318_CX_STAT_SPACING (0x04)
+#define COH901318_CX_STAT_RBE_IRQ_IND (0x00000008)
+#define COH901318_CX_STAT_RTC_IRQ_IND (0x00000004)
+#define COH901318_CX_STAT_ACTIVE (0x00000002)
+#define COH901318_CX_STAT_ENABLED (0x00000001)
+
+/*
+ * CX_CTRL - Channel Control Registers 32bit (R/W)
+ */
+#define COH901318_CX_CTRL (0x0400)
+#define COH901318_CX_CTRL_SPACING (0x10)
+/* Transfer Count Enable */
+#define COH901318_CX_CTRL_TC_ENABLE (0x00001000)
+#define COH901318_CX_CTRL_TC_DISABLE (0x00000000)
+/* Transfer Count Value 0 - 4095 */
+#define COH901318_CX_CTRL_TC_VALUE_MASK (0x00000FFF)
+/* Burst count */
+#define COH901318_CX_CTRL_BURST_COUNT_MASK (0x0000E000)
+#define COH901318_CX_CTRL_BURST_COUNT_64_BYTES (0x7 << 13)
+#define COH901318_CX_CTRL_BURST_COUNT_48_BYTES (0x6 << 13)
+#define COH901318_CX_CTRL_BURST_COUNT_32_BYTES (0x5 << 13)
+#define COH901318_CX_CTRL_BURST_COUNT_16_BYTES (0x4 << 13)
+#define COH901318_CX_CTRL_BURST_COUNT_8_BYTES (0x3 << 13)
+#define COH901318_CX_CTRL_BURST_COUNT_4_BYTES (0x2 << 13)
+#define COH901318_CX_CTRL_BURST_COUNT_2_BYTES (0x1 << 13)
+#define COH901318_CX_CTRL_BURST_COUNT_1_BYTE (0x0 << 13)
+/* Source bus size */
+#define COH901318_CX_CTRL_SRC_BUS_SIZE_MASK (0x00030000)
+#define COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS (0x2 << 16)
+#define COH901318_CX_CTRL_SRC_BUS_SIZE_16_BITS (0x1 << 16)
+#define COH901318_CX_CTRL_SRC_BUS_SIZE_8_BITS (0x0 << 16)
+/* Source address increment */
+#define COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE (0x00040000)
+#define COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE (0x00000000)
+/* Destination Bus Size */
+#define COH901318_CX_CTRL_DST_BUS_SIZE_MASK (0x00180000)
+#define COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS (0x2 << 19)
+#define COH901318_CX_CTRL_DST_BUS_SIZE_16_BITS (0x1 << 19)
+#define COH901318_CX_CTRL_DST_BUS_SIZE_8_BITS (0x0 << 19)
+/* Destination address increment */
+#define COH901318_CX_CTRL_DST_ADDR_INC_ENABLE (0x00200000)
+#define COH901318_CX_CTRL_DST_ADDR_INC_DISABLE (0x00000000)
+/* Master Mode (Master2 is only connected to MSL) */
+#define COH901318_CX_CTRL_MASTER_MODE_MASK (0x00C00000)
+#define COH901318_CX_CTRL_MASTER_MODE_M2R_M1W (0x3 << 22)
+#define COH901318_CX_CTRL_MASTER_MODE_M1R_M2W (0x2 << 22)
+#define COH901318_CX_CTRL_MASTER_MODE_M2RW (0x1 << 22)
+#define COH901318_CX_CTRL_MASTER_MODE_M1RW (0x0 << 22)
+/* Terminal Count flag to PER enable */
+#define COH901318_CX_CTRL_TCP_ENABLE (0x01000000)
+#define COH901318_CX_CTRL_TCP_DISABLE (0x00000000)
+/* Terminal Count flags to CPU enable */
+#define COH901318_CX_CTRL_TC_IRQ_ENABLE (0x02000000)
+#define COH901318_CX_CTRL_TC_IRQ_DISABLE (0x00000000)
+/* Hand shake to peripheral */
+#define COH901318_CX_CTRL_HSP_ENABLE (0x04000000)
+#define COH901318_CX_CTRL_HSP_DISABLE (0x00000000)
+#define COH901318_CX_CTRL_HSS_ENABLE (0x08000000)
+#define COH901318_CX_CTRL_HSS_DISABLE (0x00000000)
+/* DMA mode */
+#define COH901318_CX_CTRL_DDMA_MASK (0x30000000)
+#define COH901318_CX_CTRL_DDMA_LEGACY (0x0 << 28)
+#define COH901318_CX_CTRL_DDMA_DEMAND_DMA1 (0x1 << 28)
+#define COH901318_CX_CTRL_DDMA_DEMAND_DMA2 (0x2 << 28)
+/* Primary Request Data Destination */
+#define COH901318_CX_CTRL_PRDD_MASK (0x40000000)
+#define COH901318_CX_CTRL_PRDD_DEST (0x1 << 30)
+#define COH901318_CX_CTRL_PRDD_SOURCE (0x0 << 30)
+
+/*
+ * CX_SRC_ADDR - Channel Source Address Registers 32bit (R/W)
+ */
+#define COH901318_CX_SRC_ADDR (0x0404)
+#define COH901318_CX_SRC_ADDR_SPACING (0x10)
+
+/*
+ * CX_DST_ADDR - Channel Destination Address Registers 32bit R/W
+ */
+#define COH901318_CX_DST_ADDR (0x0408)
+#define COH901318_CX_DST_ADDR_SPACING (0x10)
+
+/*
+ * CX_LNK_ADDR - Channel Link Address Registers 32bit (R/W)
+ */
+#define COH901318_CX_LNK_ADDR (0x040C)
+#define COH901318_CX_LNK_ADDR_SPACING (0x10)
+#define COH901318_CX_LNK_LINK_IMMEDIATE (0x00000001)
+
+/**
+ * struct coh901318_params - parameters for DMAC configuration
+ * @config: DMA config register
+ * @ctrl_lli_last: DMA control register for the last lli in the list
+ * @ctrl_lli: DMA control register for an lli
+ * @ctrl_lli_chained: DMA control register for a chained lli
+ */
+struct coh901318_params {
+ u32 config;
+ u32 ctrl_lli_last;
+ u32 ctrl_lli;
+ u32 ctrl_lli_chained;
+};
+
+/**
+ * struct coh_dma_channel - dma channel base
+ * @name: ascii name of dma channel
+ * @number: channel id number
+ * @desc_nbr_max: number of preallocated descriptors
+ * @priority_high: channel priority: 0 means low priority, nonzero means high.
+ * @param: configuration parameters
+ */
+struct coh_dma_channel {
+ const char name[32];
+ const int number;
+ const int desc_nbr_max;
+ const int priority_high;
+ const struct coh901318_params param;
+};
+
+/**
+ * struct powersave - DMA power save structure
+ * @lock: lock protecting data in this struct
+ * @started_channels: bit mask indicating active dma channels
+ */
+struct powersave {
+ spinlock_t lock;
+ u64 started_channels;
+};
+
+/* Points out all dma slave channels.
+ * Syntax is [A1, B1, A2, B2, ..., -1, -1]: each (A, B) pair selects all
+ * channels from A to B inclusive, e.g. [1, 4, 7, 9, -1, -1] selects
+ * channels 1-4 and 7-9. The end of the list is marked with -1, -1.
+ */
+static int dma_slave_channels[] = {
+ U300_DMA_MSL_TX_0, U300_DMA_SPI_RX,
+ U300_DMA_UART1_TX, U300_DMA_UART1_RX, -1, -1};
+
+/* Points out all dma memcpy channels. */
+static int dma_memcpy_channels[] = {
+ U300_DMA_GENERAL_PURPOSE_0, U300_DMA_GENERAL_PURPOSE_8, -1, -1};
+
+#define flags_memcpy_config (COH901318_CX_CFG_CH_DISABLE | \
+ COH901318_CX_CFG_RM_MEMORY_TO_MEMORY | \
+ COH901318_CX_CFG_LCR_DISABLE | \
+ COH901318_CX_CFG_TC_IRQ_ENABLE | \
+ COH901318_CX_CFG_BE_IRQ_ENABLE)
+#define flags_memcpy_lli_chained (COH901318_CX_CTRL_TC_ENABLE | \
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES | \
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | \
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | \
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | \
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | \
+ COH901318_CX_CTRL_MASTER_MODE_M1RW | \
+ COH901318_CX_CTRL_TCP_DISABLE | \
+ COH901318_CX_CTRL_TC_IRQ_DISABLE | \
+ COH901318_CX_CTRL_HSP_DISABLE | \
+ COH901318_CX_CTRL_HSS_DISABLE | \
+ COH901318_CX_CTRL_DDMA_LEGACY | \
+ COH901318_CX_CTRL_PRDD_SOURCE)
+#define flags_memcpy_lli (COH901318_CX_CTRL_TC_ENABLE | \
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES | \
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | \
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | \
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | \
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | \
+ COH901318_CX_CTRL_MASTER_MODE_M1RW | \
+ COH901318_CX_CTRL_TCP_DISABLE | \
+ COH901318_CX_CTRL_TC_IRQ_DISABLE | \
+ COH901318_CX_CTRL_HSP_DISABLE | \
+ COH901318_CX_CTRL_HSS_DISABLE | \
+ COH901318_CX_CTRL_DDMA_LEGACY | \
+ COH901318_CX_CTRL_PRDD_SOURCE)
+#define flags_memcpy_lli_last (COH901318_CX_CTRL_TC_ENABLE | \
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES | \
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | \
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | \
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | \
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | \
+ COH901318_CX_CTRL_MASTER_MODE_M1RW | \
+ COH901318_CX_CTRL_TCP_DISABLE | \
+ COH901318_CX_CTRL_TC_IRQ_ENABLE | \
+ COH901318_CX_CTRL_HSP_DISABLE | \
+ COH901318_CX_CTRL_HSS_DISABLE | \
+ COH901318_CX_CTRL_DDMA_LEGACY | \
+ COH901318_CX_CTRL_PRDD_SOURCE)
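+
+/*
+ * Note: flags_memcpy_lli_chained and flags_memcpy_lli are bit-for-bit
+ * identical; flags_memcpy_lli_last differs only by
+ * COH901318_CX_CTRL_TC_IRQ_ENABLE, so a memcpy chain raises a single
+ * terminal-count interrupt, on its last item.
+ */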
+
+static const struct coh_dma_channel chan_config[U300_DMA_CHANNELS] = {
+ {
+ .number = U300_DMA_MSL_TX_0,
+ .name = "MSL TX 0",
+ .priority_high = 0,
+ },
+ {
+ .number = U300_DMA_MSL_TX_1,
+ .name = "MSL TX 1",
+ .priority_high = 0,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ },
+ {
+ .number = U300_DMA_MSL_TX_2,
+ .name = "MSL TX 2",
+ .priority_high = 0,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ .desc_nbr_max = 10,
+ },
+ {
+ .number = U300_DMA_MSL_TX_3,
+ .name = "MSL TX 3",
+ .priority_high = 0,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ },
+ {
+ .number = U300_DMA_MSL_TX_4,
+ .name = "MSL TX 4",
+ .priority_high = 0,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ },
+ {
+ .number = U300_DMA_MSL_TX_5,
+ .name = "MSL TX 5",
+ .priority_high = 0,
+ },
+ {
+ .number = U300_DMA_MSL_TX_6,
+ .name = "MSL TX 6",
+ .priority_high = 0,
+ },
+ {
+ .number = U300_DMA_MSL_RX_0,
+ .name = "MSL RX 0",
+ .priority_high = 0,
+ },
+ {
+ .number = U300_DMA_MSL_RX_1,
+ .name = "MSL RX 1",
+ .priority_high = 0,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
+ COH901318_CX_CTRL_PRDD_DEST,
+ .param.ctrl_lli = 0,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
+ COH901318_CX_CTRL_PRDD_DEST,
+ },
+ {
+ .number = U300_DMA_MSL_RX_2,
+ .name = "MSL RX 2",
+ .priority_high = 0,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
+ COH901318_CX_CTRL_PRDD_DEST,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
+ COH901318_CX_CTRL_PRDD_DEST,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
+ COH901318_CX_CTRL_PRDD_DEST,
+ },
+ {
+ .number = U300_DMA_MSL_RX_3,
+ .name = "MSL RX 3",
+ .priority_high = 0,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
+ COH901318_CX_CTRL_PRDD_DEST,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
+ COH901318_CX_CTRL_PRDD_DEST,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
+ COH901318_CX_CTRL_PRDD_DEST,
+ },
+ {
+ .number = U300_DMA_MSL_RX_4,
+ .name = "MSL RX 4",
+ .priority_high = 0,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
+ COH901318_CX_CTRL_PRDD_DEST,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
+ COH901318_CX_CTRL_PRDD_DEST,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
+ COH901318_CX_CTRL_PRDD_DEST,
+ },
+ {
+ .number = U300_DMA_MSL_RX_5,
+ .name = "MSL RX 5",
+ .priority_high = 0,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
+ COH901318_CX_CTRL_PRDD_DEST,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
+ COH901318_CX_CTRL_PRDD_DEST,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
+ COH901318_CX_CTRL_PRDD_DEST,
+ },
+ {
+ .number = U300_DMA_MSL_RX_6,
+ .name = "MSL RX 6",
+ .priority_high = 0,
+ },
+ /*
+ * Don't set up device address, burst count or size of src
+ * or dst bus for this peripheral - handled by PrimeCell
+ * DMA extension.
+ */
+ {
+ .number = U300_DMA_MMCSD_RX_TX,
+ .name = "MMCSD RX TX",
+ .priority_high = 0,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+
+ },
+ {
+ .number = U300_DMA_MSPRO_TX,
+ .name = "MSPRO TX",
+ .priority_high = 0,
+ },
+ {
+ .number = U300_DMA_MSPRO_RX,
+ .name = "MSPRO RX",
+ .priority_high = 0,
+ },
+ /*
+ * Don't set up device address, burst count or size of src
+ * or dst bus for this peripheral - handled by PrimeCell
+ * DMA extension.
+ */
+ {
+ .number = U300_DMA_UART0_TX,
+ .name = "UART0 TX",
+ .priority_high = 0,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+ },
+ {
+ .number = U300_DMA_UART0_RX,
+ .name = "UART0 RX",
+ .priority_high = 0,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+ },
+ {
+ .number = U300_DMA_APEX_TX,
+ .name = "APEX TX",
+ .priority_high = 0,
+ },
+ {
+ .number = U300_DMA_APEX_RX,
+ .name = "APEX RX",
+ .priority_high = 0,
+ },
+ {
+ .number = U300_DMA_PCM_I2S0_TX,
+ .name = "PCM I2S0 TX",
+ .priority_high = 1,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ },
+ {
+ .number = U300_DMA_PCM_I2S0_RX,
+ .name = "PCM I2S0 RX",
+ .priority_high = 1,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_DEST,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_DEST,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_DEST,
+ },
+ {
+ .number = U300_DMA_PCM_I2S1_TX,
+ .name = "PCM I2S1 TX",
+ .priority_high = 1,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ },
+ {
+ .number = U300_DMA_PCM_I2S1_RX,
+ .name = "PCM I2S1 RX",
+ .priority_high = 1,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_DEST,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_DEST,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_DEST,
+ },
+ {
+ .number = U300_DMA_XGAM_CDI,
+ .name = "XGAM CDI",
+ .priority_high = 0,
+ },
+ {
+ .number = U300_DMA_XGAM_PDI,
+ .name = "XGAM PDI",
+ .priority_high = 0,
+ },
+ /*
+ * Don't set up device address, burst count or size of src
+ * or dst bus for this peripheral - handled by PrimeCell
+ * DMA extension.
+ */
+ {
+ .number = U300_DMA_SPI_TX,
+ .name = "SPI TX",
+ .priority_high = 0,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+ },
+ {
+ .number = U300_DMA_SPI_RX,
+ .name = "SPI RX",
+ .priority_high = 0,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+
+ },
+ {
+ .number = U300_DMA_GENERAL_PURPOSE_0,
+ .name = "GENERAL 00",
+ .priority_high = 0,
+
+ .param.config = flags_memcpy_config,
+ .param.ctrl_lli_chained = flags_memcpy_lli_chained,
+ .param.ctrl_lli = flags_memcpy_lli,
+ .param.ctrl_lli_last = flags_memcpy_lli_last,
+ },
+ {
+ .number = U300_DMA_GENERAL_PURPOSE_1,
+ .name = "GENERAL 01",
+ .priority_high = 0,
+
+ .param.config = flags_memcpy_config,
+ .param.ctrl_lli_chained = flags_memcpy_lli_chained,
+ .param.ctrl_lli = flags_memcpy_lli,
+ .param.ctrl_lli_last = flags_memcpy_lli_last,
+ },
+ {
+ .number = U300_DMA_GENERAL_PURPOSE_2,
+ .name = "GENERAL 02",
+ .priority_high = 0,
+
+ .param.config = flags_memcpy_config,
+ .param.ctrl_lli_chained = flags_memcpy_lli_chained,
+ .param.ctrl_lli = flags_memcpy_lli,
+ .param.ctrl_lli_last = flags_memcpy_lli_last,
+ },
+ {
+ .number = U300_DMA_GENERAL_PURPOSE_3,
+ .name = "GENERAL 03",
+ .priority_high = 0,
+
+ .param.config = flags_memcpy_config,
+ .param.ctrl_lli_chained = flags_memcpy_lli_chained,
+ .param.ctrl_lli = flags_memcpy_lli,
+ .param.ctrl_lli_last = flags_memcpy_lli_last,
+ },
+ {
+ .number = U300_DMA_GENERAL_PURPOSE_4,
+ .name = "GENERAL 04",
+ .priority_high = 0,
+
+ .param.config = flags_memcpy_config,
+ .param.ctrl_lli_chained = flags_memcpy_lli_chained,
+ .param.ctrl_lli = flags_memcpy_lli,
+ .param.ctrl_lli_last = flags_memcpy_lli_last,
+ },
+ {
+ .number = U300_DMA_GENERAL_PURPOSE_5,
+ .name = "GENERAL 05",
+ .priority_high = 0,
+
+ .param.config = flags_memcpy_config,
+ .param.ctrl_lli_chained = flags_memcpy_lli_chained,
+ .param.ctrl_lli = flags_memcpy_lli,
+ .param.ctrl_lli_last = flags_memcpy_lli_last,
+ },
+ {
+ .number = U300_DMA_GENERAL_PURPOSE_6,
+ .name = "GENERAL 06",
+ .priority_high = 0,
+
+ .param.config = flags_memcpy_config,
+ .param.ctrl_lli_chained = flags_memcpy_lli_chained,
+ .param.ctrl_lli = flags_memcpy_lli,
+ .param.ctrl_lli_last = flags_memcpy_lli_last,
+ },
+ {
+ .number = U300_DMA_GENERAL_PURPOSE_7,
+ .name = "GENERAL 07",
+ .priority_high = 0,
+
+ .param.config = flags_memcpy_config,
+ .param.ctrl_lli_chained = flags_memcpy_lli_chained,
+ .param.ctrl_lli = flags_memcpy_lli,
+ .param.ctrl_lli_last = flags_memcpy_lli_last,
+ },
+ {
+ .number = U300_DMA_GENERAL_PURPOSE_8,
+ .name = "GENERAL 08",
+ .priority_high = 0,
+
+ .param.config = flags_memcpy_config,
+ .param.ctrl_lli_chained = flags_memcpy_lli_chained,
+ .param.ctrl_lli = flags_memcpy_lli,
+ .param.ctrl_lli_last = flags_memcpy_lli_last,
+ },
+ {
+ .number = U300_DMA_UART1_TX,
+ .name = "UART1 TX",
+ .priority_high = 0,
+ },
+ {
+ .number = U300_DMA_UART1_RX,
+ .name = "UART1 RX",
+ .priority_high = 0,
+ }
+};
+
#define COHC_2_DEV(cohc) (&cohc->chan.dev->device)
#ifdef VERBOSE_DEBUG
@@ -54,7 +1284,6 @@ struct coh901318_base {
struct dma_device dma_slave;
struct dma_device dma_memcpy;
struct coh901318_chan *chans;
- struct coh901318_platform *platform;
};
struct coh901318_chan {
@@ -75,8 +1304,8 @@ struct coh901318_chan {
unsigned long nbr_active_done;
unsigned long busy;
- u32 runtime_addr;
- u32 runtime_ctrl;
+ u32 addr;
+ u32 ctrl;
struct coh901318_base *base;
};
@@ -122,7 +1351,7 @@ static int coh901318_debugfs_read(struct file *file, char __user *buf,
tmp += sprintf(tmp, "DMA -- enabled dma channels\n");
- for (i = 0; i < debugfs_dma_base->platform->max_channels; i++)
+ for (i = 0; i < U300_DMA_CHANNELS; i++)
if (started_channels & (1 << i))
tmp += sprintf(tmp, "channel %d\n", i);
@@ -187,25 +1416,16 @@ static inline struct coh901318_chan *to_coh901318_chan(struct dma_chan *chan)
return container_of(chan, struct coh901318_chan, chan);
}
-static inline dma_addr_t
-cohc_dev_addr(struct coh901318_chan *cohc)
-{
- /* Runtime supplied address will take precedence */
- if (cohc->runtime_addr)
- return cohc->runtime_addr;
- return cohc->base->platform->chan_conf[cohc->id].dev_addr;
-}
-
static inline const struct coh901318_params *
cohc_chan_param(struct coh901318_chan *cohc)
{
- return &cohc->base->platform->chan_conf[cohc->id].param;
+ return &chan_config[cohc->id].param;
}
static inline const struct coh_dma_channel *
cohc_chan_conf(struct coh901318_chan *cohc)
{
- return &cohc->base->platform->chan_conf[cohc->id];
+ return &chan_config[cohc->id];
}
static void enable_powersave(struct coh901318_chan *cohc)
@@ -217,12 +1437,6 @@ static void enable_powersave(struct coh901318_chan *cohc)
pm->started_channels &= ~(1ULL << cohc->id);
- if (!pm->started_channels) {
- /* DMA no longer intends to access memory */
- cohc->base->platform->access_memory_state(cohc->base->dev,
- false);
- }
-
spin_unlock_irqrestore(&pm->lock, flags);
}
static void disable_powersave(struct coh901318_chan *cohc)
@@ -232,12 +1446,6 @@ static void disable_powersave(struct coh901318_chan *cohc)
spin_lock_irqsave(&pm->lock, flags);
- if (!pm->started_channels) {
- /* DMA intends to access memory */
- cohc->base->platform->access_memory_state(cohc->base->dev,
- true);
- }
-
pm->started_channels |= (1ULL << cohc->id);
spin_unlock_irqrestore(&pm->lock, flags);
@@ -596,7 +1804,7 @@ static int coh901318_config(struct coh901318_chan *cohc,
if (param)
p = param;
else
- p = &cohc->base->platform->chan_conf[channel].param;
+ p = cohc_chan_param(cohc);
/* Clear any pending BE or TC interrupt */
if (channel < 32) {
@@ -1052,9 +2260,9 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
* sure the bits you set per peripheral channel are
* cleared in the default config from the platform.
*/
- ctrl_chained |= cohc->runtime_ctrl;
- ctrl_last |= cohc->runtime_ctrl;
- ctrl |= cohc->runtime_ctrl;
+ ctrl_chained |= cohc->ctrl;
+ ctrl_last |= cohc->ctrl;
+ ctrl |= cohc->ctrl;
if (direction == DMA_MEM_TO_DEV) {
u32 tx_flags = COH901318_CX_CTRL_PRDD_SOURCE |
@@ -1103,7 +2311,7 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
/* initiate allocated lli list */
ret = coh901318_lli_fill_sg(&cohc->base->pool, lli, sgl, sg_len,
- cohc_dev_addr(cohc),
+ cohc->addr,
ctrl_chained,
ctrl,
ctrl_last,
@@ -1147,7 +2355,9 @@ coh901318_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
enum dma_status ret;
ret = dma_cookie_status(chan, cookie, txstate);
- /* FIXME: should be conditional on ret != DMA_SUCCESS? */
+ if (ret == DMA_SUCCESS)
+ return ret;
+
dma_set_residue(txstate, coh901318_get_bytes_left(chan));
if (ret == DMA_IN_PROGRESS && cohc->stopped)
@@ -1244,7 +2454,7 @@ static void coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
dma_addr_t addr;
enum dma_slave_buswidth addr_width;
u32 maxburst;
- u32 runtime_ctrl = 0;
+ u32 ctrl = 0;
int i = 0;
/* We only support mem to per or per to mem transfers */
@@ -1265,7 +2475,7 @@ static void coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
addr_width);
switch (addr_width) {
case DMA_SLAVE_BUSWIDTH_1_BYTE:
- runtime_ctrl |=
+ ctrl |=
COH901318_CX_CTRL_SRC_BUS_SIZE_8_BITS |
COH901318_CX_CTRL_DST_BUS_SIZE_8_BITS;
@@ -1277,7 +2487,7 @@ static void coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
break;
case DMA_SLAVE_BUSWIDTH_2_BYTES:
- runtime_ctrl |=
+ ctrl |=
COH901318_CX_CTRL_SRC_BUS_SIZE_16_BITS |
COH901318_CX_CTRL_DST_BUS_SIZE_16_BITS;
@@ -1290,7 +2500,7 @@ static void coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
break;
case DMA_SLAVE_BUSWIDTH_4_BYTES:
/* Direction doesn't matter here, it's 32/32 bits */
- runtime_ctrl |=
+ ctrl |=
COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS;
@@ -1307,13 +2517,13 @@ static void coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
return;
}
- runtime_ctrl |= burst_sizes[i].reg;
+ ctrl |= burst_sizes[i].reg;
dev_dbg(COHC_2_DEV(cohc),
"selected burst size %d bytes for address width %d bytes, maxburst %d\n",
burst_sizes[i].burst_8bit, addr_width, maxburst);
- cohc->runtime_addr = addr;
- cohc->runtime_ctrl = runtime_ctrl;
+ cohc->addr = addr;
+ cohc->ctrl = ctrl;
}
static int
@@ -1431,7 +2641,6 @@ void coh901318_base_init(struct dma_device *dma, const int *pick_chans,
static int __init coh901318_probe(struct platform_device *pdev)
{
int err = 0;
- struct coh901318_platform *pdata;
struct coh901318_base *base;
int irq;
struct resource *io;
@@ -1447,13 +2656,9 @@ static int __init coh901318_probe(struct platform_device *pdev)
pdev->dev.driver->name) == NULL)
return -ENOMEM;
- pdata = pdev->dev.platform_data;
- if (!pdata)
- return -ENODEV;
-
base = devm_kzalloc(&pdev->dev,
ALIGN(sizeof(struct coh901318_base), 4) +
- pdata->max_channels *
+ U300_DMA_CHANNELS *
sizeof(struct coh901318_chan),
GFP_KERNEL);
if (!base)
@@ -1466,7 +2671,6 @@ static int __init coh901318_probe(struct platform_device *pdev)
return -ENOMEM;
base->dev = &pdev->dev;
- base->platform = pdata;
spin_lock_init(&base->pm.lock);
base->pm.started_channels = 0;
@@ -1488,7 +2692,7 @@ static int __init coh901318_probe(struct platform_device *pdev)
return err;
/* init channels for device transfers */
- coh901318_base_init(&base->dma_slave, base->platform->chans_slave,
+ coh901318_base_init(&base->dma_slave, dma_slave_channels,
base);
dma_cap_zero(base->dma_slave.cap_mask);
@@ -1508,7 +2712,7 @@ static int __init coh901318_probe(struct platform_device *pdev)
goto err_register_slave;
/* init channels for memcpy */
- coh901318_base_init(&base->dma_memcpy, base->platform->chans_memcpy,
+ coh901318_base_init(&base->dma_memcpy, dma_memcpy_channels,
base);
dma_cap_zero(base->dma_memcpy.cap_mask);
diff --git a/drivers/dma/coh901318_lli.h b/drivers/dma/coh901318.h
index abff371..95ce1e2 100644
--- a/drivers/dma/coh901318_lli.h
+++ b/drivers/dma/coh901318.h
@@ -1,16 +1,15 @@
/*
- * driver/dma/coh901318_lli.h
- *
- * Copyright (C) 2007-2009 ST-Ericsson
+ * Copyright (C) 2007-2013 ST-Ericsson
* License terms: GNU General Public License (GPL) version 2
- * Support functions for handling lli for coh901318
+ * DMA driver for COH 901 318
* Author: Per Friden <per.friden@stericsson.com>
*/
-#ifndef COH901318_LLI_H
-#define COH901318_LLI_H
+#ifndef COH901318_H
+#define COH901318_H
-#include <mach/coh901318.h>
+#define MAX_DMA_PACKET_SIZE_SHIFT 11
+#define MAX_DMA_PACKET_SIZE (1 << MAX_DMA_PACKET_SIZE_SHIFT)
struct device;
@@ -24,7 +23,25 @@ struct coh901318_pool {
#endif
};
-struct device;
+/**
+ * struct coh901318_lli - linked list item for DMAC
+ * @control: control settings for DMAC
+ * @src_addr: transfer source address
+ * @dst_addr: transfer destination address
+ * @link_addr: physical address to next lli
+ * @virt_link_addr: virtual address of next lli (only used by pool_free)
+ * @phy_this: physical address of current lli (only used by pool_free)
+ */
+struct coh901318_lli {
+ u32 control;
+ dma_addr_t src_addr;
+ dma_addr_t dst_addr;
+ dma_addr_t link_addr;
+
+ void *virt_link_addr;
+ dma_addr_t phy_this;
+};
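+
+/*
+ * Chaining sketch (illustrative only): link_addr holds the *physical*
+ * address of the next lli, so for two pool items a and b, where b_phys
+ * is the dma address of b:
+ *
+ *   a->link_addr = b_phys;
+ *   a->virt_link_addr = b;
+ *   b->link_addr = 0;  -- terminates the chain
+ */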
+
/**
* coh901318_pool_create() - Creates an dma pool for lli:s
* @pool: pool handle
@@ -121,4 +138,4 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
u32 ctrl, u32 ctrl_last,
enum dma_transfer_direction dir, u32 ctrl_irq_mask);
-#endif /* COH901318_LLI_H */
+#endif /* COH901318_H */
diff --git a/drivers/dma/coh901318_lli.c b/drivers/dma/coh901318_lli.c
index 780e042..702112d 100644
--- a/drivers/dma/coh901318_lli.c
+++ b/drivers/dma/coh901318_lli.c
@@ -11,9 +11,9 @@
#include <linux/memory.h>
#include <linux/gfp.h>
#include <linux/dmapool.h>
-#include <mach/coh901318.h>
+#include <linux/dmaengine.h>
-#include "coh901318_lli.h"
+#include "coh901318.h"
#if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_U300_DEBUG))
#define DEBUGFS_POOL_COUNTER_RESET(pool) (pool->debugfs_pool_counter = 0)
@@ -61,7 +61,7 @@ coh901318_lli_alloc(struct coh901318_pool *pool, unsigned int len)
dma_addr_t phy;
if (len == 0)
- goto err;
+ return NULL;
spin_lock(&pool->lock);
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index a815d44..b2728d6 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -62,6 +62,7 @@
#include <linux/rculist.h>
#include <linux/idr.h>
#include <linux/slab.h>
+#include <linux/of_dma.h>
static DEFINE_MUTEX(dma_list_mutex);
static DEFINE_IDR(dma_idr);
@@ -266,7 +267,10 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
pr_err("%s: timeout!\n", __func__);
return DMA_ERROR;
}
- } while (status == DMA_IN_PROGRESS);
+ if (status != DMA_IN_PROGRESS)
+ break;
+ cpu_relax();
+ } while (1);
return status;
}
@@ -546,6 +550,21 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v
}
EXPORT_SYMBOL_GPL(__dma_request_channel);
+/**
+ * dma_request_slave_channel - try to allocate an exclusive slave channel
+ * @dev: pointer to client device structure
+ * @name: slave channel name
+ */
+struct dma_chan *dma_request_slave_channel(struct device *dev, char *name)
+{
+	/* If a device tree node is present, get the slave info from it */
+ if (dev->of_node)
+ return of_dma_request_slave_channel(dev->of_node, name);
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(dma_request_slave_channel);
+
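For a DT-based client, the new helper reduces channel acquisition to a named lookup. A hypothetical probe fragment (device and channel names are made up):

static int foo_probe(struct platform_device *pdev)
{
	struct dma_chan *rx;

	/* resolved via of_dma_request_slave_channel() on DT systems */
	rx = dma_request_slave_channel(&pdev->dev, "rx");
	if (!rx)
		return -ENODEV;	/* no DT node or no matching "rx" entry */

	/* ... dmaengine_slave_config(), prep, submit ... */

	dma_release_channel(rx);
	return 0;
}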
void dma_release_channel(struct dma_chan *chan)
{
mutex_lock(&dma_list_mutex);
@@ -667,18 +686,14 @@ static int get_dma_id(struct dma_device *device)
{
int rc;
- idr_retry:
- if (!idr_pre_get(&dma_idr, GFP_KERNEL))
- return -ENOMEM;
mutex_lock(&dma_list_mutex);
- rc = idr_get_new(&dma_idr, NULL, &device->dev_id);
- mutex_unlock(&dma_list_mutex);
- if (rc == -EAGAIN)
- goto idr_retry;
- else if (rc != 0)
- return rc;
- return 0;
+ rc = idr_alloc(&dma_idr, NULL, 0, 0, GFP_KERNEL);
+ if (rc >= 0)
+ device->dev_id = rc;
+
+ mutex_unlock(&dma_list_mutex);
+ return rc < 0 ? rc : 0;
}
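The conversion above is the stock replacement for the old idr_pre_get()/idr_get_new() retry dance: idr_alloc() reserves the id and returns it (or a negative errno) in one call. The same shape in isolation, with hypothetical names:

static DEFINE_IDR(example_idr);
static DEFINE_MUTEX(example_lock);

static int example_get_id(void *ptr, int *id_out)
{
	int id;

	mutex_lock(&example_lock);
	id = idr_alloc(&example_idr, ptr, 0, 0, GFP_KERNEL);
	mutex_unlock(&example_lock);

	if (id < 0)
		return id;	/* -ENOMEM, -ENOSPC, ... */

	*id_out = id;
	return 0;
}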
/**
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 64b048d..a2c8904 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -242,6 +242,13 @@ static inline void unmap_dst(struct device *dev, dma_addr_t *addr, size_t len,
dma_unmap_single(dev, addr[count], len, DMA_BIDIRECTIONAL);
}
+static unsigned int min_odd(unsigned int x, unsigned int y)
+{
+ unsigned int val = min(x, y);
+
+ return val % 2 ? val : val - 1;
+}
+
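min_odd() lets the test clamp the requested source count to the device's limit while keeping the result odd, so the destination can double as one of the sources. A worked example with assumed limits, as plain userspace C:

#include <stdio.h>

static unsigned int min_odd(unsigned int x, unsigned int y)
{
	unsigned int val = x < y ? x : y;

	return val % 2 ? val : val - 1;
}

int main(void)
{
	/* user asks for 8 xor sources, engine caps at 6:
	 * 8 | 1 = 9, min(9, 6) = 6, 6 is even -> 5 */
	printf("%u\n", min_odd(8 | 1, 6));	/* prints 5 */
	return 0;
}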
/*
* This function repeatedly tests DMA transfers of various lengths and
* offsets for a given operation type until it is told to exit by
@@ -262,6 +269,7 @@ static int dmatest_func(void *data)
struct dmatest_thread *thread = data;
struct dmatest_done done = { .wait = &done_wait };
struct dma_chan *chan;
+ struct dma_device *dev;
const char *thread_name;
unsigned int src_off, dst_off, len;
unsigned int error_count;
@@ -283,13 +291,16 @@ static int dmatest_func(void *data)
smp_rmb();
chan = thread->chan;
+ dev = chan->device;
if (thread->type == DMA_MEMCPY)
src_cnt = dst_cnt = 1;
else if (thread->type == DMA_XOR) {
- src_cnt = xor_sources | 1; /* force odd to ensure dst = src */
+ /* force odd to ensure dst = src */
+ src_cnt = min_odd(xor_sources | 1, dev->max_xor);
dst_cnt = 1;
} else if (thread->type == DMA_PQ) {
- src_cnt = pq_sources | 1; /* force odd to ensure dst = src */
+ /* force odd to ensure dst = src */
+ src_cnt = min_odd(pq_sources | 1, dma_maxpq(dev, 0));
dst_cnt = 2;
for (i = 0; i < src_cnt; i++)
pq_coefs[i] = 1;
@@ -327,7 +338,6 @@ static int dmatest_func(void *data)
while (!kthread_should_stop()
&& !(iterations && total_tests >= iterations)) {
- struct dma_device *dev = chan->device;
struct dma_async_tx_descriptor *tx = NULL;
dma_addr_t dma_srcs[src_cnt];
dma_addr_t dma_dsts[dst_cnt];
@@ -526,7 +536,9 @@ err_srcs:
thread_name, total_tests, failed_tests, ret);
/* terminate all transfers on specified channels */
- chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
+ if (ret)
+ dmaengine_terminate_all(chan);
+
if (iterations > 0)
while (!kthread_should_stop()) {
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit);
@@ -551,7 +563,7 @@ static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
}
/* terminate all transfers on specified channels */
- dtc->chan->device->device_control(dtc->chan, DMA_TERMINATE_ALL, 0);
+ dmaengine_terminate_all(dtc->chan);
kfree(dtc);
}
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 3e8ba02..c599558 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -1,6 +1,5 @@
/*
- * Driver for the Synopsys DesignWare DMA Controller (aka DMACA on
- * AVR32 systems.)
+ * Core driver for the Synopsys DesignWare DMA Controller
*
* Copyright (C) 2007-2008 Atmel Corporation
* Copyright (C) 2010-2011 ST Microelectronics
@@ -9,15 +8,19 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
+#include <linux/of_dma.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -46,15 +49,32 @@ static inline unsigned int dwc_get_sms(struct dw_dma_slave *slave)
return slave ? slave->src_master : 1;
}
+#define SRC_MASTER 0
+#define DST_MASTER 1
+
+static inline unsigned int dwc_get_master(struct dma_chan *chan, int master)
+{
+ struct dw_dma *dw = to_dw_dma(chan->device);
+ struct dw_dma_slave *dws = chan->private;
+ unsigned int m;
+
+ if (master == SRC_MASTER)
+ m = dwc_get_sms(dws);
+ else
+ m = dwc_get_dms(dws);
+
+ return min_t(unsigned int, dw->nr_masters - 1, m);
+}
+
#define DWC_DEFAULT_CTLLO(_chan) ({ \
- struct dw_dma_slave *__slave = (_chan->private); \
struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan); \
struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \
- int _dms = dwc_get_dms(__slave); \
- int _sms = dwc_get_sms(__slave); \
- u8 _smsize = __slave ? _sconfig->src_maxburst : \
+ bool _is_slave = is_slave_direction(_dwc->direction); \
+ int _dms = dwc_get_master(_chan, DST_MASTER); \
+ int _sms = dwc_get_master(_chan, SRC_MASTER); \
+ u8 _smsize = _is_slave ? _sconfig->src_maxburst : \
DW_DMA_MSIZE_16; \
- u8 _dmsize = __slave ? _sconfig->dst_maxburst : \
+ u8 _dmsize = _is_slave ? _sconfig->dst_maxburst : \
DW_DMA_MSIZE_16; \
\
(DWC_CTLL_DST_MSIZE(_dmsize) \
@@ -72,15 +92,14 @@ static inline unsigned int dwc_get_sms(struct dw_dma_slave *slave)
*/
#define NR_DESCS_PER_CHANNEL 64
-/*----------------------------------------------------------------------*/
+static inline unsigned int dwc_get_data_width(struct dma_chan *chan, int master)
+{
+ struct dw_dma *dw = to_dw_dma(chan->device);
-/*
- * Because we're not relying on writeback from the controller (it may not
- * even be configured into the core!) we don't need to use dma_pool. These
- * descriptors -- and associated data -- are cacheable. We do need to make
- * sure their dcache entries are written back before handing them off to
- * the controller, though.
- */
+ return dw->data_width[dwc_get_master(chan, master)];
+}
+
+/*----------------------------------------------------------------------*/
static struct device *chan2dev(struct dma_chan *chan)
{
@@ -93,7 +112,7 @@ static struct device *chan2parent(struct dma_chan *chan)
static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
{
- return list_entry(dwc->active_list.next, struct dw_desc, desc_node);
+ return to_dw_desc(dwc->active_list.next);
}
static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
@@ -120,19 +139,6 @@ static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
return ret;
}
-static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc)
-{
- struct dw_desc *child;
-
- list_for_each_entry(child, &desc->tx_list, desc_node)
- dma_sync_single_for_cpu(chan2parent(&dwc->chan),
- child->txd.phys, sizeof(child->lli),
- DMA_TO_DEVICE);
- dma_sync_single_for_cpu(chan2parent(&dwc->chan),
- desc->txd.phys, sizeof(desc->lli),
- DMA_TO_DEVICE);
-}
-
/*
* Move a descriptor, including any children, to the free list.
* `desc' must not be on any lists.
@@ -144,8 +150,6 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
if (desc) {
struct dw_desc *child;
- dwc_sync_desc_for_cpu(dwc, desc);
-
spin_lock_irqsave(&dwc->lock, flags);
list_for_each_entry(child, &desc->tx_list, desc_node)
dev_vdbg(chan2dev(&dwc->chan),
@@ -168,7 +172,13 @@ static void dwc_initialize(struct dw_dma_chan *dwc)
if (dwc->initialized == true)
return;
- if (dws) {
+ if (dws && dws->cfg_hi == ~0 && dws->cfg_lo == ~0) {
+ /* autoconfigure based on request line from DT */
+ if (dwc->direction == DMA_MEM_TO_DEV)
+ cfghi = DWC_CFGH_DST_PER(dwc->request_line);
+ else if (dwc->direction == DMA_DEV_TO_MEM)
+ cfghi = DWC_CFGH_SRC_PER(dwc->request_line);
+ } else if (dws) {
/*
* We need controller-specific data to set up slave
* transfers.
@@ -178,9 +188,9 @@ static void dwc_initialize(struct dw_dma_chan *dwc)
cfghi = dws->cfg_hi;
cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
} else {
- if (dwc->dma_sconfig.direction == DMA_MEM_TO_DEV)
+ if (dwc->direction == DMA_MEM_TO_DEV)
cfghi = DWC_CFGH_DST_PER(dwc->dma_sconfig.slave_id);
- else if (dwc->dma_sconfig.direction == DMA_DEV_TO_MEM)
+ else if (dwc->direction == DMA_DEV_TO_MEM)
cfghi = DWC_CFGH_SRC_PER(dwc->dma_sconfig.slave_id);
}
@@ -222,7 +232,6 @@ static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc)
channel_readl(dwc, CTL_LO));
}
-
static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
channel_clear_bit(dw, CH_EN, dwc->mask);
@@ -248,6 +257,9 @@ static inline void dwc_do_single_block(struct dw_dma_chan *dwc,
channel_writel(dwc, CTL_LO, ctllo);
channel_writel(dwc, CTL_HI, desc->lli.ctlhi);
channel_set_bit(dw, CH_EN, dwc->mask);
+
+ /* Move pointer to next descriptor */
+ dwc->tx_node_active = dwc->tx_node_active->next;
}
/* Called with dwc->lock held and bh disabled */
@@ -278,9 +290,10 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
dwc_initialize(dwc);
- dwc->tx_list = &first->tx_list;
- dwc->tx_node_active = first->tx_list.next;
+ dwc->residue = first->total_len;
+ dwc->tx_node_active = &first->tx_list;
+ /* Submit first block */
dwc_do_single_block(dwc, first);
return;
@@ -316,8 +329,6 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
param = txd->callback_param;
}
- dwc_sync_desc_for_cpu(dwc, desc);
-
/* async_tx_ack */
list_for_each_entry(child, &desc->tx_list, desc_node)
async_tx_ack(&child->txd);
@@ -326,29 +337,29 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
list_splice_init(&desc->tx_list, &dwc->free_list);
list_move(&desc->desc_node, &dwc->free_list);
- if (!dwc->chan.private) {
+ if (!is_slave_direction(dwc->direction)) {
struct device *parent = chan2parent(&dwc->chan);
if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
dma_unmap_single(parent, desc->lli.dar,
- desc->len, DMA_FROM_DEVICE);
+ desc->total_len, DMA_FROM_DEVICE);
else
dma_unmap_page(parent, desc->lli.dar,
- desc->len, DMA_FROM_DEVICE);
+ desc->total_len, DMA_FROM_DEVICE);
}
if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
dma_unmap_single(parent, desc->lli.sar,
- desc->len, DMA_TO_DEVICE);
+ desc->total_len, DMA_TO_DEVICE);
else
dma_unmap_page(parent, desc->lli.sar,
- desc->len, DMA_TO_DEVICE);
+ desc->total_len, DMA_TO_DEVICE);
}
}
spin_unlock_irqrestore(&dwc->lock, flags);
- if (callback_required && callback)
+ if (callback)
callback(param);
}
@@ -383,6 +394,15 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
dwc_descriptor_complete(dwc, desc, true);
}
+/* Returns how many bytes were already received from source */
+static inline u32 dwc_get_sent(struct dw_dma_chan *dwc)
+{
+ u32 ctlhi = channel_readl(dwc, CTL_HI);
+ u32 ctllo = channel_readl(dwc, CTL_LO);
+
+ return (ctlhi & DWC_CTLH_BLOCK_TS_MASK) * (1 << (ctllo >> 4 & 7));
+}
+
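The helper above multiplies the completed block-transfer count (CTL_HI) by the source element size, whose log2 sits in bits 6:4 of CTL_LO. Assuming that register layout and a 12-bit BLOCK_TS field, a stand-alone version of the arithmetic:

#include <stdint.h>
#include <stdio.h>

static uint32_t sent_bytes(uint32_t ctlhi, uint32_t ctllo)
{
	uint32_t done = ctlhi & 0xfff;		/* BLOCK_TS: elements moved */
	uint32_t width = (ctllo >> 4) & 7;	/* log2(bytes per element) */

	return done << width;
}

int main(void)
{
	/* 256 elements of 4 bytes each -> 1024 bytes already sent */
	printf("%u\n", sent_bytes(256, 2 << 4));
	return 0;
}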
static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
dma_addr_t llp;
@@ -398,6 +418,39 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
if (status_xfer & dwc->mask) {
/* Everything we've submitted is done */
dma_writel(dw, CLEAR.XFER, dwc->mask);
+
+ if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
+ struct list_head *head, *active = dwc->tx_node_active;
+
+ /*
+ * We are inside the first active descriptor.
+ * Otherwise something is really wrong.
+ */
+ desc = dwc_first_active(dwc);
+
+ head = &desc->tx_list;
+ if (active != head) {
+ /* Update desc to reflect last sent one */
+ if (active != head->next)
+ desc = to_dw_desc(active->prev);
+
+ dwc->residue -= desc->len;
+
+ child = to_dw_desc(active);
+
+ /* Submit next block */
+ dwc_do_single_block(dwc, child);
+
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ return;
+ }
+
+ /* We are done here */
+ clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
+ }
+
+ dwc->residue = 0;
+
spin_unlock_irqrestore(&dwc->lock, flags);
dwc_complete_all(dw, dwc);
@@ -405,6 +458,13 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
}
if (list_empty(&dwc->active_list)) {
+ dwc->residue = 0;
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ return;
+ }
+
+ if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
+ dev_vdbg(chan2dev(&dwc->chan), "%s: soft LLP mode\n", __func__);
spin_unlock_irqrestore(&dwc->lock, flags);
return;
}
@@ -413,6 +473,9 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
(unsigned long long)llp);
list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
+ /* initial residue value */
+ dwc->residue = desc->total_len;
+
/* check first descriptors addr */
if (desc->txd.phys == llp) {
spin_unlock_irqrestore(&dwc->lock, flags);
@@ -422,16 +485,21 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
/* check first descriptors llp */
if (desc->lli.llp == llp) {
/* This one is currently in progress */
+ dwc->residue -= dwc_get_sent(dwc);
spin_unlock_irqrestore(&dwc->lock, flags);
return;
}
- list_for_each_entry(child, &desc->tx_list, desc_node)
+ dwc->residue -= desc->len;
+ list_for_each_entry(child, &desc->tx_list, desc_node) {
if (child->lli.llp == llp) {
/* Currently in progress */
+ dwc->residue -= dwc_get_sent(dwc);
spin_unlock_irqrestore(&dwc->lock, flags);
return;
}
+ dwc->residue -= child->len;
+ }
/*
* No descriptors so far seem to be in progress, i.e.
@@ -457,9 +525,8 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
{
- dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
- " desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
- lli->sar, lli->dar, lli->llp, lli->ctlhi, lli->ctllo);
+ dev_crit(chan2dev(&dwc->chan), " desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
+ lli->sar, lli->dar, lli->llp, lli->ctlhi, lli->ctllo);
}
static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
@@ -487,16 +554,14 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
dwc_dostart(dwc, dwc_first_active(dwc));
/*
- * KERN_CRITICAL may seem harsh, but since this only happens
+ * WARN may seem harsh, but since this only happens
* when someone submits a bad physical address in a
* descriptor, we should consider ourselves lucky that the
* controller flagged an error instead of scribbling over
* random memory locations.
*/
- dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
- "Bad descriptor submitted for DMA!\n");
- dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
- " cookie: %d\n", bad_desc->txd.cookie);
+ dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n"
+ " cookie: %d\n", bad_desc->txd.cookie);
dwc_dump_lli(dwc, &bad_desc->lli);
list_for_each_entry(child, &bad_desc->tx_list, desc_node)
dwc_dump_lli(dwc, &child->lli);
@@ -597,36 +662,8 @@ static void dw_dma_tasklet(unsigned long data)
dwc_handle_cyclic(dw, dwc, status_err, status_xfer);
else if (status_err & (1 << i))
dwc_handle_error(dw, dwc);
- else if (status_xfer & (1 << i)) {
- unsigned long flags;
-
- spin_lock_irqsave(&dwc->lock, flags);
- if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
- if (dwc->tx_node_active != dwc->tx_list) {
- struct dw_desc *desc =
- list_entry(dwc->tx_node_active,
- struct dw_desc,
- desc_node);
-
- dma_writel(dw, CLEAR.XFER, dwc->mask);
-
- /* move pointer to next descriptor */
- dwc->tx_node_active =
- dwc->tx_node_active->next;
-
- dwc_do_single_block(dwc, desc);
-
- spin_unlock_irqrestore(&dwc->lock, flags);
- continue;
- } else {
- /* we are done here */
- clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
- }
- }
- spin_unlock_irqrestore(&dwc->lock, flags);
-
+ else if (status_xfer & (1 << i))
dwc_scan_descriptors(dw, dwc);
- }
}
/*
@@ -708,7 +745,6 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
size_t len, unsigned long flags)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
- struct dw_dma_slave *dws = chan->private;
struct dw_desc *desc;
struct dw_desc *first;
struct dw_desc *prev;
@@ -729,8 +765,10 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
return NULL;
}
- data_width = min_t(unsigned int, dwc->dw->data_width[dwc_get_sms(dws)],
- dwc->dw->data_width[dwc_get_dms(dws)]);
+ dwc->direction = DMA_MEM_TO_MEM;
+
+ data_width = min_t(unsigned int, dwc_get_data_width(chan, SRC_MASTER),
+ dwc_get_data_width(chan, DST_MASTER));
src_width = dst_width = min_t(unsigned int, data_width,
dwc_fast_fls(src | dest | len));
@@ -755,32 +793,25 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
desc->lli.dar = dest + offset;
desc->lli.ctllo = ctllo;
desc->lli.ctlhi = xfer_count;
+ desc->len = xfer_count << src_width;
if (!first) {
first = desc;
} else {
prev->lli.llp = desc->txd.phys;
- dma_sync_single_for_device(chan2parent(chan),
- prev->txd.phys, sizeof(prev->lli),
- DMA_TO_DEVICE);
list_add_tail(&desc->desc_node,
&first->tx_list);
}
prev = desc;
}
-
if (flags & DMA_PREP_INTERRUPT)
/* Trigger interrupt after last block */
prev->lli.ctllo |= DWC_CTLL_INT_EN;
prev->lli.llp = 0;
- dma_sync_single_for_device(chan2parent(chan),
- prev->txd.phys, sizeof(prev->lli),
- DMA_TO_DEVICE);
-
first->txd.flags = flags;
- first->len = len;
+ first->total_len = len;
return &first->txd;
@@ -795,7 +826,6 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
unsigned long flags, void *context)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
- struct dw_dma_slave *dws = chan->private;
struct dma_slave_config *sconfig = &dwc->dma_sconfig;
struct dw_desc *prev;
struct dw_desc *first;
@@ -810,9 +840,11 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
dev_vdbg(chan2dev(chan), "%s\n", __func__);
- if (unlikely(!dws || !sg_len))
+ if (unlikely(!is_slave_direction(direction) || !sg_len))
return NULL;
+ dwc->direction = direction;
+
prev = first = NULL;
switch (direction) {
@@ -827,7 +859,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
DWC_CTLL_FC(DW_DMA_FC_D_M2P);
- data_width = dwc->dw->data_width[dwc_get_sms(dws)];
+ data_width = dwc_get_data_width(chan, SRC_MASTER);
for_each_sg(sgl, sg, sg_len, i) {
struct dw_desc *desc;
@@ -860,15 +892,12 @@ slave_sg_todev_fill_desc:
}
desc->lli.ctlhi = dlen >> mem_width;
+ desc->len = dlen;
if (!first) {
first = desc;
} else {
prev->lli.llp = desc->txd.phys;
- dma_sync_single_for_device(chan2parent(chan),
- prev->txd.phys,
- sizeof(prev->lli),
- DMA_TO_DEVICE);
list_add_tail(&desc->desc_node,
&first->tx_list);
}
@@ -890,7 +919,7 @@ slave_sg_todev_fill_desc:
ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
DWC_CTLL_FC(DW_DMA_FC_D_P2M);
- data_width = dwc->dw->data_width[dwc_get_dms(dws)];
+ data_width = dwc_get_data_width(chan, DST_MASTER);
for_each_sg(sgl, sg, sg_len, i) {
struct dw_desc *desc;
@@ -922,15 +951,12 @@ slave_sg_fromdev_fill_desc:
len = 0;
}
desc->lli.ctlhi = dlen >> reg_width;
+ desc->len = dlen;
if (!first) {
first = desc;
} else {
prev->lli.llp = desc->txd.phys;
- dma_sync_single_for_device(chan2parent(chan),
- prev->txd.phys,
- sizeof(prev->lli),
- DMA_TO_DEVICE);
list_add_tail(&desc->desc_node,
&first->tx_list);
}
@@ -950,11 +976,7 @@ slave_sg_fromdev_fill_desc:
prev->lli.ctllo |= DWC_CTLL_INT_EN;
prev->lli.llp = 0;
- dma_sync_single_for_device(chan2parent(chan),
- prev->txd.phys, sizeof(prev->lli),
- DMA_TO_DEVICE);
-
- first->len = total_len;
+ first->total_len = total_len;
return &first->txd;
@@ -984,11 +1006,12 @@ set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
- /* Check if it is chan is configured for slave transfers */
- if (!chan->private)
+ /* Check if chan will be configured for slave transfers */
+ if (!is_slave_direction(sconfig->direction))
return -EINVAL;
memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
+ dwc->direction = sconfig->direction;
convert_burst(&dwc->dma_sconfig.src_maxburst);
convert_burst(&dwc->dma_sconfig.dst_maxburst);
@@ -996,6 +1019,26 @@ set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
return 0;
}
+static inline void dwc_chan_pause(struct dw_dma_chan *dwc)
+{
+ u32 cfglo = channel_readl(dwc, CFG_LO);
+
+ channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
+ while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY))
+ cpu_relax();
+
+ dwc->paused = true;
+}
+
+static inline void dwc_chan_resume(struct dw_dma_chan *dwc)
+{
+ u32 cfglo = channel_readl(dwc, CFG_LO);
+
+ channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
+
+ dwc->paused = false;
+}
+
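With the suspend/resume logic factored into helpers, the client-visible behaviour is unchanged: dmaengine_pause() suspends the channel and waits for the FIFO to drain before the channel reports paused. A hedged sketch of the caller's side:

static void pause_peek_resume(struct device *dev, struct dma_chan *chan)
{
	if (dmaengine_pause(chan)) {
		dev_warn(dev, "channel cannot be paused\n");
		return;
	}

	/* channel is quiesced here: safe to query residue, etc. */

	dmaengine_resume(chan);
}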
static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
unsigned long arg)
{
@@ -1003,18 +1046,13 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
struct dw_dma *dw = to_dw_dma(chan->device);
struct dw_desc *desc, *_desc;
unsigned long flags;
- u32 cfglo;
LIST_HEAD(list);
if (cmd == DMA_PAUSE) {
spin_lock_irqsave(&dwc->lock, flags);
- cfglo = channel_readl(dwc, CFG_LO);
- channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
- while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY))
- cpu_relax();
+ dwc_chan_pause(dwc);
- dwc->paused = true;
spin_unlock_irqrestore(&dwc->lock, flags);
} else if (cmd == DMA_RESUME) {
if (!dwc->paused)
@@ -1022,9 +1060,7 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
spin_lock_irqsave(&dwc->lock, flags);
- cfglo = channel_readl(dwc, CFG_LO);
- channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
- dwc->paused = false;
+ dwc_chan_resume(dwc);
spin_unlock_irqrestore(&dwc->lock, flags);
} else if (cmd == DMA_TERMINATE_ALL) {
@@ -1034,7 +1070,7 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
dwc_chan_disable(dw, dwc);
- dwc->paused = false;
+ dwc_chan_resume(dwc);
/* active_list entries will end up before queued entries */
list_splice_init(&dwc->queue, &list);
@@ -1054,6 +1090,21 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
return 0;
}
+static inline u32 dwc_get_residue(struct dw_dma_chan *dwc)
+{
+ unsigned long flags;
+ u32 residue;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+
+ residue = dwc->residue;
+ if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags) && residue)
+ residue -= dwc_get_sent(dwc);
+
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ return residue;
+}
+
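The point of the residue bookkeeping shows on the client side: dma_tx_state.residue now reflects live progress rather than the full descriptor length. Minimal sketch, assuming an in-flight cookie:

static void show_progress(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(chan, cookie, &state);
	if (status == DMA_IN_PROGRESS || status == DMA_PAUSED)
		pr_info("%u bytes left in flight\n", state.residue);
}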
static enum dma_status
dwc_tx_status(struct dma_chan *chan,
dma_cookie_t cookie,
@@ -1070,7 +1121,7 @@ dwc_tx_status(struct dma_chan *chan,
}
if (ret != DMA_SUCCESS)
- dma_set_residue(txstate, dwc_first_active(dwc)->len);
+ dma_set_residue(txstate, dwc_get_residue(dwc));
if (dwc->paused)
return DMA_PAUSED;
@@ -1113,22 +1164,22 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
spin_lock_irqsave(&dwc->lock, flags);
i = dwc->descs_allocated;
while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
+ dma_addr_t phys;
+
spin_unlock_irqrestore(&dwc->lock, flags);
- desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL);
- if (!desc) {
- dev_info(chan2dev(chan),
- "only allocated %d descriptors\n", i);
- spin_lock_irqsave(&dwc->lock, flags);
- break;
- }
+ desc = dma_pool_alloc(dw->desc_pool, GFP_ATOMIC, &phys);
+ if (!desc)
+ goto err_desc_alloc;
+
+ memset(desc, 0, sizeof(struct dw_desc));
INIT_LIST_HEAD(&desc->tx_list);
dma_async_tx_descriptor_init(&desc->txd, chan);
desc->txd.tx_submit = dwc_tx_submit;
desc->txd.flags = DMA_CTRL_ACK;
- desc->txd.phys = dma_map_single(chan2parent(chan), &desc->lli,
- sizeof(desc->lli), DMA_TO_DEVICE);
+ desc->txd.phys = phys;
+
dwc_desc_put(dwc, desc);
spin_lock_irqsave(&dwc->lock, flags);
@@ -1140,6 +1191,11 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);
return i;
+
+err_desc_alloc:
+ dev_info(chan2dev(chan), "only allocated %d descriptors\n", i);
+
+ return i;
}
static void dwc_free_chan_resources(struct dma_chan *chan)
@@ -1171,14 +1227,71 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
list_for_each_entry_safe(desc, _desc, &list, desc_node) {
dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc);
- dma_unmap_single(chan2parent(chan), desc->txd.phys,
- sizeof(desc->lli), DMA_TO_DEVICE);
- kfree(desc);
+ dma_pool_free(dw->desc_pool, desc, desc->txd.phys);
}
dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
}
+struct dw_dma_filter_args {
+ struct dw_dma *dw;
+ unsigned int req;
+ unsigned int src;
+ unsigned int dst;
+};
+
+static bool dw_dma_generic_filter(struct dma_chan *chan, void *param)
+{
+ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ struct dw_dma *dw = to_dw_dma(chan->device);
+ struct dw_dma_filter_args *fargs = param;
+ struct dw_dma_slave *dws = &dwc->slave;
+
+ /* ensure the channel belongs to the requested controller */
+ if (chan->device != &fargs->dw->dma)
+ return false;
+
+ dws->dma_dev = dw->dma.dev;
+ dws->cfg_hi = ~0;
+ dws->cfg_lo = ~0;
+ dws->src_master = fargs->src;
+ dws->dst_master = fargs->dst;
+
+ dwc->request_line = fargs->req;
+
+ chan->private = dws;
+
+ return true;
+}
+
+static struct dma_chan *dw_dma_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct dw_dma *dw = ofdma->of_dma_data;
+ struct dw_dma_filter_args fargs = {
+ .dw = dw,
+ };
+ dma_cap_mask_t cap;
+
+ if (dma_spec->args_count != 3)
+ return NULL;
+
+ fargs.req = be32_to_cpup(dma_spec->args+0);
+ fargs.src = be32_to_cpup(dma_spec->args+1);
+ fargs.dst = be32_to_cpup(dma_spec->args+2);
+
+ if (WARN_ON(fargs.req >= DW_DMA_MAX_NR_REQUESTS ||
+ fargs.src >= dw->nr_masters ||
+ fargs.dst >= dw->nr_masters))
+ return NULL;
+
+ dma_cap_zero(cap);
+ dma_cap_set(DMA_SLAVE, cap);
+
+ /* TODO: there should be a simpler way to do this */
+ return dma_request_channel(cap, dw_dma_generic_filter, &fargs);
+}
+
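The TODO above notes that going through dma_request_channel() with a filter is roundabout, but it is the generic pattern: ask for any DMA_SLAVE-capable channel and let the callback reject candidates from other controllers. Stripped-down shape (my_filter and the match token are stand-ins):

static bool my_filter(struct dma_chan *chan, void *param)
{
	/* accept only channels owned by the controller we were handed */
	return chan->device->dev == param;
}

static struct dma_chan *grab_slave_channel(struct device *controller)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	return dma_request_channel(mask, my_filter, controller);
}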
/* --------------------- Cyclic DMA API extensions -------------------- */
/**
@@ -1298,6 +1411,11 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
retval = ERR_PTR(-EINVAL);
+ if (unlikely(!is_slave_direction(direction)))
+ goto out_err;
+
+ dwc->direction = direction;
+
if (direction == DMA_MEM_TO_DEV)
reg_width = __ffs(sconfig->dst_addr_width);
else
@@ -1312,8 +1430,6 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
goto out_err;
if (unlikely(buf_addr & ((1 << reg_width) - 1)))
goto out_err;
- if (unlikely(!(direction & (DMA_MEM_TO_DEV | DMA_DEV_TO_MEM))))
- goto out_err;
retval = ERR_PTR(-ENOMEM);
@@ -1371,20 +1487,14 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
desc->lli.ctlhi = (period_len >> reg_width);
cdesc->desc[i] = desc;
- if (last) {
+ if (last)
last->lli.llp = desc->txd.phys;
- dma_sync_single_for_device(chan2parent(chan),
- last->txd.phys, sizeof(last->lli),
- DMA_TO_DEVICE);
- }
last = desc;
}
 /* let's make a cyclic list */
last->lli.llp = cdesc->desc[0]->txd.phys;
- dma_sync_single_for_device(chan2parent(chan), last->txd.phys,
- sizeof(last->lli), DMA_TO_DEVICE);
dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%llx len %zu "
"period %zu periods %d\n", (unsigned long long)buf_addr,
@@ -1462,6 +1572,60 @@ static void dw_dma_off(struct dw_dma *dw)
dw->chan[i].initialized = false;
}
+#ifdef CONFIG_OF
+static struct dw_dma_platform_data *
+dw_dma_parse_dt(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct dw_dma_platform_data *pdata;
+ u32 tmp, arr[4];
+
+ if (!np) {
+ dev_err(&pdev->dev, "Missing DT data\n");
+ return NULL;
+ }
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return NULL;
+
+ if (of_property_read_u32(np, "dma-channels", &pdata->nr_channels))
+ return NULL;
+
+ if (of_property_read_bool(np, "is_private"))
+ pdata->is_private = true;
+
+ if (!of_property_read_u32(np, "chan_allocation_order", &tmp))
+ pdata->chan_allocation_order = (unsigned char)tmp;
+
+ if (!of_property_read_u32(np, "chan_priority", &tmp))
+ pdata->chan_priority = tmp;
+
+ if (!of_property_read_u32(np, "block_size", &tmp))
+ pdata->block_size = tmp;
+
+ if (!of_property_read_u32(np, "dma-masters", &tmp)) {
+ if (tmp > 4)
+ return NULL;
+
+ pdata->nr_masters = tmp;
+ }
+
+ if (!of_property_read_u32_array(np, "data_width", arr,
+ pdata->nr_masters))
+ for (tmp = 0; tmp < pdata->nr_masters; tmp++)
+ pdata->data_width[tmp] = arr[tmp];
+
+ return pdata;
+}
+#else
+static inline struct dw_dma_platform_data *
+dw_dma_parse_dt(struct platform_device *pdev)
+{
+ return NULL;
+}
+#endif
+
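The parser above follows the usual of_property_read_* convention: the accessors return 0 on success, so required properties bail out on failure while optional ones are read behind an inverted test. Reduced to its skeleton, with hypothetical property names:

static int parse_node(struct device_node *np, u32 *channels, bool *is_priv)
{
	/* required: missing property is a hard error */
	if (of_property_read_u32(np, "dma-channels", channels))
		return -EINVAL;

	/* optional: leave defaults in place when absent */
	*is_priv = of_property_read_bool(np, "is_private");

	return 0;
}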
static int dw_probe(struct platform_device *pdev)
{
struct dw_dma_platform_data *pdata;
@@ -1477,10 +1641,6 @@ static int dw_probe(struct platform_device *pdev)
int err;
int i;
- pdata = dev_get_platdata(&pdev->dev);
- if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS)
- return -EINVAL;
-
io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!io)
return -EINVAL;
@@ -1489,13 +1649,37 @@ static int dw_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- regs = devm_request_and_ioremap(&pdev->dev, io);
- if (!regs)
- return -EBUSY;
+ regs = devm_ioremap_resource(&pdev->dev, io);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ /* Apply default dma_mask if needed */
+ if (!pdev->dev.dma_mask) {
+ pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+ pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ }
dw_params = dma_read_byaddr(regs, DW_PARAMS);
autocfg = dw_params >> DW_PARAMS_EN & 0x1;
+ dev_dbg(&pdev->dev, "DW_PARAMS: 0x%08x\n", dw_params);
+
+ pdata = dev_get_platdata(&pdev->dev);
+ if (!pdata)
+ pdata = dw_dma_parse_dt(pdev);
+
+ if (!pdata && autocfg) {
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ /* Fill platform data with the default values */
+ pdata->is_private = true;
+ pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING;
+ pdata->chan_priority = CHAN_PRIORITY_ASCENDING;
+ } else if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS)
+ return -EINVAL;
+
if (autocfg)
nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 0x7) + 1;
else
@@ -1543,6 +1727,14 @@ static int dw_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dw);
+ /* create a pool of consistent memory blocks for hardware descriptors */
+ dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", &pdev->dev,
+ sizeof(struct dw_desc), 4, 0);
+ if (!dw->desc_pool) {
+ dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
+ return -ENOMEM;
+ }
+
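Switching to a managed dma pool means each descriptor lives in coherent memory from birth, so the old dma_map_single()/sync dance disappears and teardown is automatic on driver detach. Allocation and release then pair up roughly as below (a sketch; desc_pool is the pool created above):

static struct dw_desc *desc_alloc(struct dma_pool *desc_pool)
{
	dma_addr_t phys;
	struct dw_desc *desc;

	desc = dma_pool_alloc(desc_pool, GFP_ATOMIC, &phys);
	if (!desc)
		return NULL;

	memset(desc, 0, sizeof(*desc));
	desc->txd.phys = phys;	/* address the hardware will fetch */
	return desc;
}

static void desc_release(struct dma_pool *desc_pool, struct dw_desc *desc)
{
	dma_pool_free(desc_pool, desc, desc->txd.phys);
}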
tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);
INIT_LIST_HEAD(&dw->dma.channels);
@@ -1574,7 +1766,7 @@ static int dw_probe(struct platform_device *pdev)
channel_clear_bit(dw, CH_EN, dwc->mask);
- dwc->dw = dw;
+ dwc->direction = DMA_TRANS_NONE;
/* hardware configuration */
if (autocfg) {
@@ -1583,6 +1775,9 @@ static int dw_probe(struct platform_device *pdev)
dwc_params = dma_read_byaddr(regs + r * sizeof(u32),
DWC_PARAMS);
+ dev_dbg(&pdev->dev, "DWC_PARAMS[%d]: 0x%08x\n", i,
+ dwc_params);
+
/* Decode maximum block size for given channel. The
* stored 4 bit value represents blocks from 0x00 for 3
* up to 0x0a for 4095. */
@@ -1626,11 +1821,19 @@ static int dw_probe(struct platform_device *pdev)
dma_writel(dw, CFG, DW_CFG_DMA_EN);
- printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n",
- dev_name(&pdev->dev), nr_channels);
+ dev_info(&pdev->dev, "DesignWare DMA Controller, %d channels\n",
+ nr_channels);
dma_async_device_register(&dw->dma);
+ if (pdev->dev.of_node) {
+ err = of_dma_controller_register(pdev->dev.of_node,
+ dw_dma_xlate, dw);
+ if (err && err != -ENODEV)
+ dev_err(&pdev->dev,
+ "could not register of_dma_controller\n");
+ }
+
return 0;
}
@@ -1639,6 +1842,8 @@ static int dw_remove(struct platform_device *pdev)
struct dw_dma *dw = platform_get_drvdata(pdev);
struct dw_dma_chan *dwc, *_dwc;
+ if (pdev->dev.of_node)
+ of_dma_controller_free(pdev->dev.of_node);
dw_dma_off(dw);
dma_async_device_unregister(&dw->dma);
@@ -1657,7 +1862,7 @@ static void dw_shutdown(struct platform_device *pdev)
{
struct dw_dma *dw = platform_get_drvdata(pdev);
- dw_dma_off(platform_get_drvdata(pdev));
+ dw_dma_off(dw);
clk_disable_unprepare(dw->clk);
}
@@ -1666,7 +1871,7 @@ static int dw_suspend_noirq(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct dw_dma *dw = platform_get_drvdata(pdev);
- dw_dma_off(platform_get_drvdata(pdev));
+ dw_dma_off(dw);
clk_disable_unprepare(dw->clk);
return 0;
@@ -1679,6 +1884,7 @@ static int dw_resume_noirq(struct device *dev)
clk_prepare_enable(dw->clk);
dma_writel(dw, CFG, DW_CFG_DMA_EN);
+
return 0;
}
@@ -1699,7 +1905,13 @@ static const struct of_device_id dw_dma_id_table[] = {
MODULE_DEVICE_TABLE(of, dw_dma_id_table);
#endif
+static const struct platform_device_id dw_dma_ids[] = {
+ { "INTL9C60", 0 },
+ { }
+};
+
static struct platform_driver dw_driver = {
+ .probe = dw_probe,
.remove = dw_remove,
.shutdown = dw_shutdown,
.driver = {
@@ -1707,11 +1919,12 @@ static struct platform_driver dw_driver = {
.pm = &dw_dev_pm_ops,
.of_match_table = of_match_ptr(dw_dma_id_table),
},
+ .id_table = dw_dma_ids,
};
static int __init dw_init(void)
{
- return platform_driver_probe(&dw_driver, dw_probe);
+ return platform_driver_register(&dw_driver);
}
subsys_initcall(dw_init);
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h
index 8896559..cf0ce5c 100644
--- a/drivers/dma/dw_dmac_regs.h
+++ b/drivers/dma/dw_dmac_regs.h
@@ -9,9 +9,11 @@
* published by the Free Software Foundation.
*/
+#include <linux/dmaengine.h>
#include <linux/dw_dmac.h>
#define DW_DMA_MAX_NR_CHANNELS 8
+#define DW_DMA_MAX_NR_REQUESTS 16
/* flow controller */
enum dw_dma_fc {
@@ -184,15 +186,15 @@ enum dw_dmac_flags {
};
struct dw_dma_chan {
- struct dma_chan chan;
- void __iomem *ch_regs;
- u8 mask;
- u8 priority;
- bool paused;
- bool initialized;
+ struct dma_chan chan;
+ void __iomem *ch_regs;
+ u8 mask;
+ u8 priority;
+ enum dma_transfer_direction direction;
+ bool paused;
+ bool initialized;
/* software emulation of the LLP transfers */
- struct list_head *tx_list;
struct list_head *tx_node_active;
spinlock_t lock;
@@ -202,6 +204,7 @@ struct dw_dma_chan {
struct list_head active_list;
struct list_head queue;
struct list_head free_list;
+ u32 residue;
struct dw_cyclic_desc *cdesc;
unsigned int descs_allocated;
@@ -209,12 +212,11 @@ struct dw_dma_chan {
/* hardware configuration */
unsigned int block_size;
bool nollp;
+ unsigned int request_line;
+ struct dw_dma_slave slave;
/* configuration passed via DMA_SLAVE_CONFIG */
struct dma_slave_config dma_sconfig;
-
- /* backlink to dw_dma */
- struct dw_dma *dw;
};
static inline struct dw_dma_chan_regs __iomem *
@@ -236,6 +238,7 @@ static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan)
struct dw_dma {
struct dma_device dma;
void __iomem *regs;
+ struct dma_pool *desc_pool;
struct tasklet_struct tasklet;
struct clk *clk;
@@ -293,8 +296,11 @@ struct dw_desc {
struct list_head tx_list;
struct dma_async_tx_descriptor txd;
size_t len;
+ size_t total_len;
};
+#define to_dw_desc(h) list_entry(h, struct dw_desc, desc_node)
+
static inline struct dw_desc *
txd_to_dw_desc(struct dma_async_tx_descriptor *txd)
{
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index f424298..cd7e328 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -69,9 +69,7 @@ struct edma_chan {
int ch_num;
bool alloced;
int slot[EDMA_MAX_SLOTS];
- dma_addr_t addr;
- int addr_width;
- int maxburst;
+ struct dma_slave_config cfg;
};
struct edma_cc {
@@ -178,29 +176,14 @@ static int edma_terminate_all(struct edma_chan *echan)
return 0;
}
-
static int edma_slave_config(struct edma_chan *echan,
- struct dma_slave_config *config)
+ struct dma_slave_config *cfg)
{
- if ((config->src_addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES) ||
- (config->dst_addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES))
+ if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
+ cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
return -EINVAL;
- if (config->direction == DMA_MEM_TO_DEV) {
- if (config->dst_addr)
- echan->addr = config->dst_addr;
- if (config->dst_addr_width)
- echan->addr_width = config->dst_addr_width;
- if (config->dst_maxburst)
- echan->maxburst = config->dst_maxburst;
- } else if (config->direction == DMA_DEV_TO_MEM) {
- if (config->src_addr)
- echan->addr = config->src_addr;
- if (config->src_addr_width)
- echan->addr_width = config->src_addr_width;
- if (config->src_maxburst)
- echan->maxburst = config->src_maxburst;
- }
+ memcpy(&echan->cfg, cfg, sizeof(echan->cfg));
return 0;
}
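Storing the whole dma_slave_config and deferring field selection to prep time (as the next hunk does) keeps the config handler trivial. The per-direction pick reduces to roughly this:

static int pick_slave_params(struct dma_slave_config *cfg,
			     enum dma_transfer_direction dir,
			     dma_addr_t *addr, u32 *burst)
{
	if (dir == DMA_DEV_TO_MEM) {
		*addr = cfg->src_addr;
		*burst = cfg->src_maxburst;
	} else if (dir == DMA_MEM_TO_DEV) {
		*addr = cfg->dst_addr;
		*burst = cfg->dst_maxburst;
	} else {
		return -EINVAL;		/* not a slave direction */
	}

	return 0;
}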
@@ -235,6 +218,9 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
struct edma_chan *echan = to_edma_chan(chan);
struct device *dev = chan->device->dev;
struct edma_desc *edesc;
+ dma_addr_t dev_addr;
+ enum dma_slave_buswidth dev_width;
+ u32 burst;
struct scatterlist *sg;
int i;
int acnt, bcnt, ccnt, src, dst, cidx;
@@ -243,7 +229,20 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
if (unlikely(!echan || !sgl || !sg_len))
return NULL;
- if (echan->addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
+ if (direction == DMA_DEV_TO_MEM) {
+ dev_addr = echan->cfg.src_addr;
+ dev_width = echan->cfg.src_addr_width;
+ burst = echan->cfg.src_maxburst;
+ } else if (direction == DMA_MEM_TO_DEV) {
+ dev_addr = echan->cfg.dst_addr;
+ dev_width = echan->cfg.dst_addr_width;
+ burst = echan->cfg.dst_maxburst;
+ } else {
+ dev_err(dev, "%s: bad direction?\n", __func__);
+ return NULL;
+ }
+
+ if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
dev_err(dev, "Undefined slave buswidth\n");
return NULL;
}
@@ -275,14 +274,14 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
}
}
- acnt = echan->addr_width;
+ acnt = dev_width;
/*
* If the maxburst is equal to the fifo width, use
* A-synced transfers. This allows for large contiguous
* buffer transfers using only one PaRAM set.
*/
- if (echan->maxburst == 1) {
+ if (burst == 1) {
edesc->absync = false;
ccnt = sg_dma_len(sg) / acnt / (SZ_64K - 1);
bcnt = sg_dma_len(sg) / acnt - ccnt * (SZ_64K - 1);
@@ -302,7 +301,7 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
*/
} else {
edesc->absync = true;
- bcnt = echan->maxburst;
+ bcnt = burst;
ccnt = sg_dma_len(sg) / (acnt * bcnt);
if (ccnt > (SZ_64K - 1)) {
dev_err(dev, "Exceeded max SG segment size\n");
@@ -313,13 +312,13 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
if (direction == DMA_MEM_TO_DEV) {
src = sg_dma_address(sg);
- dst = echan->addr;
+ dst = dev_addr;
src_bidx = acnt;
src_cidx = cidx;
dst_bidx = 0;
dst_cidx = 0;
} else {
- src = echan->addr;
+ src = dev_addr;
dst = sg_dma_address(sg);
src_bidx = 0;
src_cidx = 0;
@@ -621,13 +620,11 @@ static struct platform_device *pdev0, *pdev1;
static const struct platform_device_info edma_dev_info0 = {
.name = "edma-dma-engine",
.id = 0,
- .dma_mask = DMA_BIT_MASK(32),
};
static const struct platform_device_info edma_dev_info1 = {
.name = "edma-dma-engine",
.id = 1,
- .dma_mask = DMA_BIT_MASK(32),
};
static int edma_init(void)
@@ -641,6 +638,8 @@ static int edma_init(void)
ret = PTR_ERR(pdev0);
goto out;
}
+ pdev0->dev.dma_mask = &pdev0->dev.coherent_dma_mask;
+ pdev0->dev.coherent_dma_mask = DMA_BIT_MASK(32);
}
if (EDMA_CTLRS == 2) {
@@ -650,6 +649,8 @@ static int edma_init(void)
platform_device_unregister(pdev0);
ret = PTR_ERR(pdev1);
}
+ pdev1->dev.dma_mask = &pdev1->dev.coherent_dma_mask;
+ pdev1->dev.coherent_dma_mask = DMA_BIT_MASK(32);
}
out:
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index bcfde40..f2bf8c0 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -903,8 +903,7 @@ static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
switch (data->port) {
case EP93XX_DMA_SSP:
case EP93XX_DMA_IDE:
- if (data->direction != DMA_MEM_TO_DEV &&
- data->direction != DMA_DEV_TO_MEM)
+ if (!is_slave_direction(data->direction))
return -EINVAL;
break;
default:
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index a7dcf78..70b8975 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -14,6 +14,7 @@
* http://www.opensource.org/licenses/gpl-license.html
* http://www.gnu.org/copyleft/gpl.html
*/
+#include <linux/err.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/mm.h>
@@ -1010,9 +1011,9 @@ static int __init imxdma_probe(struct platform_device *pdev)
imxdma->devtype = pdev->id_entry->driver_data;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- imxdma->base = devm_request_and_ioremap(&pdev->dev, res);
- if (!imxdma->base)
- return -EADDRNOTAVAIL;
+ imxdma->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(imxdma->base))
+ return PTR_ERR(imxdma->base);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 1a68a8b..1879a59 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -833,14 +833,14 @@ int ioat_dma_self_test(struct ioatdma_device *device)
dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
- flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_COMPL_DEST_UNMAP_SINGLE |
+ flags = DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP |
DMA_PREP_INTERRUPT;
tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
IOAT_TEST_SIZE, flags);
if (!tx) {
dev_err(dev, "Self-test prep failed, disabling\n");
err = -ENODEV;
- goto free_resources;
+ goto unmap_dma;
}
async_tx_ack(tx);
@@ -851,7 +851,7 @@ int ioat_dma_self_test(struct ioatdma_device *device)
if (cookie < 0) {
dev_err(dev, "Self-test setup failed, disabling\n");
err = -ENODEV;
- goto free_resources;
+ goto unmap_dma;
}
dma->device_issue_pending(dma_chan);
@@ -862,7 +862,7 @@ int ioat_dma_self_test(struct ioatdma_device *device)
!= DMA_SUCCESS) {
dev_err(dev, "Self-test copy timed out, disabling\n");
err = -ENODEV;
- goto free_resources;
+ goto unmap_dma;
}
if (memcmp(src, dest, IOAT_TEST_SIZE)) {
dev_err(dev, "Self-test copy failed compare, disabling\n");
@@ -870,6 +870,9 @@ int ioat_dma_self_test(struct ioatdma_device *device)
goto free_resources;
}
+unmap_dma:
+ dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
+ dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
free_resources:
dma->device_free_chan_resources(dma_chan);
out:
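The new unmap_dma label is the conventional kernel unwind shape: failure paths jump to a label that undoes the mappings, and labels are ordered so the success path can fall straight through them. Schematically (run_transfer() is a hypothetical stand-in for prep/submit/wait):

static int self_test_shape(struct device *dev, void *src, void *dst, size_t len)
{
	dma_addr_t s, d;
	int err = 0;

	s = dma_map_single(dev, src, len, DMA_TO_DEVICE);
	d = dma_map_single(dev, dst, len, DMA_FROM_DEVICE);

	if (run_transfer(s, d, len) < 0) {	/* hypothetical helper */
		err = -ENODEV;
		goto unmap;
	}
	/* success path falls through the same unwind */
unmap:
	dma_unmap_single(dev, d, len, DMA_FROM_DEVICE);
	dma_unmap_single(dev, s, len, DMA_TO_DEVICE);
	return err;
}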
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 087935f..53a4cbb 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -97,6 +97,7 @@ struct ioat_chan_common {
#define IOAT_KOBJ_INIT_FAIL 3
#define IOAT_RESHAPE_PENDING 4
#define IOAT_RUN 5
+ #define IOAT_CHAN_ACTIVE 6
struct timer_list timer;
#define COMPLETION_TIMEOUT msecs_to_jiffies(100)
#define IDLE_TIMEOUT msecs_to_jiffies(2000)
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index 82d4e30..b925e1b 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -269,61 +269,22 @@ static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
__ioat2_restart_chan(ioat);
}
-void ioat2_timer_event(unsigned long data)
+static void check_active(struct ioat2_dma_chan *ioat)
{
- struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
struct ioat_chan_common *chan = &ioat->base;
- if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
- dma_addr_t phys_complete;
- u64 status;
-
- status = ioat_chansts(chan);
-
- /* when halted due to errors check for channel
- * programming errors before advancing the completion state
- */
- if (is_ioat_halted(status)) {
- u32 chanerr;
-
- chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
- dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
- __func__, chanerr);
- if (test_bit(IOAT_RUN, &chan->state))
- BUG_ON(is_ioat_bug(chanerr));
- else /* we never got off the ground */
- return;
- }
-
- /* if we haven't made progress and we have already
- * acknowledged a pending completion once, then be more
- * forceful with a restart
- */
- spin_lock_bh(&chan->cleanup_lock);
- if (ioat_cleanup_preamble(chan, &phys_complete)) {
- __cleanup(ioat, phys_complete);
- } else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) {
- spin_lock_bh(&ioat->prep_lock);
- ioat2_restart_channel(ioat);
- spin_unlock_bh(&ioat->prep_lock);
- } else {
- set_bit(IOAT_COMPLETION_ACK, &chan->state);
- mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
- }
- spin_unlock_bh(&chan->cleanup_lock);
- } else {
- u16 active;
+ if (ioat2_ring_active(ioat)) {
+ mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+ return;
+ }
+ if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &chan->state))
+ mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
+ else if (ioat->alloc_order > ioat_get_alloc_order()) {
/* if the ring is idle, empty, and oversized try to step
* down the size
*/
- spin_lock_bh(&chan->cleanup_lock);
- spin_lock_bh(&ioat->prep_lock);
- active = ioat2_ring_active(ioat);
- if (active == 0 && ioat->alloc_order > ioat_get_alloc_order())
- reshape_ring(ioat, ioat->alloc_order-1);
- spin_unlock_bh(&ioat->prep_lock);
- spin_unlock_bh(&chan->cleanup_lock);
+ reshape_ring(ioat, ioat->alloc_order - 1);
/* keep shrinking until we get back to our minimum
* default size
@@ -331,6 +292,60 @@ void ioat2_timer_event(unsigned long data)
if (ioat->alloc_order > ioat_get_alloc_order())
mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
}
+
+}
+
+void ioat2_timer_event(unsigned long data)
+{
+ struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
+ struct ioat_chan_common *chan = &ioat->base;
+ dma_addr_t phys_complete;
+ u64 status;
+
+ status = ioat_chansts(chan);
+
+ /* when halted due to errors check for channel
+ * programming errors before advancing the completion state
+ */
+ if (is_ioat_halted(status)) {
+ u32 chanerr;
+
+ chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+ dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
+ __func__, chanerr);
+ if (test_bit(IOAT_RUN, &chan->state))
+ BUG_ON(is_ioat_bug(chanerr));
+ else /* we never got off the ground */
+ return;
+ }
+
+ /* if we haven't made progress and we have already
+ * acknowledged a pending completion once, then be more
+ * forceful with a restart
+ */
+ spin_lock_bh(&chan->cleanup_lock);
+ if (ioat_cleanup_preamble(chan, &phys_complete))
+ __cleanup(ioat, phys_complete);
+ else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) {
+ spin_lock_bh(&ioat->prep_lock);
+ ioat2_restart_channel(ioat);
+ spin_unlock_bh(&ioat->prep_lock);
+ spin_unlock_bh(&chan->cleanup_lock);
+ return;
+ } else {
+ set_bit(IOAT_COMPLETION_ACK, &chan->state);
+ mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+ }
+
+
+ if (ioat2_ring_active(ioat))
+ mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+ else {
+ spin_lock_bh(&ioat->prep_lock);
+ check_active(ioat);
+ spin_unlock_bh(&ioat->prep_lock);
+ }
+ spin_unlock_bh(&chan->cleanup_lock);
}
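The restructured timer path hangs off a single IOAT_CHAN_ACTIVE bit: submission arms the completion timer only on the 0-to-1 transition, and the timer demotes itself to the long idle interval on the 1-to-0 transition. The pattern in isolation (MY_CHAN_ACTIVE and the timer names are assumptions):

static void on_submit(unsigned long *chan_state, struct timer_list *t)
{
	/* first descriptor in flight: start the fast completion timer */
	if (!test_and_set_bit(MY_CHAN_ACTIVE, chan_state))
		mod_timer(t, jiffies + COMPLETION_TIMEOUT);
}

static void on_ring_empty(unsigned long *chan_state, struct timer_list *t)
{
	/* ring just drained: demote to the slow idle timer */
	if (test_and_clear_bit(MY_CHAN_ACTIVE, chan_state))
		mod_timer(t, jiffies + IDLE_TIMEOUT);
}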
static int ioat2_reset_hw(struct ioat_chan_common *chan)
@@ -404,7 +419,7 @@ static dma_cookie_t ioat2_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
cookie = dma_cookie_assign(tx);
dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);
- if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state))
+ if (!test_and_set_bit(IOAT_CHAN_ACTIVE, &chan->state))
mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
/* make descriptor updates visible before advancing ioat->head,
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 3e9d669..e8336cc 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -342,61 +342,22 @@ static void ioat3_restart_channel(struct ioat2_dma_chan *ioat)
__ioat2_restart_chan(ioat);
}
-static void ioat3_timer_event(unsigned long data)
+static void check_active(struct ioat2_dma_chan *ioat)
{
- struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
struct ioat_chan_common *chan = &ioat->base;
- if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
- dma_addr_t phys_complete;
- u64 status;
-
- status = ioat_chansts(chan);
-
- /* when halted due to errors check for channel
- * programming errors before advancing the completion state
- */
- if (is_ioat_halted(status)) {
- u32 chanerr;
-
- chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
- dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
- __func__, chanerr);
- if (test_bit(IOAT_RUN, &chan->state))
- BUG_ON(is_ioat_bug(chanerr));
- else /* we never got off the ground */
- return;
- }
-
- /* if we haven't made progress and we have already
- * acknowledged a pending completion once, then be more
- * forceful with a restart
- */
- spin_lock_bh(&chan->cleanup_lock);
- if (ioat_cleanup_preamble(chan, &phys_complete))
- __cleanup(ioat, phys_complete);
- else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) {
- spin_lock_bh(&ioat->prep_lock);
- ioat3_restart_channel(ioat);
- spin_unlock_bh(&ioat->prep_lock);
- } else {
- set_bit(IOAT_COMPLETION_ACK, &chan->state);
- mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
- }
- spin_unlock_bh(&chan->cleanup_lock);
- } else {
- u16 active;
+ if (ioat2_ring_active(ioat)) {
+ mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+ return;
+ }
+ if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &chan->state))
+ mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
+ else if (ioat->alloc_order > ioat_get_alloc_order()) {
/* if the ring is idle, empty, and oversized try to step
* down the size
*/
- spin_lock_bh(&chan->cleanup_lock);
- spin_lock_bh(&ioat->prep_lock);
- active = ioat2_ring_active(ioat);
- if (active == 0 && ioat->alloc_order > ioat_get_alloc_order())
- reshape_ring(ioat, ioat->alloc_order-1);
- spin_unlock_bh(&ioat->prep_lock);
- spin_unlock_bh(&chan->cleanup_lock);
+ reshape_ring(ioat, ioat->alloc_order - 1);
/* keep shrinking until we get back to our minimum
* default size
@@ -404,6 +365,60 @@ static void ioat3_timer_event(unsigned long data)
if (ioat->alloc_order > ioat_get_alloc_order())
mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
}
+
+}
+
+static void ioat3_timer_event(unsigned long data)
+{
+ struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
+ struct ioat_chan_common *chan = &ioat->base;
+ dma_addr_t phys_complete;
+ u64 status;
+
+ status = ioat_chansts(chan);
+
+ /* when halted due to errors check for channel
+ * programming errors before advancing the completion state
+ */
+ if (is_ioat_halted(status)) {
+ u32 chanerr;
+
+ chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+ dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
+ __func__, chanerr);
+ if (test_bit(IOAT_RUN, &chan->state))
+ BUG_ON(is_ioat_bug(chanerr));
+ else /* we never got off the ground */
+ return;
+ }
+
+ /* if we haven't made progress and we have already
+ * acknowledged a pending completion once, then be more
+ * forceful with a restart
+ */
+ spin_lock_bh(&chan->cleanup_lock);
+ if (ioat_cleanup_preamble(chan, &phys_complete))
+ __cleanup(ioat, phys_complete);
+ else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) {
+ spin_lock_bh(&ioat->prep_lock);
+ ioat3_restart_channel(ioat);
+ spin_unlock_bh(&ioat->prep_lock);
+ spin_unlock_bh(&chan->cleanup_lock);
+ return;
+ } else {
+ set_bit(IOAT_COMPLETION_ACK, &chan->state);
+ mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+ }
+
+
+ if (ioat2_ring_active(ioat))
+ mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+ else {
+ spin_lock_bh(&ioat->prep_lock);
+ check_active(ioat);
+ spin_unlock_bh(&ioat->prep_lock);
+ }
+ spin_unlock_bh(&chan->cleanup_lock);
}
static enum dma_status
@@ -863,6 +878,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
unsigned long tmo;
struct device *dev = &device->pdev->dev;
struct dma_device *dma = &device->common;
+ u8 op = 0;
dev_dbg(dev, "%s\n", __func__);
@@ -908,18 +924,22 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
}
/* test xor */
+ op = IOAT_OP_XOR;
+
dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE);
for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE,
DMA_TO_DEVICE);
tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
IOAT_NUM_SRC_TEST, PAGE_SIZE,
- DMA_PREP_INTERRUPT);
+ DMA_PREP_INTERRUPT |
+ DMA_COMPL_SKIP_SRC_UNMAP |
+ DMA_COMPL_SKIP_DEST_UNMAP);
if (!tx) {
dev_err(dev, "Self-test xor prep failed\n");
err = -ENODEV;
- goto free_resources;
+ goto dma_unmap;
}
async_tx_ack(tx);
@@ -930,7 +950,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
if (cookie < 0) {
dev_err(dev, "Self-test xor setup failed\n");
err = -ENODEV;
- goto free_resources;
+ goto dma_unmap;
}
dma->device_issue_pending(dma_chan);
@@ -939,9 +959,13 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
dev_err(dev, "Self-test xor timed out\n");
err = -ENODEV;
- goto free_resources;
+ goto dma_unmap;
}
+ dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
+ for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
+ dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);
+
dma_sync_single_for_cpu(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
u32 *ptr = page_address(dest);
@@ -957,6 +981,8 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
goto free_resources;
+ op = IOAT_OP_XOR_VAL;
+
+ /* validate the sources with the destination page */
for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
xor_val_srcs[i] = xor_srcs[i];
@@ -969,11 +995,13 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
DMA_TO_DEVICE);
tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
- &xor_val_result, DMA_PREP_INTERRUPT);
+ &xor_val_result, DMA_PREP_INTERRUPT |
+ DMA_COMPL_SKIP_SRC_UNMAP |
+ DMA_COMPL_SKIP_DEST_UNMAP);
if (!tx) {
dev_err(dev, "Self-test zero prep failed\n");
err = -ENODEV;
- goto free_resources;
+ goto dma_unmap;
}
async_tx_ack(tx);
@@ -984,7 +1012,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
if (cookie < 0) {
dev_err(dev, "Self-test zero setup failed\n");
err = -ENODEV;
- goto free_resources;
+ goto dma_unmap;
}
dma->device_issue_pending(dma_chan);
@@ -993,9 +1021,12 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
dev_err(dev, "Self-test validate timed out\n");
err = -ENODEV;
- goto free_resources;
+ goto dma_unmap;
}
+ for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
+ dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);
+
if (xor_val_result != 0) {
dev_err(dev, "Self-test validate failed compare\n");
err = -ENODEV;
@@ -1007,14 +1038,18 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
goto free_resources;
/* test memset */
+ op = IOAT_OP_FILL;
+
dma_addr = dma_map_page(dev, dest, 0,
PAGE_SIZE, DMA_FROM_DEVICE);
tx = dma->device_prep_dma_memset(dma_chan, dma_addr, 0, PAGE_SIZE,
- DMA_PREP_INTERRUPT);
+ DMA_PREP_INTERRUPT |
+ DMA_COMPL_SKIP_SRC_UNMAP |
+ DMA_COMPL_SKIP_DEST_UNMAP);
if (!tx) {
dev_err(dev, "Self-test memset prep failed\n");
err = -ENODEV;
- goto free_resources;
+ goto dma_unmap;
}
async_tx_ack(tx);
@@ -1025,7 +1060,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
if (cookie < 0) {
dev_err(dev, "Self-test memset setup failed\n");
err = -ENODEV;
- goto free_resources;
+ goto dma_unmap;
}
dma->device_issue_pending(dma_chan);
@@ -1034,9 +1069,11 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
dev_err(dev, "Self-test memset timed out\n");
err = -ENODEV;
- goto free_resources;
+ goto dma_unmap;
}
+ dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);
+
for (i = 0; i < PAGE_SIZE/sizeof(u32); i++) {
u32 *ptr = page_address(dest);
if (ptr[i]) {
@@ -1047,17 +1084,21 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
}
/* test for non-zero parity sum */
+ op = IOAT_OP_XOR_VAL;
+
xor_val_result = 0;
for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
DMA_TO_DEVICE);
tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
- &xor_val_result, DMA_PREP_INTERRUPT);
+ &xor_val_result, DMA_PREP_INTERRUPT |
+ DMA_COMPL_SKIP_SRC_UNMAP |
+ DMA_COMPL_SKIP_DEST_UNMAP);
if (!tx) {
dev_err(dev, "Self-test 2nd zero prep failed\n");
err = -ENODEV;
- goto free_resources;
+ goto dma_unmap;
}
async_tx_ack(tx);
@@ -1068,7 +1109,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
if (cookie < 0) {
dev_err(dev, "Self-test 2nd zero setup failed\n");
err = -ENODEV;
- goto free_resources;
+ goto dma_unmap;
}
dma->device_issue_pending(dma_chan);
@@ -1077,15 +1118,31 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
dev_err(dev, "Self-test 2nd validate timed out\n");
err = -ENODEV;
- goto free_resources;
+ goto dma_unmap;
}
if (xor_val_result != SUM_CHECK_P_RESULT) {
dev_err(dev, "Self-test validate failed compare\n");
err = -ENODEV;
- goto free_resources;
+ goto dma_unmap;
}
+ for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
+ dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);
+
+ goto free_resources;
+dma_unmap:
+ if (op == IOAT_OP_XOR) {
+ dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
+ for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
+ dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
+ DMA_TO_DEVICE);
+ } else if (op == IOAT_OP_XOR_VAL) {
+ for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
+ dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
+ DMA_TO_DEVICE);
+ } else if (op == IOAT_OP_FILL)
+ dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);
free_resources:
dma->device_free_chan_resources(dma_chan);
out:
@@ -1126,12 +1183,7 @@ static int ioat3_reset_hw(struct ioat_chan_common *chan)
chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
- /* -= IOAT ver.3 workarounds =- */
- /* Write CHANERRMSK_INT with 3E07h to mask out the errors
- * that can cause stability issues for IOAT ver.3, and clear any
- * pending errors
- */
- pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07);
+ /* clear any pending errors */
err = pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
if (err) {
dev_err(&pdev->dev, "channel error register unreachable\n");
@@ -1187,6 +1239,26 @@ static bool is_snb_ioat(struct pci_dev *pdev)
}
}
+static bool is_ivb_ioat(struct pci_dev *pdev)
+{
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB0:
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB1:
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB2:
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB3:
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB4:
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB5:
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB6:
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB7:
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB8:
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB9:
+ return true;
+ default:
+ return false;
+	}
+}
+
int ioat3_dma_probe(struct ioatdma_device *device, int dca)
{
struct pci_dev *pdev = device->pdev;
@@ -1207,7 +1279,7 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
dma->device_free_chan_resources = ioat2_free_chan_resources;
- if (is_jf_ioat(pdev) || is_snb_ioat(pdev))
+ if (is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev))
dma->copy_align = 6;
dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
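For reference, the reworked self-test error handling in dma_v3.c above follows a common goto-unwind pattern: each test phase records which kind of operation it mapped buffers for, and a single dma_unmap label undoes exactly the mappings of the failing phase. A minimal standalone sketch of the idea (the names and the unmap() helper are illustrative stand-ins, not driver code):

	#include <stdio.h>

	enum op { OP_NONE, OP_XOR, OP_XOR_VAL, OP_FILL };

	/* stand-in for dma_unmap_page() on one phase's buffers */
	static void unmap(const char *what) { printf("unmap %s\n", what); }

	static int self_test(int failing_phase)
	{
		enum op op = OP_XOR;
		int err = 0;

		if (failing_phase == 1) { err = -1; goto dma_unmap; }
		/* phase 1 unmaps its own buffers on success ... */

		op = OP_FILL;
		if (failing_phase == 2) { err = -1; goto dma_unmap; }

		goto free_resources;
	dma_unmap:
		/* undo only what the failing phase still has mapped */
		if (op == OP_XOR)
			unmap("xor dest and sources");
		else if (op == OP_FILL)
			unmap("fill dest");
	free_resources:
		/* release the channel in both paths */
		return err;
	}

	int main(void) { self_test(2); return 0; }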
diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h
index d2ff3fd..7cb74c6 100644
--- a/drivers/dma/ioat/hw.h
+++ b/drivers/dma/ioat/hw.h
@@ -35,6 +35,17 @@
#define IOAT_VER_3_0 0x30 /* Version 3.0 */
#define IOAT_VER_3_2 0x32 /* Version 3.2 */
+#define PCI_DEVICE_ID_INTEL_IOAT_IVB0 0x0e20
+#define PCI_DEVICE_ID_INTEL_IOAT_IVB1 0x0e21
+#define PCI_DEVICE_ID_INTEL_IOAT_IVB2 0x0e22
+#define PCI_DEVICE_ID_INTEL_IOAT_IVB3 0x0e23
+#define PCI_DEVICE_ID_INTEL_IOAT_IVB4 0x0e24
+#define PCI_DEVICE_ID_INTEL_IOAT_IVB5 0x0e25
+#define PCI_DEVICE_ID_INTEL_IOAT_IVB6 0x0e26
+#define PCI_DEVICE_ID_INTEL_IOAT_IVB7 0x0e27
+#define PCI_DEVICE_ID_INTEL_IOAT_IVB8 0x0e2e
+#define PCI_DEVICE_ID_INTEL_IOAT_IVB9 0x0e2f
+
int system_has_dca_enabled(struct pci_dev *pdev);
struct ioat_dma_descriptor {
diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c
index 4f686c5..71c7ecd 100644
--- a/drivers/dma/ioat/pci.c
+++ b/drivers/dma/ioat/pci.c
@@ -40,17 +40,6 @@ MODULE_VERSION(IOAT_DMA_VERSION);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");
-#define PCI_DEVICE_ID_INTEL_IOAT_IVB0 0x0e20
-#define PCI_DEVICE_ID_INTEL_IOAT_IVB1 0x0e21
-#define PCI_DEVICE_ID_INTEL_IOAT_IVB2 0x0e22
-#define PCI_DEVICE_ID_INTEL_IOAT_IVB3 0x0e23
-#define PCI_DEVICE_ID_INTEL_IOAT_IVB4 0x0e24
-#define PCI_DEVICE_ID_INTEL_IOAT_IVB5 0x0e25
-#define PCI_DEVICE_ID_INTEL_IOAT_IVB6 0x0e26
-#define PCI_DEVICE_ID_INTEL_IOAT_IVB7 0x0e27
-#define PCI_DEVICE_ID_INTEL_IOAT_IVB8 0x0e2e
-#define PCI_DEVICE_ID_INTEL_IOAT_IVB9 0x0e2f
-
static struct pci_device_id ioat_pci_tbl[] = {
/* I/OAT v1 platforms */
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT) },
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index eacb8be..7dafb9f 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -936,7 +936,7 @@ static irqreturn_t iop_adma_err_handler(int irq, void *data)
struct iop_adma_chan *chan = data;
unsigned long status = iop_chan_get_status(chan);
- dev_printk(KERN_ERR, chan->device->common.dev,
+ dev_err(chan->device->common.dev,
"error ( %s%s%s%s%s%s%s)\n",
iop_is_err_int_parity(status, chan) ? "int_parity " : "",
iop_is_err_mcu_abort(status, chan) ? "mcu_abort " : "",
@@ -1017,7 +1017,7 @@ static int iop_adma_memcpy_self_test(struct iop_adma_device *device)
if (iop_adma_status(dma_chan, cookie, NULL) !=
DMA_SUCCESS) {
- dev_printk(KERN_ERR, dma_chan->device->dev,
+ dev_err(dma_chan->device->dev,
"Self-test copy timed out, disabling\n");
err = -ENODEV;
goto free_resources;
@@ -1027,7 +1027,7 @@ static int iop_adma_memcpy_self_test(struct iop_adma_device *device)
dma_sync_single_for_cpu(&iop_chan->device->pdev->dev, dest_dma,
IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE);
if (memcmp(src, dest, IOP_ADMA_TEST_SIZE)) {
- dev_printk(KERN_ERR, dma_chan->device->dev,
+ dev_err(dma_chan->device->dev,
"Self-test copy failed compare, disabling\n");
err = -ENODEV;
goto free_resources;
@@ -1117,7 +1117,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
if (iop_adma_status(dma_chan, cookie, NULL) !=
DMA_SUCCESS) {
- dev_printk(KERN_ERR, dma_chan->device->dev,
+ dev_err(dma_chan->device->dev,
"Self-test xor timed out, disabling\n");
err = -ENODEV;
goto free_resources;
@@ -1129,7 +1129,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
u32 *ptr = page_address(dest);
if (ptr[i] != cmp_word) {
- dev_printk(KERN_ERR, dma_chan->device->dev,
+ dev_err(dma_chan->device->dev,
"Self-test xor failed compare, disabling\n");
err = -ENODEV;
goto free_resources;
@@ -1163,14 +1163,14 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
msleep(8);
if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
- dev_printk(KERN_ERR, dma_chan->device->dev,
+ dev_err(dma_chan->device->dev,
"Self-test zero sum timed out, disabling\n");
err = -ENODEV;
goto free_resources;
}
if (zero_sum_result != 0) {
- dev_printk(KERN_ERR, dma_chan->device->dev,
+ dev_err(dma_chan->device->dev,
"Self-test zero sum failed compare, disabling\n");
err = -ENODEV;
goto free_resources;
@@ -1187,7 +1187,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
msleep(8);
if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
- dev_printk(KERN_ERR, dma_chan->device->dev,
+ dev_err(dma_chan->device->dev,
"Self-test memset timed out, disabling\n");
err = -ENODEV;
goto free_resources;
@@ -1196,7 +1196,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
for (i = 0; i < PAGE_SIZE/sizeof(u32); i++) {
u32 *ptr = page_address(dest);
if (ptr[i]) {
- dev_printk(KERN_ERR, dma_chan->device->dev,
+ dev_err(dma_chan->device->dev,
"Self-test memset failed compare, disabling\n");
err = -ENODEV;
goto free_resources;
@@ -1219,14 +1219,14 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
msleep(8);
if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
- dev_printk(KERN_ERR, dma_chan->device->dev,
+ dev_err(dma_chan->device->dev,
"Self-test non-zero sum timed out, disabling\n");
err = -ENODEV;
goto free_resources;
}
if (zero_sum_result != 1) {
- dev_printk(KERN_ERR, dma_chan->device->dev,
+ dev_err(dma_chan->device->dev,
"Self-test non-zero sum failed compare, disabling\n");
err = -ENODEV;
goto free_resources;
@@ -1579,15 +1579,14 @@ static int iop_adma_probe(struct platform_device *pdev)
goto err_free_iop_chan;
}
- dev_printk(KERN_INFO, &pdev->dev, "Intel(R) IOP: "
- "( %s%s%s%s%s%s%s)\n",
- dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "pq " : "",
- dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask) ? "pq_val " : "",
- dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
- dma_has_cap(DMA_XOR_VAL, dma_dev->cap_mask) ? "xor_val " : "",
- dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "",
- dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
- dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
+ dev_info(&pdev->dev, "Intel(R) IOP: ( %s%s%s%s%s%s%s)\n",
+ dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "pq " : "",
+ dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask) ? "pq_val " : "",
+ dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
+ dma_has_cap(DMA_XOR_VAL, dma_dev->cap_mask) ? "xor_val " : "",
+ dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "",
+ dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
+ dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
dma_async_device_register(dma_dev);
goto out;
@@ -1651,8 +1650,8 @@ static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan)
/* run the descriptor */
iop_chan_enable(iop_chan);
} else
- dev_printk(KERN_ERR, iop_chan->device->common.dev,
- "failed to allocate null descriptor\n");
+ dev_err(iop_chan->device->common.dev,
+ "failed to allocate null descriptor\n");
spin_unlock_bh(&iop_chan->lock);
}
@@ -1704,7 +1703,7 @@ static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan)
/* run the descriptor */
iop_chan_enable(iop_chan);
} else
- dev_printk(KERN_ERR, iop_chan->device->common.dev,
+ dev_err(iop_chan->device->common.dev,
"failed to allocate null descriptor\n");
spin_unlock_bh(&iop_chan->lock);
}
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index 6585537..8c61d17 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -1347,7 +1347,7 @@ static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan
chan->chan_id != IDMAC_IC_7)
return NULL;
- if (direction != DMA_DEV_TO_MEM && direction != DMA_MEM_TO_DEV) {
+ if (!is_slave_direction(direction)) {
dev_err(chan->device->dev, "Invalid DMA direction %d!\n", direction);
return NULL;
}
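The open-coded direction test is replaced by the is_slave_direction() helper from <linux/dmaengine.h>, which reads essentially:

	static inline bool is_slave_direction(enum dma_transfer_direction direction)
	{
		return (direction == DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM);
	}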
diff --git a/drivers/dma/ipu/ipu_irq.c b/drivers/dma/ipu/ipu_irq.c
index a5ee37d..2e284a4 100644
--- a/drivers/dma/ipu/ipu_irq.c
+++ b/drivers/dma/ipu/ipu_irq.c
@@ -44,7 +44,6 @@ static void ipu_write_reg(struct ipu *ipu, u32 value, unsigned long reg)
struct ipu_irq_bank {
unsigned int control;
unsigned int status;
- spinlock_t lock;
struct ipu *ipu;
};
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index c6d98c0..c26699f 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -5,6 +5,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
@@ -617,10 +618,8 @@ static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
else if (maxburst == 32)
chan->dcmd |= DCMD_BURST32;
- if (cfg) {
- chan->dir = cfg->direction;
- chan->drcmr = cfg->slave_id;
- }
+ chan->dir = cfg->direction;
+ chan->drcmr = cfg->slave_id;
chan->dev_addr = addr;
break;
default:
@@ -782,9 +781,9 @@ static int mmp_pdma_probe(struct platform_device *op)
if (!iores)
return -EINVAL;
- pdev->base = devm_request_and_ioremap(pdev->dev, iores);
- if (!pdev->base)
- return -EADDRNOTAVAIL;
+ pdev->base = devm_ioremap_resource(pdev->dev, iores);
+ if (IS_ERR(pdev->base))
+ return PTR_ERR(pdev->base);
of_id = of_match_device(mmp_pdma_dt_ids, pdev->dev);
if (of_id)
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index a9f1cd5..43d5a6c 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -9,6 +9,7 @@
*
*/
+#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
@@ -547,9 +548,9 @@ static int mmp_tdma_probe(struct platform_device *pdev)
if (!iores)
return -EINVAL;
- tdev->base = devm_request_and_ioremap(&pdev->dev, iores);
- if (!tdev->base)
- return -EADDRNOTAVAIL;
+ tdev->base = devm_ioremap_resource(&pdev->dev, iores);
+ if (IS_ERR(tdev->base))
+ return PTR_ERR(tdev->base);
INIT_LIST_HEAD(&tdev->device.channels);
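Both mmp drivers above switch to devm_ioremap_resource(), which combines the request_mem_region()/ioremap() pair, logs its own errors, and reports failure through ERR_PTR() codes rather than NULL. The resulting probe idiom, as a sketch of the pattern rather than a complete probe:

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);	/* no dev_err() needed here */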
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index e17fad0..d64ae14 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -210,7 +210,7 @@ static void mv_set_mode(struct mv_xor_chan *chan,
break;
default:
dev_err(mv_chan_to_devp(chan),
- "error: unsupported operation %d.\n",
+ "error: unsupported operation %d\n",
type);
BUG();
return;
@@ -828,28 +828,22 @@ static void mv_dump_xor_regs(struct mv_xor_chan *chan)
u32 val;
val = __raw_readl(XOR_CONFIG(chan));
- dev_err(mv_chan_to_devp(chan),
- "config 0x%08x.\n", val);
+ dev_err(mv_chan_to_devp(chan), "config 0x%08x\n", val);
val = __raw_readl(XOR_ACTIVATION(chan));
- dev_err(mv_chan_to_devp(chan),
- "activation 0x%08x.\n", val);
+ dev_err(mv_chan_to_devp(chan), "activation 0x%08x\n", val);
val = __raw_readl(XOR_INTR_CAUSE(chan));
- dev_err(mv_chan_to_devp(chan),
- "intr cause 0x%08x.\n", val);
+ dev_err(mv_chan_to_devp(chan), "intr cause 0x%08x\n", val);
val = __raw_readl(XOR_INTR_MASK(chan));
- dev_err(mv_chan_to_devp(chan),
- "intr mask 0x%08x.\n", val);
+ dev_err(mv_chan_to_devp(chan), "intr mask 0x%08x\n", val);
val = __raw_readl(XOR_ERROR_CAUSE(chan));
- dev_err(mv_chan_to_devp(chan),
- "error cause 0x%08x.\n", val);
+ dev_err(mv_chan_to_devp(chan), "error cause 0x%08x\n", val);
val = __raw_readl(XOR_ERROR_ADDR(chan));
- dev_err(mv_chan_to_devp(chan),
- "error addr 0x%08x.\n", val);
+ dev_err(mv_chan_to_devp(chan), "error addr 0x%08x\n", val);
}
static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan,
@@ -862,7 +856,7 @@ static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan,
}
dev_err(mv_chan_to_devp(chan),
- "error on chan %d. intr cause 0x%08x.\n",
+ "error on chan %d. intr cause 0x%08x\n",
chan->idx, intr_cause);
mv_dump_xor_regs(chan);
@@ -1052,9 +1046,8 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
u32 *ptr = page_address(dest);
if (ptr[i] != cmp_word) {
dev_err(dma_chan->device->dev,
- "Self-test xor failed compare, disabling."
- " index %d, data %x, expected %x\n", i,
- ptr[i], cmp_word);
+ "Self-test xor failed compare, disabling. index %d, data %x, expected %x\n",
+ i, ptr[i], cmp_word);
err = -ENODEV;
goto free_resources;
}
@@ -1194,12 +1187,11 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
goto err_free_irq;
}
- dev_info(&pdev->dev, "Marvell XOR: "
- "( %s%s%s%s)\n",
- dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
- dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "",
- dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
- dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
+ dev_info(&pdev->dev, "Marvell XOR: ( %s%s%s%s)\n",
+ dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
+ dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "",
+ dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
+ dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
dma_async_device_register(dma_dev);
return mv_chan;
@@ -1253,7 +1245,7 @@ static int mv_xor_probe(struct platform_device *pdev)
struct resource *res;
int i, ret;
- dev_notice(&pdev->dev, "Marvell XOR driver\n");
+ dev_notice(&pdev->dev, "Marvell shared XOR driver\n");
xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL);
if (!xordev)
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index 9f02e79..8f6d30d 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -109,7 +109,7 @@ struct mxs_dma_chan {
struct dma_chan chan;
struct dma_async_tx_descriptor desc;
struct tasklet_struct tasklet;
- int chan_irq;
+ unsigned int chan_irq;
struct mxs_dma_ccw *ccw;
dma_addr_t ccw_phys;
int desc_count;
@@ -441,7 +441,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
struct mxs_dma_ccw *ccw;
struct scatterlist *sg;
- int i, j;
+ u32 i, j;
u32 *pio;
bool append = flags & DMA_PREP_INTERRUPT;
int idx = append ? mxs_chan->desc_count : 0;
@@ -537,8 +537,8 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
{
struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
- int num_periods = buf_len / period_len;
- int i = 0, buf = 0;
+ u32 num_periods = buf_len / period_len;
+ u32 i = 0, buf = 0;
if (mxs_chan->status == DMA_IN_PROGRESS)
return NULL;
diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c
new file mode 100644
index 0000000..69d04d2
--- /dev/null
+++ b/drivers/dma/of-dma.c
@@ -0,0 +1,267 @@
+/*
+ * Device tree helpers for DMA request / controller
+ *
+ * Based on of_gpio.c
+ *
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/rculist.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+
+static LIST_HEAD(of_dma_list);
+static DEFINE_SPINLOCK(of_dma_lock);
+
+/**
+ * of_dma_get_controller - Get a DMA controller in DT DMA helpers list
+ * @dma_spec: pointer to DMA specifier as found in the device tree
+ *
+ * Finds a DMA controller with matching device node and number of dma cells
+ * in a list of registered DMA controllers. If a match is found, the use_count
+ * variable is increased and a valid pointer to the DMA data stored is returned.
+ * A NULL pointer is returned if no match is found.
+ */
+static struct of_dma *of_dma_get_controller(struct of_phandle_args *dma_spec)
+{
+ struct of_dma *ofdma;
+
+ spin_lock(&of_dma_lock);
+
+ if (list_empty(&of_dma_list)) {
+ spin_unlock(&of_dma_lock);
+ return NULL;
+ }
+
+ list_for_each_entry(ofdma, &of_dma_list, of_dma_controllers)
+ if ((ofdma->of_node == dma_spec->np) &&
+ (ofdma->of_dma_nbcells == dma_spec->args_count)) {
+ ofdma->use_count++;
+ spin_unlock(&of_dma_lock);
+ return ofdma;
+ }
+
+ spin_unlock(&of_dma_lock);
+
+ pr_debug("%s: can't find DMA controller %s\n", __func__,
+ dma_spec->np->full_name);
+
+ return NULL;
+}
+
+/**
+ * of_dma_put_controller - Decrement use count for a registered DMA controller
+ * @of_dma: pointer to DMA controller data
+ *
+ * Decrements the use_count variable in the DMA data structure. This function
+ * should be called only when a valid pointer is returned from
+ * of_dma_get_controller() and no further accesses to data referenced by that
+ * pointer are needed.
+ */
+static void of_dma_put_controller(struct of_dma *ofdma)
+{
+ spin_lock(&of_dma_lock);
+ ofdma->use_count--;
+ spin_unlock(&of_dma_lock);
+}
+
+/**
+ * of_dma_controller_register - Register a DMA controller to DT DMA helpers
+ * @np: device node of DMA controller
+ * @of_dma_xlate: translation function which converts a phandle
+ * arguments list into a dma_chan structure
+ * @data:	pointer to controller specific data to be used by
+ * translation function
+ *
+ * Returns 0 on success or appropriate errno value on error.
+ *
+ * Allocated memory should be freed with appropriate of_dma_controller_free()
+ * call.
+ */
+int of_dma_controller_register(struct device_node *np,
+ struct dma_chan *(*of_dma_xlate)
+ (struct of_phandle_args *, struct of_dma *),
+ void *data)
+{
+ struct of_dma *ofdma;
+ int nbcells;
+
+ if (!np || !of_dma_xlate) {
+ pr_err("%s: not enough information provided\n", __func__);
+ return -EINVAL;
+ }
+
+ ofdma = kzalloc(sizeof(*ofdma), GFP_KERNEL);
+ if (!ofdma)
+ return -ENOMEM;
+
+ nbcells = be32_to_cpup(of_get_property(np, "#dma-cells", NULL));
+ if (!nbcells) {
+ pr_err("%s: #dma-cells property is missing or invalid\n",
+ __func__);
+ kfree(ofdma);
+ return -EINVAL;
+ }
+
+ ofdma->of_node = np;
+ ofdma->of_dma_nbcells = nbcells;
+ ofdma->of_dma_xlate = of_dma_xlate;
+ ofdma->of_dma_data = data;
+ ofdma->use_count = 0;
+
+ /* Now queue of_dma controller structure in list */
+ spin_lock(&of_dma_lock);
+ list_add_tail(&ofdma->of_dma_controllers, &of_dma_list);
+ spin_unlock(&of_dma_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(of_dma_controller_register);
+
+/**
+ * of_dma_controller_free - Remove a DMA controller from DT DMA helpers list
+ * @np: device node of DMA controller
+ *
+ * Memory allocated by of_dma_controller_register() is freed here.
+ */
+int of_dma_controller_free(struct device_node *np)
+{
+ struct of_dma *ofdma;
+
+ spin_lock(&of_dma_lock);
+
+ if (list_empty(&of_dma_list)) {
+ spin_unlock(&of_dma_lock);
+ return -ENODEV;
+ }
+
+ list_for_each_entry(ofdma, &of_dma_list, of_dma_controllers)
+ if (ofdma->of_node == np) {
+ if (ofdma->use_count) {
+ spin_unlock(&of_dma_lock);
+ return -EBUSY;
+ }
+
+ list_del(&ofdma->of_dma_controllers);
+ spin_unlock(&of_dma_lock);
+ kfree(ofdma);
+ return 0;
+ }
+
+ spin_unlock(&of_dma_lock);
+ return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(of_dma_controller_free);
+
+/**
+ * of_dma_match_channel - Check if a DMA specifier matches name
+ * @np: device node to look for DMA channels
+ * @name: channel name to be matched
+ * @index: index of DMA specifier in list of DMA specifiers
+ * @dma_spec: pointer to DMA specifier as found in the device tree
+ *
+ * Check if the DMA specifier pointed to by the index in a list of DMA
+ * specifiers, matches the name provided. Returns 0 if the name matches and
+ * a valid pointer to the DMA specifier is found. Otherwise returns -ENODEV.
+ */
+static int of_dma_match_channel(struct device_node *np, char *name, int index,
+ struct of_phandle_args *dma_spec)
+{
+ const char *s;
+
+ if (of_property_read_string_index(np, "dma-names", index, &s))
+ return -ENODEV;
+
+ if (strcmp(name, s))
+ return -ENODEV;
+
+ if (of_parse_phandle_with_args(np, "dmas", "#dma-cells", index,
+ dma_spec))
+ return -ENODEV;
+
+ return 0;
+}
+
+/**
+ * of_dma_request_slave_channel - Get the DMA slave channel
+ * @np: device node to get DMA request from
+ * @name: name of desired channel
+ *
+ * Returns pointer to appropriate dma channel on success or NULL on error.
+ */
+struct dma_chan *of_dma_request_slave_channel(struct device_node *np,
+ char *name)
+{
+ struct of_phandle_args dma_spec;
+ struct of_dma *ofdma;
+ struct dma_chan *chan;
+ int count, i;
+
+ if (!np || !name) {
+ pr_err("%s: not enough information provided\n", __func__);
+ return NULL;
+ }
+
+ count = of_property_count_strings(np, "dma-names");
+ if (count < 0) {
+ pr_err("%s: dma-names property missing or empty\n", __func__);
+ return NULL;
+ }
+
+ for (i = 0; i < count; i++) {
+ if (of_dma_match_channel(np, name, i, &dma_spec))
+ continue;
+
+ ofdma = of_dma_get_controller(&dma_spec);
+
+ if (!ofdma)
+ continue;
+
+ chan = ofdma->of_dma_xlate(&dma_spec, ofdma);
+
+ of_dma_put_controller(ofdma);
+
+ of_node_put(dma_spec.np);
+
+ if (chan)
+ return chan;
+ }
+
+ return NULL;
+}
+
+/**
+ * of_dma_simple_xlate - Simple DMA engine translation function
+ * @dma_spec: pointer to DMA specifier as found in the device tree
+ * @of_dma: pointer to DMA controller data
+ *
+ * A simple translation function for devices that use a 32-bit value for the
+ * filter_param when calling the DMA engine dma_request_channel() function.
+ * Note that this translation function requires that #dma-cells is equal to 1
+ * and the argument of the dma specifier is the 32-bit filter_param. Returns
+ * pointer to appropriate dma channel on success or NULL on error.
+ */
+struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ int count = dma_spec->args_count;
+ struct of_dma_filter_info *info = ofdma->of_dma_data;
+
+ if (!info || !info->filter_fn)
+ return NULL;
+
+ if (count != 1)
+ return NULL;
+
+ return dma_request_channel(info->dma_cap, info->filter_fn,
+ &dma_spec->args[0]);
+}
+EXPORT_SYMBOL_GPL(of_dma_simple_xlate);
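To illustrate how the new helpers fit together: a controller driver registers an xlate callback at probe time, and a client driver asks for a named channel from its own device node. A hedged sketch using of_dma_simple_xlate() with struct of_dma_filter_info (my_dma_filter is a hypothetical driver-specific filter function):

	/* provider side, in the DMA controller's probe */
	static struct of_dma_filter_info info;

	dma_cap_zero(info.dma_cap);
	dma_cap_set(DMA_SLAVE, info.dma_cap);
	info.filter_fn = my_dma_filter;		/* hypothetical filter */

	ret = of_dma_controller_register(pdev->dev.of_node,
					 of_dma_simple_xlate, &info);

	/* consumer side, given "dmas" and "dma-names" in its node */
	chan = of_dma_request_slave_channel(dev->of_node, "rx");
	if (!chan)
		return -EPROBE_DEFER;	/* or fall back to a legacy path */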
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index 5a31264..c4b4fd2 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -661,32 +661,14 @@ bool omap_dma_filter_fn(struct dma_chan *chan, void *param)
}
EXPORT_SYMBOL_GPL(omap_dma_filter_fn);
-static struct platform_device *pdev;
-
-static const struct platform_device_info omap_dma_dev_info = {
- .name = "omap-dma-engine",
- .id = -1,
- .dma_mask = DMA_BIT_MASK(32),
-};
-
static int omap_dma_init(void)
{
- int rc = platform_driver_register(&omap_dma_driver);
-
- if (rc == 0) {
- pdev = platform_device_register_full(&omap_dma_dev_info);
- if (IS_ERR(pdev)) {
- platform_driver_unregister(&omap_dma_driver);
- rc = PTR_ERR(pdev);
- }
- }
- return rc;
+ return platform_driver_register(&omap_dma_driver);
}
subsys_initcall(omap_dma_init);
static void __exit omap_dma_exit(void)
{
- platform_device_unregister(pdev);
platform_driver_unregister(&omap_dma_driver);
}
module_exit(omap_dma_exit);
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index 3f26172..d01faeb 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -1029,18 +1029,7 @@ static struct pci_driver pch_dma_driver = {
#endif
};
-static int __init pch_dma_init(void)
-{
- return pci_register_driver(&pch_dma_driver);
-}
-
-static void __exit pch_dma_exit(void)
-{
- pci_unregister_driver(&pch_dma_driver);
-}
-
-module_init(pch_dma_init);
-module_exit(pch_dma_exit);
+module_pci_driver(pch_dma_driver);
MODULE_DESCRIPTION("Intel EG20T PCH / LAPIS Semicon ML7213/ML7223/ML7831 IOH "
"DMA controller driver");
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 80680ee..7181531 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -25,6 +25,7 @@
#include <linux/amba/pl330.h>
#include <linux/scatterlist.h>
#include <linux/of.h>
+#include <linux/of_dma.h>
#include "dmaengine.h"
#define PL330_MAX_CHAN 8
@@ -606,6 +607,11 @@ struct dma_pl330_desc {
struct dma_pl330_chan *pchan;
};
+struct dma_pl330_filter_args {
+ struct dma_pl330_dmac *pdmac;
+ unsigned int chan_id;
+};
+
static inline void _callback(struct pl330_req *r, enum pl330_op_err err)
{
if (r && r->xfer_cb)
@@ -2352,6 +2358,16 @@ static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
tasklet_schedule(&pch->task);
}
+static bool pl330_dt_filter(struct dma_chan *chan, void *param)
+{
+ struct dma_pl330_filter_args *fargs = param;
+
+ if (chan->device != &fargs->pdmac->ddma)
+ return false;
+
+ return (chan->chan_id == fargs->chan_id);
+}
+
bool pl330_filter(struct dma_chan *chan, void *param)
{
u8 *peri_id;
@@ -2359,25 +2375,35 @@ bool pl330_filter(struct dma_chan *chan, void *param)
if (chan->device->dev->driver != &pl330_driver.drv)
return false;
-#ifdef CONFIG_OF
- if (chan->device->dev->of_node) {
- const __be32 *prop_value;
- phandle phandle;
- struct device_node *node;
-
- prop_value = ((struct property *)param)->value;
- phandle = be32_to_cpup(prop_value++);
- node = of_find_node_by_phandle(phandle);
- return ((chan->private == node) &&
- (chan->chan_id == be32_to_cpup(prop_value)));
- }
-#endif
-
peri_id = chan->private;
return *peri_id == (unsigned)param;
}
EXPORT_SYMBOL(pl330_filter);
+static struct dma_chan *of_dma_pl330_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ int count = dma_spec->args_count;
+ struct dma_pl330_dmac *pdmac = ofdma->of_dma_data;
+ struct dma_pl330_filter_args fargs;
+ dma_cap_mask_t cap;
+
+ if (!pdmac)
+ return NULL;
+
+ if (count != 1)
+ return NULL;
+
+ fargs.pdmac = pdmac;
+ fargs.chan_id = dma_spec->args[0];
+
+ dma_cap_zero(cap);
+ dma_cap_set(DMA_SLAVE, cap);
+ dma_cap_set(DMA_CYCLIC, cap);
+
+ return dma_request_channel(cap, pl330_dt_filter, &fargs);
+}
+
static int pl330_alloc_chan_resources(struct dma_chan *chan)
{
struct dma_pl330_chan *pch = to_pchan(chan);
@@ -2866,7 +2892,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
pdat = adev->dev.platform_data;
/* Allocate a new DMAC and its Channels */
- pdmac = kzalloc(sizeof(*pdmac), GFP_KERNEL);
+ pdmac = devm_kzalloc(&adev->dev, sizeof(*pdmac), GFP_KERNEL);
if (!pdmac) {
dev_err(&adev->dev, "unable to allocate mem\n");
return -ENOMEM;
@@ -2878,13 +2904,9 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
pi->mcbufsz = pdat ? pdat->mcbuf_sz : 0;
res = &adev->res;
- request_mem_region(res->start, resource_size(res), "dma-pl330");
-
- pi->base = ioremap(res->start, resource_size(res));
- if (!pi->base) {
- ret = -ENXIO;
- goto probe_err1;
- }
+ pi->base = devm_request_and_ioremap(&adev->dev, res);
+ if (!pi->base)
+ return -ENXIO;
amba_set_drvdata(adev, pdmac);
@@ -2892,11 +2914,11 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
ret = request_irq(irq, pl330_irq_handler, 0,
dev_name(&adev->dev), pi);
if (ret)
- goto probe_err2;
+ return ret;
ret = pl330_add(pi);
if (ret)
- goto probe_err3;
+ goto probe_err1;
INIT_LIST_HEAD(&pdmac->desc_pool);
spin_lock_init(&pdmac->pool_lock);
@@ -2918,7 +2940,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
if (!pdmac->peripherals) {
ret = -ENOMEM;
dev_err(&adev->dev, "unable to allocate pdmac->peripherals\n");
- goto probe_err4;
+ goto probe_err2;
}
for (i = 0; i < num_chan; i++) {
@@ -2962,7 +2984,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
ret = dma_async_device_register(pd);
if (ret) {
dev_err(&adev->dev, "unable to register DMAC\n");
- goto probe_err4;
+ goto probe_err2;
}
dev_info(&adev->dev,
@@ -2973,17 +2995,20 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan,
pi->pcfg.num_peri, pi->pcfg.num_events);
+ ret = of_dma_controller_register(adev->dev.of_node,
+ of_dma_pl330_xlate, pdmac);
+ if (ret) {
+ dev_err(&adev->dev,
+ "unable to register DMA to the generic DT DMA helpers\n");
+ goto probe_err2;
+ }
+
return 0;
-probe_err4:
- pl330_del(pi);
-probe_err3:
- free_irq(irq, pi);
probe_err2:
- iounmap(pi->base);
+ pl330_del(pi);
probe_err1:
- release_mem_region(res->start, resource_size(res));
- kfree(pdmac);
+ free_irq(irq, pi);
return ret;
}
@@ -2993,12 +3018,13 @@ static int pl330_remove(struct amba_device *adev)
struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev);
struct dma_pl330_chan *pch, *_p;
struct pl330_info *pi;
- struct resource *res;
int irq;
if (!pdmac)
return 0;
+ of_dma_controller_free(adev->dev.of_node);
+
amba_set_drvdata(adev, NULL);
/* Idle the DMAC */
@@ -3020,13 +3046,6 @@ static int pl330_remove(struct amba_device *adev)
irq = adev->irq[0];
free_irq(irq, pi);
- iounmap(pi->base);
-
- res = &adev->res;
- release_mem_region(res->start, resource_size(res));
-
- kfree(pdmac);
-
return 0;
}
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index f4cd946..4acb85a 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -638,9 +638,6 @@ static int shdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
unsigned long flags;
int ret;
- if (!chan)
- return -EINVAL;
-
switch (cmd) {
case DMA_TERMINATE_ALL:
spin_lock_irqsave(&schan->chan_lock, flags);
diff --git a/drivers/dma/sh/shdma.c b/drivers/dma/sh/shdma.c
index 3315e4b..b70709b 100644
--- a/drivers/dma/sh/shdma.c
+++ b/drivers/dma/sh/shdma.c
@@ -326,7 +326,7 @@ static int sh_dmae_set_slave(struct shdma_chan *schan,
shdma_chan);
const struct sh_dmae_slave_config *cfg = dmae_find_slave(sh_chan, slave_id);
if (!cfg)
- return -ENODEV;
+ return -ENXIO;
if (!try)
sh_chan->config = cfg;
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
index 94674a9..1d627e2 100644
--- a/drivers/dma/sirf-dma.c
+++ b/drivers/dma/sirf-dma.c
@@ -32,7 +32,9 @@
#define SIRFSOC_DMA_CH_VALID 0x140
#define SIRFSOC_DMA_CH_INT 0x144
#define SIRFSOC_DMA_INT_EN 0x148
+#define SIRFSOC_DMA_INT_EN_CLR 0x14C
#define SIRFSOC_DMA_CH_LOOP_CTRL 0x150
+#define SIRFSOC_DMA_CH_LOOP_CTRL_CLR 0x15C
#define SIRFSOC_DMA_MODE_CTRL_BIT 4
#define SIRFSOC_DMA_DIR_CTRL_BIT 5
@@ -76,6 +78,7 @@ struct sirfsoc_dma {
struct sirfsoc_dma_chan channels[SIRFSOC_DMA_CHANNELS];
void __iomem *base;
int irq;
+ bool is_marco;
};
#define DRV_NAME "sirfsoc_dma"
@@ -288,17 +291,67 @@ static int sirfsoc_dma_terminate_all(struct sirfsoc_dma_chan *schan)
int cid = schan->chan.chan_id;
unsigned long flags;
- writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) &
- ~(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);
- writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID);
+ spin_lock_irqsave(&schan->lock, flags);
- writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL)
- & ~((1 << cid) | 1 << (cid + 16)),
+ if (!sdma->is_marco) {
+ writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) &
+ ~(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);
+ writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL)
+ & ~((1 << cid) | 1 << (cid + 16)),
sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
+ } else {
+ writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_INT_EN_CLR);
+ writel_relaxed((1 << cid) | 1 << (cid + 16),
+ sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL_CLR);
+ }
+
+ writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID);
- spin_lock_irqsave(&schan->lock, flags);
list_splice_tail_init(&schan->active, &schan->free);
list_splice_tail_init(&schan->queued, &schan->free);
+
+ spin_unlock_irqrestore(&schan->lock, flags);
+
+ return 0;
+}
+
+static int sirfsoc_dma_pause_chan(struct sirfsoc_dma_chan *schan)
+{
+ struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
+ int cid = schan->chan.chan_id;
+ unsigned long flags;
+
+ spin_lock_irqsave(&schan->lock, flags);
+
+ if (!sdma->is_marco)
+ writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL)
+ & ~((1 << cid) | 1 << (cid + 16)),
+ sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
+ else
+ writel_relaxed((1 << cid) | 1 << (cid + 16),
+ sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL_CLR);
+
+ spin_unlock_irqrestore(&schan->lock, flags);
+
+ return 0;
+}
+
+static int sirfsoc_dma_resume_chan(struct sirfsoc_dma_chan *schan)
+{
+ struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
+ int cid = schan->chan.chan_id;
+ unsigned long flags;
+
+ spin_lock_irqsave(&schan->lock, flags);
+
+ if (!sdma->is_marco)
+ writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL)
+ | ((1 << cid) | 1 << (cid + 16)),
+ sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
+ else
+ writel_relaxed((1 << cid) | 1 << (cid + 16),
+ sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
+
spin_unlock_irqrestore(&schan->lock, flags);
return 0;
@@ -311,6 +364,10 @@ static int sirfsoc_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
switch (cmd) {
+ case DMA_PAUSE:
+ return sirfsoc_dma_pause_chan(schan);
+ case DMA_RESUME:
+ return sirfsoc_dma_resume_chan(schan);
case DMA_TERMINATE_ALL:
return sirfsoc_dma_terminate_all(schan);
case DMA_SLAVE_CONFIG:
@@ -568,6 +625,9 @@ static int sirfsoc_dma_probe(struct platform_device *op)
return -ENOMEM;
}
+ if (of_device_is_compatible(dn, "sirf,marco-dmac"))
+ sdma->is_marco = true;
+
if (of_property_read_u32(dn, "cell-index", &id)) {
dev_err(dev, "Fail to get DMAC index\n");
return -ENODEV;
@@ -668,6 +728,7 @@ static int sirfsoc_dma_remove(struct platform_device *op)
static struct of_device_id sirfsoc_dma_match[] = {
{ .compatible = "sirf,prima2-dmac", },
+ { .compatible = "sirf,marco-dmac", },
{},
};
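The SiRFmarco variant exposes dedicated write-to-clear mirrors (SIRFSOC_DMA_INT_EN_CLR, SIRFSOC_DMA_CH_LOOP_CTRL_CLR), so a bit is disabled with a single posted write instead of the prima2 read-modify-write sequence; side by side:

	/* prima2: read-modify-write the shared enable register */
	writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) &
		       ~(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);

	/* marco: one write to the clear register, no RMW window */
	writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_INT_EN_CLR);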
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 23c5573..1734fee 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -53,6 +53,8 @@
#define D40_ALLOC_PHY (1 << 30)
#define D40_ALLOC_LOG_FREE 0
+#define MAX(a, b) (((a) < (b)) ? (b) : (a))
+
/**
* enum 40_command - The different commands and/or statuses.
*
@@ -100,8 +102,19 @@ static u32 d40_backup_regs[] = {
#define BACKUP_REGS_SZ ARRAY_SIZE(d40_backup_regs)
-/* TODO: Check if all these registers have to be saved/restored on dma40 v3 */
-static u32 d40_backup_regs_v3[] = {
+/*
+ * Since 9540 and 8540 have the same HW revision:
+ * use v4a for 9540 or earlier,
+ * use v4b for 8540 or later.
+ * HW revision:
+ * DB8500ed has revision 0
+ * DB8500v1 has revision 2
+ * DB8500v2 has revision 3
+ * AP9540v1 has revision 4
+ * DB8540v1 has revision 4
+ * TODO: Check if all these registers have to be saved/restored on dma40 v4a
+ */
+static u32 d40_backup_regs_v4a[] = {
D40_DREG_PSEG1,
D40_DREG_PSEG2,
D40_DREG_PSEG3,
@@ -120,7 +133,32 @@ static u32 d40_backup_regs_v3[] = {
D40_DREG_RCEG4,
};
-#define BACKUP_REGS_SZ_V3 ARRAY_SIZE(d40_backup_regs_v3)
+#define BACKUP_REGS_SZ_V4A ARRAY_SIZE(d40_backup_regs_v4a)
+
+static u32 d40_backup_regs_v4b[] = {
+ D40_DREG_CPSEG1,
+ D40_DREG_CPSEG2,
+ D40_DREG_CPSEG3,
+ D40_DREG_CPSEG4,
+ D40_DREG_CPSEG5,
+ D40_DREG_CPCEG1,
+ D40_DREG_CPCEG2,
+ D40_DREG_CPCEG3,
+ D40_DREG_CPCEG4,
+ D40_DREG_CPCEG5,
+ D40_DREG_CRSEG1,
+ D40_DREG_CRSEG2,
+ D40_DREG_CRSEG3,
+ D40_DREG_CRSEG4,
+ D40_DREG_CRSEG5,
+ D40_DREG_CRCEG1,
+ D40_DREG_CRCEG2,
+ D40_DREG_CRCEG3,
+ D40_DREG_CRCEG4,
+ D40_DREG_CRCEG5,
+};
+
+#define BACKUP_REGS_SZ_V4B ARRAY_SIZE(d40_backup_regs_v4b)
static u32 d40_backup_regs_chan[] = {
D40_CHAN_REG_SSCFG,
@@ -134,6 +172,102 @@ static u32 d40_backup_regs_chan[] = {
};
/**
+ * struct d40_interrupt_lookup - lookup table for interrupt handler
+ *
+ * @src: Interrupt mask register.
+ * @clr: Interrupt clear register.
+ * @is_error: true if this is an error interrupt.
+ * @offset: start delta in the lookup_log_chans in d40_base. If equal to
+ * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
+ */
+struct d40_interrupt_lookup {
+ u32 src;
+ u32 clr;
+ bool is_error;
+ int offset;
+};
+
+
+static struct d40_interrupt_lookup il_v4a[] = {
+ {D40_DREG_LCTIS0, D40_DREG_LCICR0, false, 0},
+ {D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
+ {D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
+ {D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
+ {D40_DREG_LCEIS0, D40_DREG_LCICR0, true, 0},
+ {D40_DREG_LCEIS1, D40_DREG_LCICR1, true, 32},
+ {D40_DREG_LCEIS2, D40_DREG_LCICR2, true, 64},
+ {D40_DREG_LCEIS3, D40_DREG_LCICR3, true, 96},
+ {D40_DREG_PCTIS, D40_DREG_PCICR, false, D40_PHY_CHAN},
+ {D40_DREG_PCEIS, D40_DREG_PCICR, true, D40_PHY_CHAN},
+};
+
+static struct d40_interrupt_lookup il_v4b[] = {
+ {D40_DREG_CLCTIS1, D40_DREG_CLCICR1, false, 0},
+ {D40_DREG_CLCTIS2, D40_DREG_CLCICR2, false, 32},
+ {D40_DREG_CLCTIS3, D40_DREG_CLCICR3, false, 64},
+ {D40_DREG_CLCTIS4, D40_DREG_CLCICR4, false, 96},
+ {D40_DREG_CLCTIS5, D40_DREG_CLCICR5, false, 128},
+ {D40_DREG_CLCEIS1, D40_DREG_CLCICR1, true, 0},
+ {D40_DREG_CLCEIS2, D40_DREG_CLCICR2, true, 32},
+ {D40_DREG_CLCEIS3, D40_DREG_CLCICR3, true, 64},
+ {D40_DREG_CLCEIS4, D40_DREG_CLCICR4, true, 96},
+ {D40_DREG_CLCEIS5, D40_DREG_CLCICR5, true, 128},
+ {D40_DREG_CPCTIS, D40_DREG_CPCICR, false, D40_PHY_CHAN},
+ {D40_DREG_CPCEIS, D40_DREG_CPCICR, true, D40_PHY_CHAN},
+};
+
+/**
+ * struct d40_reg_val - simple lookup struct
+ *
+ * @reg: The register.
+ * @val: The value that belongs to the register in reg.
+ */
+struct d40_reg_val {
+ unsigned int reg;
+ unsigned int val;
+};
+
+static __initdata struct d40_reg_val dma_init_reg_v4a[] = {
+ /* Clock every part of the DMA block from start */
+ { .reg = D40_DREG_GCC, .val = D40_DREG_GCC_ENABLE_ALL},
+
+ /* Interrupts on all logical channels */
+ { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
+};
+static __initdata struct d40_reg_val dma_init_reg_v4b[] = {
+ /* Clock every part of the DMA block from start */
+ { .reg = D40_DREG_GCC, .val = D40_DREG_GCC_ENABLE_ALL},
+
+ /* Interrupts on all logical channels */
+ { .reg = D40_DREG_CLCMIS1, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_CLCMIS2, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_CLCMIS3, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_CLCMIS4, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_CLCMIS5, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_CLCICR1, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_CLCICR2, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_CLCICR3, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_CLCICR4, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_CLCICR5, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_CLCTIS1, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_CLCTIS2, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_CLCTIS3, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_CLCTIS4, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_CLCTIS5, .val = 0xFFFFFFFF}
+};
+
+/**
* struct d40_lli_pool - Structure for keeping LLIs in memory
*
* @base: Pointer to memory area when the pre_alloc_lli's are not large
@@ -221,6 +355,7 @@ struct d40_lcla_pool {
* @allocated_dst: Same as for src but is dst.
* allocated_dst and allocated_src uses the D40_ALLOC* defines as well as
* event line number.
+ * @use_soft_lli: To mark if the linked lists of the channel are managed by SW.
*/
struct d40_phy_res {
spinlock_t lock;
@@ -228,6 +363,7 @@ struct d40_phy_res {
int num;
u32 allocated_src;
u32 allocated_dst;
+ bool use_soft_lli;
};
struct d40_base;
@@ -248,6 +384,7 @@ struct d40_base;
* @client: Cliented owned descriptor list.
* @pending_queue: Submitted jobs, to be issued by issue_pending()
* @active: Active descriptor.
+ * @done: Completed jobs.
* @queue: Queued jobs.
* @prepare_queue: Prepared jobs.
* @dma_cfg: The client configuration of this dma channel.
@@ -273,6 +410,7 @@ struct d40_chan {
struct list_head client;
struct list_head pending_queue;
struct list_head active;
+ struct list_head done;
struct list_head queue;
struct list_head prepare_queue;
struct stedma40_chan_cfg dma_cfg;
@@ -289,6 +427,38 @@ struct d40_chan {
};
/**
+ * struct d40_gen_dmac - generic values to represent u8500/u8540 DMA
+ * controller
+ *
+ * @backup: the pointer to the registers address array for backup
+ * @backup_size: the size of the registers address array for backup
+ * @realtime_en: the realtime enable register
+ * @realtime_clear: the realtime clear register
+ * @high_prio_en: the high priority enable register
+ * @high_prio_clear: the high priority clear register
+ * @interrupt_en: the interrupt enable register
+ * @interrupt_clear: the interrupt clear register
+ * @il: the pointer to struct d40_interrupt_lookup
+ * @il_size: the size of d40_interrupt_lookup array
+ * @init_reg: the pointer to the struct d40_reg_val
+ * @init_reg_size: the size of d40_reg_val array
+ */
+struct d40_gen_dmac {
+ u32 *backup;
+ u32 backup_size;
+ u32 realtime_en;
+ u32 realtime_clear;
+ u32 high_prio_en;
+ u32 high_prio_clear;
+ u32 interrupt_en;
+ u32 interrupt_clear;
+ struct d40_interrupt_lookup *il;
+ u32 il_size;
+ struct d40_reg_val *init_reg;
+ u32 init_reg_size;
+};
+
+/**
* struct d40_base - The big global struct, one for each probe'd instance.
*
* @interrupt_lock: Lock used to make sure one interrupt is handle a time.
@@ -326,11 +496,13 @@ struct d40_chan {
* @desc_slab: cache for descriptors.
* @reg_val_backup: Here the values of some hardware registers are stored
* before the DMA is powered off. They are restored when the power is back on.
- * @reg_val_backup_v3: Backup of registers that only exits on dma40 v3 and
- * later.
+ * @reg_val_backup_v4: Backup of registers that only exist on dma40 v3 and
+ * later.
* @reg_val_backup_chan: Backup data for standard channel parameter registers.
* @gcc_pwr_off_mask: Mask to maintain the channels that can be turned off.
* @initialized: true if the dma has been initialized
+ * @gen_dmac: the struct for generic register values to represent u8500/8540
+ * DMA controller
*/
struct d40_base {
spinlock_t interrupt_lock;
@@ -344,6 +516,7 @@ struct d40_base {
int irq;
int num_phy_chans;
int num_log_chans;
+ struct device_dma_parameters dma_parms;
struct dma_device dma_both;
struct dma_device dma_slave;
struct dma_device dma_memcpy;
@@ -361,37 +534,11 @@ struct d40_base {
resource_size_t lcpa_size;
struct kmem_cache *desc_slab;
u32 reg_val_backup[BACKUP_REGS_SZ];
- u32 reg_val_backup_v3[BACKUP_REGS_SZ_V3];
+ u32 reg_val_backup_v4[MAX(BACKUP_REGS_SZ_V4A, BACKUP_REGS_SZ_V4B)];
u32 *reg_val_backup_chan;
u16 gcc_pwr_off_mask;
bool initialized;
-};
-
-/**
- * struct d40_interrupt_lookup - lookup table for interrupt handler
- *
- * @src: Interrupt mask register.
- * @clr: Interrupt clear register.
- * @is_error: true if this is an error interrupt.
- * @offset: start delta in the lookup_log_chans in d40_base. If equals to
- * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
- */
-struct d40_interrupt_lookup {
- u32 src;
- u32 clr;
- bool is_error;
- int offset;
-};
-
-/**
- * struct d40_reg_val - simple lookup struct
- *
- * @reg: The register.
- * @val: The value that belongs to the register in reg.
- */
-struct d40_reg_val {
- unsigned int reg;
- unsigned int val;
+ struct d40_gen_dmac gen_dmac;
};
static struct device *chan2dev(struct d40_chan *d40c)
@@ -494,19 +641,18 @@ static int d40_lcla_alloc_one(struct d40_chan *d40c,
unsigned long flags;
int i;
int ret = -EINVAL;
- int p;
spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
- p = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP;
-
/*
* Allocate both src and dst at the same time, therefore the half
* start on 1 since 0 can't be used since zero is used as end marker.
*/
for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
- if (!d40c->base->lcla_pool.alloc_map[p + i]) {
- d40c->base->lcla_pool.alloc_map[p + i] = d40d;
+ int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i;
+
+ if (!d40c->base->lcla_pool.alloc_map[idx]) {
+ d40c->base->lcla_pool.alloc_map[idx] = d40d;
d40d->lcla_alloc++;
ret = i;
break;
@@ -531,10 +677,10 @@ static int d40_lcla_free_all(struct d40_chan *d40c,
spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
- if (d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num *
- D40_LCLA_LINK_PER_EVENT_GRP + i] == d40d) {
- d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num *
- D40_LCLA_LINK_PER_EVENT_GRP + i] = NULL;
+ int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i;
+
+ if (d40c->base->lcla_pool.alloc_map[idx] == d40d) {
+ d40c->base->lcla_pool.alloc_map[idx] = NULL;
d40d->lcla_alloc--;
if (d40d->lcla_alloc == 0) {
ret = 0;
@@ -611,6 +757,11 @@ static void d40_phy_lli_load(struct d40_chan *chan, struct d40_desc *desc)
writel(lli_dst->reg_lnk, base + D40_CHAN_REG_SDLNK);
}
+static void d40_desc_done(struct d40_chan *d40c, struct d40_desc *desc)
+{
+ list_add_tail(&desc->node, &d40c->done);
+}
+
static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
{
struct d40_lcla_pool *pool = &chan->base->lcla_pool;
@@ -634,7 +785,16 @@ static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
* can't link back to the one in LCPA space
*/
if (linkback || (lli_len - lli_current > 1)) {
- curr_lcla = d40_lcla_alloc_one(chan, desc);
+ /*
+	 * If the channel is expected to use only soft_lli, don't
+	 * allocate an lcla. This is to avoid a HW issue that exists
+	 * in some controllers during a peripheral-to-memory transfer
+	 * that uses linked lists.
+ */
+ if (!(chan->phy_chan->use_soft_lli &&
+ chan->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM))
+ curr_lcla = d40_lcla_alloc_one(chan, desc);
+
first_lcla = curr_lcla;
}
@@ -771,6 +931,14 @@ static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
return d;
}
+static struct d40_desc *d40_first_done(struct d40_chan *d40c)
+{
+ if (list_empty(&d40c->done))
+ return NULL;
+
+ return list_first_entry(&d40c->done, struct d40_desc, node);
+}
+
static int d40_psize_2_burst_size(bool is_log, int psize)
{
if (is_log) {
@@ -874,11 +1042,11 @@ static void d40_save_restore_registers(struct d40_base *base, bool save)
save);
/* Save/Restore registers only existing on dma40 v3 and later */
- if (base->rev >= 3)
- dma40_backup(base->virtbase, base->reg_val_backup_v3,
- d40_backup_regs_v3,
- ARRAY_SIZE(d40_backup_regs_v3),
- save);
+ if (base->gen_dmac.backup)
+ dma40_backup(base->virtbase, base->reg_val_backup_v4,
+ base->gen_dmac.backup,
+ base->gen_dmac.backup_size,
+ save);
}
#else
static void d40_save_restore_registers(struct d40_base *base, bool save)
@@ -961,6 +1129,12 @@ static void d40_term_all(struct d40_chan *d40c)
struct d40_desc *d40d;
struct d40_desc *_d;
+ /* Release completed descriptors */
+ while ((d40d = d40_first_done(d40c))) {
+ d40_desc_remove(d40d);
+ d40_desc_free(d40c, d40d);
+ }
+
/* Release active descriptors */
while ((d40d = d40_first_active_get(d40c))) {
d40_desc_remove(d40d);
@@ -1396,6 +1570,9 @@ static void dma_tc_handle(struct d40_chan *d40c)
d40c->busy = false;
pm_runtime_mark_last_busy(d40c->base->dev);
pm_runtime_put_autosuspend(d40c->base->dev);
+
+ d40_desc_remove(d40d);
+ d40_desc_done(d40c, d40d);
}
d40c->pending_tx++;
@@ -1413,10 +1590,14 @@ static void dma_tasklet(unsigned long data)
spin_lock_irqsave(&d40c->lock, flags);
- /* Get first active entry from list */
- d40d = d40_first_active_get(d40c);
- if (d40d == NULL)
- goto err;
+ /* Get first entry from the done list */
+ d40d = d40_first_done(d40c);
+ if (d40d == NULL) {
+ /* Check if we have reached here for cyclic job */
+ d40d = d40_first_active_get(d40c);
+ if (d40d == NULL || !d40d->cyclic)
+ goto err;
+ }
if (!d40d->cyclic)
dma_cookie_complete(&d40d->txd);
@@ -1438,13 +1619,11 @@ static void dma_tasklet(unsigned long data)
if (async_tx_test_ack(&d40d->txd)) {
d40_desc_remove(d40d);
d40_desc_free(d40c, d40d);
- } else {
- if (!d40d->is_in_client_list) {
- d40_desc_remove(d40d);
- d40_lcla_free_all(d40c, d40d);
- list_add_tail(&d40d->node, &d40c->client);
- d40d->is_in_client_list = true;
- }
+ } else if (!d40d->is_in_client_list) {
+ d40_desc_remove(d40d);
+ d40_lcla_free_all(d40c, d40d);
+ list_add_tail(&d40d->node, &d40c->client);
+ d40d->is_in_client_list = true;
}
}
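The flow after this change: the transfer-complete interrupt parks finished descriptors on a per-channel done list, and the tasklet services that list first, touching the active list only for cyclic jobs. In outline, with names as in the driver:

	/* interrupt path (dma_tc_handle) */
	d40_desc_remove(d40d);			/* off the active list */
	d40_desc_done(d40c, d40d);		/* onto d40c->done */

	/* tasklet (dma_tasklet) */
	d40d = d40_first_done(d40c);
	if (!d40d) {
		d40d = d40_first_active_get(d40c);
		if (!d40d || !d40d->cyclic)
			goto err;		/* spurious wakeup */
	}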
@@ -1469,53 +1648,51 @@ err:
static irqreturn_t d40_handle_interrupt(int irq, void *data)
{
- static const struct d40_interrupt_lookup il[] = {
- {D40_DREG_LCTIS0, D40_DREG_LCICR0, false, 0},
- {D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
- {D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
- {D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
- {D40_DREG_LCEIS0, D40_DREG_LCICR0, true, 0},
- {D40_DREG_LCEIS1, D40_DREG_LCICR1, true, 32},
- {D40_DREG_LCEIS2, D40_DREG_LCICR2, true, 64},
- {D40_DREG_LCEIS3, D40_DREG_LCICR3, true, 96},
- {D40_DREG_PCTIS, D40_DREG_PCICR, false, D40_PHY_CHAN},
- {D40_DREG_PCEIS, D40_DREG_PCICR, true, D40_PHY_CHAN},
- };
-
int i;
- u32 regs[ARRAY_SIZE(il)];
u32 idx;
u32 row;
long chan = -1;
struct d40_chan *d40c;
unsigned long flags;
struct d40_base *base = data;
+ u32 regs[base->gen_dmac.il_size];
+ struct d40_interrupt_lookup *il = base->gen_dmac.il;
+ u32 il_size = base->gen_dmac.il_size;
spin_lock_irqsave(&base->interrupt_lock, flags);
/* Read interrupt status of both logical and physical channels */
- for (i = 0; i < ARRAY_SIZE(il); i++)
+ for (i = 0; i < il_size; i++)
regs[i] = readl(base->virtbase + il[i].src);
for (;;) {
chan = find_next_bit((unsigned long *)regs,
- BITS_PER_LONG * ARRAY_SIZE(il), chan + 1);
+ BITS_PER_LONG * il_size, chan + 1);
/* No more set bits found? */
- if (chan == BITS_PER_LONG * ARRAY_SIZE(il))
+ if (chan == BITS_PER_LONG * il_size)
break;
row = chan / BITS_PER_LONG;
idx = chan & (BITS_PER_LONG - 1);
- /* ACK interrupt */
- writel(1 << idx, base->virtbase + il[row].clr);
-
if (il[row].offset == D40_PHY_CHAN)
d40c = base->lookup_phy_chans[idx];
else
d40c = base->lookup_log_chans[il[row].offset + idx];
+
+ if (!d40c) {
+ /*
+ * No error because this can happen if something else
+ * in the system is using the channel.
+ */
+ continue;
+ }
+
+ /* ACK interrupt */
+ writel(1 << idx, base->virtbase + il[row].clr);
+
spin_lock(&d40c->lock);
if (!il[row].is_error)
@@ -1710,10 +1887,12 @@ static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
int i;
int j;
int log_num;
+ int num_phy_chans;
bool is_src;
bool is_log = d40c->dma_cfg.mode == STEDMA40_MODE_LOGICAL;
phys = d40c->base->phy_res;
+ num_phy_chans = d40c->base->num_phy_chans;
if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
dev_type = d40c->dma_cfg.src_dev_type;
@@ -1734,12 +1913,19 @@ static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
if (!is_log) {
if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
/* Find physical half channel */
- for (i = 0; i < d40c->base->num_phy_chans; i++) {
-
+ if (d40c->dma_cfg.use_fixed_channel) {
+ i = d40c->dma_cfg.phy_channel;
if (d40_alloc_mask_set(&phys[i], is_src,
0, is_log,
first_phy_user))
goto found_phy;
+ } else {
+ for (i = 0; i < num_phy_chans; i++) {
+ if (d40_alloc_mask_set(&phys[i], is_src,
+ 0, is_log,
+ first_phy_user))
+ goto found_phy;
+ }
}
} else
for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
@@ -1954,7 +2140,6 @@ _exit:
}
-
static u32 stedma40_residue(struct dma_chan *chan)
{
struct d40_chan *d40c =
@@ -2030,7 +2215,6 @@ d40_prep_sg_phy(struct d40_chan *chan, struct d40_desc *desc,
return ret < 0 ? ret : 0;
}
-
static struct d40_desc *
d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg,
unsigned int sg_len, unsigned long dma_flags)
@@ -2056,7 +2240,6 @@ d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg,
goto err;
}
-
desc->lli_current = 0;
desc->txd.flags = dma_flags;
desc->txd.tx_submit = d40_tx_submit;
@@ -2105,7 +2288,6 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
return NULL;
}
-
spin_lock_irqsave(&chan->lock, flags);
desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags);
@@ -2179,11 +2361,26 @@ static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src)
{
bool realtime = d40c->dma_cfg.realtime;
bool highprio = d40c->dma_cfg.high_priority;
- u32 prioreg = highprio ? D40_DREG_PSEG1 : D40_DREG_PCEG1;
- u32 rtreg = realtime ? D40_DREG_RSEG1 : D40_DREG_RCEG1;
+ u32 rtreg;
u32 event = D40_TYPE_TO_EVENT(dev_type);
u32 group = D40_TYPE_TO_GROUP(dev_type);
u32 bit = 1 << event;
+ u32 prioreg;
+ struct d40_gen_dmac *dmac = &d40c->base->gen_dmac;
+
+ rtreg = realtime ? dmac->realtime_en : dmac->realtime_clear;
+ /*
+ * Due to a hardware bug, in some cases a logical channel triggered by
+ * a high priority destination event line can generate extra packet
+ * transactions.
+ *
+ * The workaround is to not set the high priority level for the
+ * destination event lines that trigger logical channels.
+ */
+ if (!src && chan_is_logical(d40c))
+ highprio = false;
+
+ prioreg = highprio ? dmac->high_prio_en : dmac->high_prio_clear;
/* Destination event lines are stored in the upper halfword */
if (!src)
@@ -2248,11 +2445,11 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
d40c->lcpa = d40c->base->lcpa_base +
- d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE;
+ d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE;
else
d40c->lcpa = d40c->base->lcpa_base +
- d40c->dma_cfg.dst_dev_type *
- D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
+ d40c->dma_cfg.dst_dev_type *
+ D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
}
dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n",
@@ -2287,7 +2484,6 @@ static void d40_free_chan_resources(struct dma_chan *chan)
return;
}
-
spin_lock_irqsave(&d40c->lock, flags);
err = d40_free_dma(d40c);
@@ -2330,14 +2526,12 @@ d40_prep_memcpy_sg(struct dma_chan *chan,
return d40_prep_sg(chan, src_sg, dst_sg, src_nents, DMA_NONE, dma_flags);
}
-static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
- struct scatterlist *sgl,
- unsigned int sg_len,
- enum dma_transfer_direction direction,
- unsigned long dma_flags,
- void *context)
+static struct dma_async_tx_descriptor *
+d40_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction direction,
+ unsigned long dma_flags, void *context)
{
- if (direction != DMA_DEV_TO_MEM && direction != DMA_MEM_TO_DEV)
+ if (!is_slave_direction(direction))
return NULL;
return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags);
@@ -2577,6 +2771,14 @@ static int d40_set_runtime_config(struct dma_chan *chan,
return -EINVAL;
}
+ if (src_maxburst > 16) {
+ src_maxburst = 16;
+ dst_maxburst = src_maxburst * src_addr_width / dst_addr_width;
+ } else if (dst_maxburst > 16) {
+ dst_maxburst = 16;
+ src_maxburst = dst_maxburst * dst_addr_width / src_addr_width;
+ }
+
ret = dma40_config_to_halfchannel(d40c, &cfg->src_info,
src_addr_width,
src_maxburst);
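The burst clamp keeps the byte rate on both half-channels equal: maxburst times addr_width bytes per burst must match, so when one side is capped at 16 the other is rescaled by the width ratio. A standalone check of the arithmetic (illustration only, not driver code):

	#include <assert.h>

	int main(void)
	{
		int src_w = 4, dst_w = 2;	/* element widths in bytes */
		int src_maxburst = 16;		/* capped value */
		int dst_maxburst = src_maxburst * src_w / dst_w;	/* 32 */

		assert(src_maxburst * src_w == dst_maxburst * dst_w);	/* 64 bytes each */
		return 0;
	}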
@@ -2659,6 +2861,7 @@ static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
d40c->log_num = D40_PHY_CHAN;
+ INIT_LIST_HEAD(&d40c->done);
INIT_LIST_HEAD(&d40c->active);
INIT_LIST_HEAD(&d40c->queue);
INIT_LIST_HEAD(&d40c->pending_queue);
@@ -2773,8 +2976,6 @@ static int dma40_pm_suspend(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct d40_base *base = platform_get_drvdata(pdev);
int ret = 0;
- if (!pm_runtime_suspended(dev))
- return -EBUSY;
if (base->lcpa_regulator)
ret = regulator_disable(base->lcpa_regulator);
@@ -2882,6 +3083,13 @@ static int __init d40_phy_res_init(struct d40_base *base)
num_phy_chans_avail--;
}
+ /* Mark soft_lli channels */
+ for (i = 0; i < base->plat_data->num_of_soft_lli_chans; i++) {
+ int chan = base->plat_data->soft_lli_chans[i];
+
+ base->phy_res[chan].use_soft_lli = true;
+ }
+
dev_info(base->dev, "%d of %d physical DMA channels available\n",
num_phy_chans_avail, base->num_phy_chans);
@@ -2975,14 +3183,21 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
* ? has revision 1
* DB8500v1 has revision 2
* DB8500v2 has revision 3
+ * AP9540v1 has revision 4
+ * DB8540v1 has revision 4
*/
rev = AMBA_REV_BITS(pid);
+ plat_data = pdev->dev.platform_data;
+
/* The number of physical channels on this HW */
- num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
+ if (plat_data->num_of_phy_chans)
+ num_phy_chans = plat_data->num_of_phy_chans;
+ else
+ num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
- dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
- rev, res->start);
+ dev_info(&pdev->dev, "hardware revision: %d @ 0x%x with %d physical channels\n",
+ rev, res->start, num_phy_chans);
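
/*
 * Decoding the ICFG fallback, which is kept for parts whose platform
 * data does not override the count: the low three bits appear to encode
 * (channels / 4) - 1, so for example
 *
 *   readl(virtbase + D40_DREG_ICFG) & 0x7 == 1  ->  4 * 1 + 4 = 8 channels
 */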
if (rev < 2) {
d40_err(&pdev->dev, "hardware revision: %d is not supported",
@@ -2990,8 +3205,6 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
goto failure;
}
- plat_data = pdev->dev.platform_data;
-
/* Count the number of logical channels in use */
for (i = 0; i < plat_data->dev_len; i++)
if (plat_data->dev_rx[i] != 0)
@@ -3022,6 +3235,36 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
base->log_chans = &base->phy_chans[num_phy_chans];
+ if (base->plat_data->num_of_phy_chans == 14) {
+ base->gen_dmac.backup = d40_backup_regs_v4b;
+ base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4B;
+ base->gen_dmac.interrupt_en = D40_DREG_CPCMIS;
+ base->gen_dmac.interrupt_clear = D40_DREG_CPCICR;
+ base->gen_dmac.realtime_en = D40_DREG_CRSEG1;
+ base->gen_dmac.realtime_clear = D40_DREG_CRCEG1;
+ base->gen_dmac.high_prio_en = D40_DREG_CPSEG1;
+ base->gen_dmac.high_prio_clear = D40_DREG_CPCEG1;
+ base->gen_dmac.il = il_v4b;
+ base->gen_dmac.il_size = ARRAY_SIZE(il_v4b);
+ base->gen_dmac.init_reg = dma_init_reg_v4b;
+ base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4b);
+ } else {
+ if (base->rev >= 3) {
+ base->gen_dmac.backup = d40_backup_regs_v4a;
+ base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4A;
+ }
+ base->gen_dmac.interrupt_en = D40_DREG_PCMIS;
+ base->gen_dmac.interrupt_clear = D40_DREG_PCICR;
+ base->gen_dmac.realtime_en = D40_DREG_RSEG1;
+ base->gen_dmac.realtime_clear = D40_DREG_RCEG1;
+ base->gen_dmac.high_prio_en = D40_DREG_PSEG1;
+ base->gen_dmac.high_prio_clear = D40_DREG_PCEG1;
+ base->gen_dmac.il = il_v4a;
+ base->gen_dmac.il_size = ARRAY_SIZE(il_v4a);
+ base->gen_dmac.init_reg = dma_init_reg_v4a;
+ base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4a);
+ }
+
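
/*
 * A minimal sketch of how this per-variant table pays off later,
 * assuming only the gen_dmac fields filled in above: callers address
 * the variant's registers indirectly instead of hard-coding the v4a
 * map (hypothetical helper, not part of the patch):
 */
static void d40_mask_interrupts(struct d40_base *base, u32 mask)
{
	/* resolves to D40_DREG_PCMIS on v4a, D40_DREG_CPCMIS on v4b */
	writel(mask, base->virtbase + base->gen_dmac.interrupt_en);
}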
base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res),
GFP_KERNEL);
if (!base->phy_res)
@@ -3093,31 +3336,15 @@ failure:
static void __init d40_hw_init(struct d40_base *base)
{
- static struct d40_reg_val dma_init_reg[] = {
- /* Clock every part of the DMA block from start */
- { .reg = D40_DREG_GCC, .val = D40_DREG_GCC_ENABLE_ALL},
-
- /* Interrupts on all logical channels */
- { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
- { .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
- { .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
- { .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
- { .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
- { .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
- { .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
- { .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
- { .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
- { .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
- { .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
- { .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
- };
int i;
u32 prmseo[2] = {0, 0};
u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
u32 pcmis = 0;
u32 pcicr = 0;
+ struct d40_reg_val *dma_init_reg = base->gen_dmac.init_reg;
+ u32 reg_size = base->gen_dmac.init_reg_size;
- for (i = 0; i < ARRAY_SIZE(dma_init_reg); i++)
+ for (i = 0; i < reg_size; i++)
writel(dma_init_reg[i].val,
base->virtbase + dma_init_reg[i].reg);
@@ -3150,11 +3377,14 @@ static void __init d40_hw_init(struct d40_base *base)
writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);
/* Write which interrupt to enable */
- writel(pcmis, base->virtbase + D40_DREG_PCMIS);
+ writel(pcmis, base->virtbase + base->gen_dmac.interrupt_en);
/* Write which interrupt to clear */
- writel(pcicr, base->virtbase + D40_DREG_PCICR);
+ writel(pcicr, base->virtbase + base->gen_dmac.interrupt_clear);
+ /* These are __initdata and cannot be accessed after init */
+ base->gen_dmac.init_reg = NULL;
+ base->gen_dmac.init_reg_size = 0;
}
static int __init d40_lcla_allocate(struct d40_base *base)
@@ -3362,6 +3592,13 @@ static int __init d40_probe(struct platform_device *pdev)
if (err)
goto failure;
+ base->dev->dma_parms = &base->dma_parms;
+ err = dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE);
+ if (err) {
+ d40_err(&pdev->dev, "Failed to set dma max seg size\n");
+ goto failure;
+ }
+
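
/*
 * Consumer-side sketch, assuming nothing beyond the standard DMA
 * mapping API: once dma_parms is wired up, clients can query the limit
 * before building their scatterlists.
 */
unsigned int max_seg = dma_get_max_seg_size(base->dev); /* == STEDMA40_MAX_SEG_SIZE */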
d40_hw_init(base);
dev_info(base->dev, "initialized\n");
@@ -3397,7 +3634,7 @@ failure:
release_mem_region(base->phy_start,
base->phy_size);
if (base->clk) {
- clk_disable(base->clk);
+ clk_disable_unprepare(base->clk);
clk_put(base->clk);
}
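
/*
 * For context: in the common clock framework, clk_disable_unprepare()
 * is the combined teardown for a clk_prepare_enable() done at probe,
 * undoing both halves in one call:
 */
ret = clk_prepare_enable(base->clk);	/* probe: prepare + enable */
if (ret)
	goto failure;
/* ... controller in use ... */
clk_disable_unprepare(base->clk);	/* teardown: disable + unprepare */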
diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c
index 851ad56..7180e0d 100644
--- a/drivers/dma/ste_dma40_ll.c
+++ b/drivers/dma/ste_dma40_ll.c
@@ -102,17 +102,18 @@ void d40_phy_cfg(struct stedma40_chan_cfg *cfg,
src |= cfg->src_info.data_width << D40_SREG_CFG_ESIZE_POS;
dst |= cfg->dst_info.data_width << D40_SREG_CFG_ESIZE_POS;
+ /* Set the priority bit to high for the physical channel */
+ if (cfg->high_priority) {
+ src |= 1 << D40_SREG_CFG_PRI_POS;
+ dst |= 1 << D40_SREG_CFG_PRI_POS;
+ }
+
} else {
/* Logical channel */
dst |= 1 << D40_SREG_CFG_LOG_GIM_POS;
src |= 1 << D40_SREG_CFG_LOG_GIM_POS;
}
- if (cfg->high_priority) {
- src |= 1 << D40_SREG_CFG_PRI_POS;
- dst |= 1 << D40_SREG_CFG_PRI_POS;
- }
-
if (cfg->src_info.big_endian)
src |= 1 << D40_SREG_CFG_LBE_POS;
if (cfg->dst_info.big_endian)
@@ -250,7 +251,7 @@ d40_phy_buf_to_lli(struct d40_phy_lli *lli, dma_addr_t addr, u32 size,
return lli;
- err:
+err:
return NULL;
}
@@ -331,10 +332,10 @@ void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa,
{
d40_log_lli_link(lli_dst, lli_src, next, flags);
- writel(lli_src->lcsp02, &lcpa[0].lcsp0);
- writel(lli_src->lcsp13, &lcpa[0].lcsp1);
- writel(lli_dst->lcsp02, &lcpa[0].lcsp2);
- writel(lli_dst->lcsp13, &lcpa[0].lcsp3);
+ writel_relaxed(lli_src->lcsp02, &lcpa[0].lcsp0);
+ writel_relaxed(lli_src->lcsp13, &lcpa[0].lcsp1);
+ writel_relaxed(lli_dst->lcsp02, &lcpa[0].lcsp2);
+ writel_relaxed(lli_dst->lcsp13, &lcpa[0].lcsp3);
}
void d40_log_lli_lcla_write(struct d40_log_lli *lcla,
@@ -344,10 +345,10 @@ void d40_log_lli_lcla_write(struct d40_log_lli *lcla,
{
d40_log_lli_link(lli_dst, lli_src, next, flags);
- writel(lli_src->lcsp02, &lcla[0].lcsp02);
- writel(lli_src->lcsp13, &lcla[0].lcsp13);
- writel(lli_dst->lcsp02, &lcla[1].lcsp02);
- writel(lli_dst->lcsp13, &lcla[1].lcsp13);
+ writel_relaxed(lli_src->lcsp02, &lcla[0].lcsp02);
+ writel_relaxed(lli_src->lcsp13, &lcla[0].lcsp13);
+ writel_relaxed(lli_dst->lcsp02, &lcla[1].lcsp02);
+ writel_relaxed(lli_dst->lcsp13, &lcla[1].lcsp13);
}
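
/*
 * A hedged note on the writel_relaxed() switch: on ARM, writel()
 * implies a barrier ordering earlier normal-memory stores before the
 * MMIO store, while writel_relaxed() omits it. The relaxed form is
 * presumably safe for these descriptor words because a later
 * non-relaxed register write that activates the channel (hypothetical
 * names below) still orders everything written before it:
 */
writel_relaxed(lli_dst->lcsp13, &lcla[1].lcsp13);	/* no barrier per word */
writel(activate_cmd, chan_base + link_reg);		/* barrier; LLIs visible first */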
static void d40_log_fill_lli(struct d40_log_lli *lli,
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h
index 6d47373..fdde8ef 100644
--- a/drivers/dma/ste_dma40_ll.h
+++ b/drivers/dma/ste_dma40_ll.h
@@ -125,7 +125,7 @@
#define D40_DREG_GCC 0x000
#define D40_DREG_GCC_ENA 0x1
/* This assumes that there are only 4 event groups */
-#define D40_DREG_GCC_ENABLE_ALL 0xff01
+#define D40_DREG_GCC_ENABLE_ALL 0x3ff01
#define D40_DREG_GCC_EVTGRP_POS 8
#define D40_DREG_GCC_SRC 0
#define D40_DREG_GCC_DST 1
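
/*
 * Decoding the widened mask, assuming the EVTGRP encoding above: bit 0
 * is D40_DREG_GCC_ENA and each event group owns one src and one dst bit
 * from position 8 upward, so ten group bits now cover five groups:
 *
 *   0x3ff01 == (0x3ff << D40_DREG_GCC_EVTGRP_POS) | D40_DREG_GCC_ENA
 *
 * (the "only 4 event groups" note above predates the wider v4b layout).
 */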
@@ -148,14 +148,31 @@
#define D40_DREG_LCPA 0x020
#define D40_DREG_LCLA 0x024
+
+#define D40_DREG_SSEG1 0x030
+#define D40_DREG_SSEG2 0x034
+#define D40_DREG_SSEG3 0x038
+#define D40_DREG_SSEG4 0x03C
+
+#define D40_DREG_SCEG1 0x040
+#define D40_DREG_SCEG2 0x044
+#define D40_DREG_SCEG3 0x048
+#define D40_DREG_SCEG4 0x04C
+
#define D40_DREG_ACTIVE 0x050
#define D40_DREG_ACTIVO 0x054
-#define D40_DREG_FSEB1 0x058
-#define D40_DREG_FSEB2 0x05C
+#define D40_DREG_CIDMOD 0x058
+#define D40_DREG_TCIDV 0x05C
#define D40_DREG_PCMIS 0x060
#define D40_DREG_PCICR 0x064
#define D40_DREG_PCTIS 0x068
#define D40_DREG_PCEIS 0x06C
+
+#define D40_DREG_SPCMIS 0x070
+#define D40_DREG_SPCICR 0x074
+#define D40_DREG_SPCTIS 0x078
+#define D40_DREG_SPCEIS 0x07C
+
#define D40_DREG_LCMIS0 0x080
#define D40_DREG_LCMIS1 0x084
#define D40_DREG_LCMIS2 0x088
@@ -172,6 +189,33 @@
#define D40_DREG_LCEIS1 0x0B4
#define D40_DREG_LCEIS2 0x0B8
#define D40_DREG_LCEIS3 0x0BC
+
+#define D40_DREG_SLCMIS1 0x0C0
+#define D40_DREG_SLCMIS2 0x0C4
+#define D40_DREG_SLCMIS3 0x0C8
+#define D40_DREG_SLCMIS4 0x0CC
+
+#define D40_DREG_SLCICR1 0x0D0
+#define D40_DREG_SLCICR2 0x0D4
+#define D40_DREG_SLCICR3 0x0D8
+#define D40_DREG_SLCICR4 0x0DC
+
+#define D40_DREG_SLCTIS1 0x0E0
+#define D40_DREG_SLCTIS2 0x0E4
+#define D40_DREG_SLCTIS3 0x0E8
+#define D40_DREG_SLCTIS4 0x0EC
+
+#define D40_DREG_SLCEIS1 0x0F0
+#define D40_DREG_SLCEIS2 0x0F4
+#define D40_DREG_SLCEIS3 0x0F8
+#define D40_DREG_SLCEIS4 0x0FC
+
+#define D40_DREG_FSESS1 0x100
+#define D40_DREG_FSESS2 0x104
+
+#define D40_DREG_FSEBS1 0x108
+#define D40_DREG_FSEBS2 0x10C
+
#define D40_DREG_PSEG1 0x110
#define D40_DREG_PSEG2 0x114
#define D40_DREG_PSEG3 0x118
@@ -188,6 +232,86 @@
#define D40_DREG_RCEG2 0x144
#define D40_DREG_RCEG3 0x148
#define D40_DREG_RCEG4 0x14C
+
+#define D40_DREG_PREFOT 0x15C
+#define D40_DREG_EXTCFG 0x160
+
+#define D40_DREG_CPSEG1 0x200
+#define D40_DREG_CPSEG2 0x204
+#define D40_DREG_CPSEG3 0x208
+#define D40_DREG_CPSEG4 0x20C
+#define D40_DREG_CPSEG5 0x210
+
+#define D40_DREG_CPCEG1 0x220
+#define D40_DREG_CPCEG2 0x224
+#define D40_DREG_CPCEG3 0x228
+#define D40_DREG_CPCEG4 0x22C
+#define D40_DREG_CPCEG5 0x230
+
+#define D40_DREG_CRSEG1 0x240
+#define D40_DREG_CRSEG2 0x244
+#define D40_DREG_CRSEG3 0x248
+#define D40_DREG_CRSEG4 0x24C
+#define D40_DREG_CRSEG5 0x250
+
+#define D40_DREG_CRCEG1 0x260
+#define D40_DREG_CRCEG2 0x264
+#define D40_DREG_CRCEG3 0x268
+#define D40_DREG_CRCEG4 0x26C
+#define D40_DREG_CRCEG5 0x270
+
+#define D40_DREG_CFSESS1 0x280
+#define D40_DREG_CFSESS2 0x284
+#define D40_DREG_CFSESS3 0x288
+
+#define D40_DREG_CFSEBS1 0x290
+#define D40_DREG_CFSEBS2 0x294
+#define D40_DREG_CFSEBS3 0x298
+
+#define D40_DREG_CLCMIS1 0x300
+#define D40_DREG_CLCMIS2 0x304
+#define D40_DREG_CLCMIS3 0x308
+#define D40_DREG_CLCMIS4 0x30C
+#define D40_DREG_CLCMIS5 0x310
+
+#define D40_DREG_CLCICR1 0x320
+#define D40_DREG_CLCICR2 0x324
+#define D40_DREG_CLCICR3 0x328
+#define D40_DREG_CLCICR4 0x32C
+#define D40_DREG_CLCICR5 0x330
+
+#define D40_DREG_CLCTIS1 0x340
+#define D40_DREG_CLCTIS2 0x344
+#define D40_DREG_CLCTIS3 0x348
+#define D40_DREG_CLCTIS4 0x34C
+#define D40_DREG_CLCTIS5 0x350
+
+#define D40_DREG_CLCEIS1 0x360
+#define D40_DREG_CLCEIS2 0x364
+#define D40_DREG_CLCEIS3 0x368
+#define D40_DREG_CLCEIS4 0x36C
+#define D40_DREG_CLCEIS5 0x370
+
+#define D40_DREG_CPCMIS 0x380
+#define D40_DREG_CPCICR 0x384
+#define D40_DREG_CPCTIS 0x388
+#define D40_DREG_CPCEIS 0x38C
+
+#define D40_DREG_SCCIDA1 0xE80
+#define D40_DREG_SCCIDA2 0xE90
+#define D40_DREG_SCCIDA3 0xEA0
+#define D40_DREG_SCCIDA4 0xEB0
+#define D40_DREG_SCCIDA5 0xEC0
+
+#define D40_DREG_SCCIDB1 0xE84
+#define D40_DREG_SCCIDB2 0xE94
+#define D40_DREG_SCCIDB3 0xEA4
+#define D40_DREG_SCCIDB4 0xEB4
+#define D40_DREG_SCCIDB5 0xEC4
+
+#define D40_DREG_PRSCCIDA 0xF80
+#define D40_DREG_PRSCCIDB 0xF84
+
#define D40_DREG_STFU 0xFC8
#define D40_DREG_ICFG 0xFCC
#define D40_DREG_PERIPHID0 0xFE0
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index 3cad856..fcee27e 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -21,6 +21,7 @@
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
+#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -31,8 +32,8 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
+#include <linux/clk/tegra.h>
-#include <mach/clk.h>
#include "dmaengine.h"
#define TEGRA_APBDMA_GENERAL 0x0
@@ -62,6 +63,9 @@
#define TEGRA_APBDMA_STATUS_COUNT_SHIFT 2
#define TEGRA_APBDMA_STATUS_COUNT_MASK 0xFFFC
+#define TEGRA_APBDMA_CHAN_CSRE 0x00C
+#define TEGRA_APBDMA_CHAN_CSRE_PAUSE (1 << 31)
+
/* AHB memory address */
#define TEGRA_APBDMA_CHAN_AHBPTR 0x010
@@ -112,10 +116,12 @@ struct tegra_dma;
* tegra_dma_chip_data Tegra chip specific DMA data
* @nr_channels: Number of channels available in the controller.
* @max_dma_count: Maximum DMA transfer count supported by DMA controller.
+ * @support_channel_pause: Support channel-wise pause of DMA.
*/
struct tegra_dma_chip_data {
int nr_channels;
int max_dma_count;
+ bool support_channel_pause;
};
/* DMA channel registers */
@@ -354,6 +360,32 @@ static void tegra_dma_global_resume(struct tegra_dma_channel *tdc)
spin_unlock(&tdma->global_lock);
}
+static void tegra_dma_pause(struct tegra_dma_channel *tdc,
+ bool wait_for_burst_complete)
+{
+ struct tegra_dma *tdma = tdc->tdma;
+
+ if (tdma->chip_data->support_channel_pause) {
+ tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE,
+ TEGRA_APBDMA_CHAN_CSRE_PAUSE);
+ if (wait_for_burst_complete)
+ udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
+ } else {
+ tegra_dma_global_pause(tdc, wait_for_burst_complete);
+ }
+}
+
+static void tegra_dma_resume(struct tegra_dma_channel *tdc)
+{
+ struct tegra_dma *tdma = tdc->tdma;
+
+ if (tdma->chip_data->support_channel_pause) {
+ tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE, 0);
+ } else {
+ tegra_dma_global_resume(tdc);
+ }
+}
+
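
/*
 * A short note on the dispatch above: Tegra114 gains a per-channel
 * pause bit (CSRE), so only the older parts fall back to pausing the
 * whole controller; both paths wait out an in-flight burst the same
 * way when asked to:
 */
tegra_dma_pause(tdc, true);	/* udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME) on either path */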
static void tegra_dma_stop(struct tegra_dma_channel *tdc)
{
u32 csr;
@@ -409,7 +441,7 @@ static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc,
* If there is already an IEC status, the interrupt handler needs to
* load the new configuration.
*/
- tegra_dma_global_pause(tdc, false);
+ tegra_dma_pause(tdc, false);
status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
/*
@@ -419,7 +451,7 @@ static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc,
if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
dev_err(tdc2dev(tdc),
"Skipping new configuration as interrupt is pending\n");
- tegra_dma_global_resume(tdc);
+ tegra_dma_resume(tdc);
return;
}
@@ -430,7 +462,7 @@ static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc,
nsg_req->ch_regs.csr | TEGRA_APBDMA_CSR_ENB);
nsg_req->configured = true;
- tegra_dma_global_resume(tdc);
+ tegra_dma_resume(tdc);
}
static void tdc_start_head_req(struct tegra_dma_channel *tdc)
@@ -691,7 +723,7 @@ static void tegra_dma_terminate_all(struct dma_chan *dc)
goto skip_dma_stop;
/* Pause DMA before checking the queue status */
- tegra_dma_global_pause(tdc, true);
+ tegra_dma_pause(tdc, true);
status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
@@ -709,7 +741,7 @@ static void tegra_dma_terminate_all(struct dma_chan *dc)
sgreq->dma_desc->bytes_transferred +=
get_current_xferred_count(tdc, sgreq, status);
}
- tegra_dma_global_resume(tdc);
+ tegra_dma_resume(tdc);
skip_dma_stop:
tegra_dma_abort_all(tdc);
@@ -737,7 +769,6 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
ret = dma_cookie_status(dc, cookie, txstate);
if (ret == DMA_SUCCESS) {
- dma_set_residue(txstate, 0);
spin_unlock_irqrestore(&tdc->lock, flags);
return ret;
}
@@ -1179,6 +1210,7 @@ static void tegra_dma_free_chan_resources(struct dma_chan *dc)
static const struct tegra_dma_chip_data tegra20_dma_chip_data = {
.nr_channels = 16,
.max_dma_count = 1024UL * 64,
+ .support_channel_pause = false,
};
#if defined(CONFIG_OF)
@@ -1186,10 +1218,22 @@ static const struct tegra_dma_chip_data tegra20_dma_chip_data = {
static const struct tegra_dma_chip_data tegra30_dma_chip_data = {
.nr_channels = 32,
.max_dma_count = 1024UL * 64,
+ .support_channel_pause = false,
};
+/* Tegra114 specific DMA controller information */
+static const struct tegra_dma_chip_data tegra114_dma_chip_data = {
+ .nr_channels = 32,
+ .max_dma_count = 1024UL * 64,
+ .support_channel_pause = true,
+};
+
static const struct of_device_id tegra_dma_of_match[] = {
{
+ .compatible = "nvidia,tegra114-apbdma",
+ .data = &tegra114_dma_chip_data,
+ }, {
.compatible = "nvidia,tegra30-apbdma",
.data = &tegra30_dma_chip_data,
}, {
@@ -1240,12 +1284,9 @@ static int tegra_dma_probe(struct platform_device *pdev)
return -EINVAL;
}
- tdma->base_addr = devm_request_and_ioremap(&pdev->dev, res);
- if (!tdma->base_addr) {
- dev_err(&pdev->dev,
- "Cannot request memregion/iomap dma address\n");
- return -EADDRNOTAVAIL;
- }
+ tdma->base_addr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(tdma->base_addr))
+ return PTR_ERR(tdma->base_addr);
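
/*
 * devm_ioremap_resource() folds the region request and the ioremap into
 * one devres-managed call, logs its own failure message, and returns an
 * ERR_PTR-encoded error, which is why the explicit dev_err() and the
 * -EADDRNOTAVAIL special case can go away in favor of IS_ERR()/PTR_ERR().
 */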
tdma->dma_clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(tdma->dma_clk)) {