Diffstat (limited to 'drivers')
-rw-r--r--  drivers/dma/Kconfig | 38
-rw-r--r--  drivers/dma/at_hdmac.c | 11
-rw-r--r--  drivers/dma/at_xdmac.c | 8
-rw-r--r--  drivers/dma/bestcomm/bestcomm.c | 4
-rw-r--r--  drivers/dma/coh901318.c | 56
-rw-r--r--  drivers/dma/coh901318_lli.c | 4
-rw-r--r--  drivers/dma/cppi41.c | 142
-rw-r--r--  drivers/dma/dma-jz4740.c | 2
-rw-r--r--  drivers/dma/dma-jz4780.c | 10
-rw-r--r--  drivers/dma/dmaengine.c | 7
-rw-r--r--  drivers/dma/dmaengine.h | 84
-rw-r--r--  drivers/dma/dmatest.c | 23
-rw-r--r--  drivers/dma/dw/core.c | 14
-rw-r--r--  drivers/dma/edma.c | 38
-rw-r--r--  drivers/dma/ep93xx_dma.c | 28
-rw-r--r--  drivers/dma/fsl_raid.c | 12
-rw-r--r--  drivers/dma/fsldma.c | 22
-rw-r--r--  drivers/dma/hsu/hsu.c | 9
-rw-r--r--  drivers/dma/hsu/pci.c | 6
-rw-r--r--  drivers/dma/imx-dma.c | 4
-rw-r--r--  drivers/dma/imx-sdma.c | 35
-rw-r--r--  drivers/dma/ioat/dma.c | 213
-rw-r--r--  drivers/dma/ioat/registers.h | 2
-rw-r--r--  drivers/dma/iop-adma.c | 3
-rw-r--r--  drivers/dma/ipu/ipu_idmac.c | 18
-rw-r--r--  drivers/dma/mic_x100_dma.c | 6
-rw-r--r--  drivers/dma/mmp_pdma.c | 14
-rw-r--r--  drivers/dma/mmp_tdma.c | 6
-rw-r--r--  drivers/dma/mpc512x_dma.c | 7
-rw-r--r--  drivers/dma/mv_xor.c | 5
-rw-r--r--  drivers/dma/mxs-dma.c | 3
-rw-r--r--  drivers/dma/nbpfaxi.c | 9
-rw-r--r--  drivers/dma/pch_dma.c | 7
-rw-r--r--  drivers/dma/pl330.c | 10
-rw-r--r--  drivers/dma/ppc4xx/adma.c | 9
-rw-r--r--  drivers/dma/qcom/hidma.c | 57
-rw-r--r--  drivers/dma/qcom/hidma.h | 2
-rw-r--r--  drivers/dma/qcom/hidma_ll.c | 32
-rw-r--r--  drivers/dma/s3c24xx-dma.c | 9
-rw-r--r--  drivers/dma/sa11x0-dma.c | 14
-rw-r--r--  drivers/dma/sh/rcar-dmac.c | 16
-rw-r--r--  drivers/dma/sh/shdma-base.c | 12
-rw-r--r--  drivers/dma/sirf-dma.c | 7
-rw-r--r--  drivers/dma/ste_dma40.c | 46
-rw-r--r--  drivers/dma/stm32-dma.c | 2
-rw-r--r--  drivers/dma/sun6i-dma.c | 7
-rw-r--r--  drivers/dma/tegra20-apb-dma.c | 10
-rw-r--r--  drivers/dma/ti-dma-crossbar.c | 30
-rw-r--r--  drivers/dma/timb_dma.c | 9
-rw-r--r--  drivers/dma/txx9dmac.c | 9
-rw-r--r--  drivers/dma/virt-dma.c | 17
-rw-r--r--  drivers/dma/virt-dma.h | 10
-rw-r--r--  drivers/dma/xgene-dma.c | 3
-rw-r--r--  drivers/dma/xilinx/xilinx_dma.c | 10
-rw-r--r--  drivers/ntb/ntb_transport.c | 193
-rw-r--r--  drivers/tty/serial/8250/8250_mid.c | 8
56 files changed, 886 insertions, 486 deletions
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 739f797..9e680ec 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -102,7 +102,7 @@ config AXI_DMAC
config COH901318
bool "ST-Ericsson COH901318 DMA support"
select DMA_ENGINE
- depends on ARCH_U300
+ depends on ARCH_U300 || COMPILE_TEST
help
Enable support for ST-Ericsson COH 901 318 DMA.
@@ -114,13 +114,13 @@ config DMA_BCM2835
config DMA_JZ4740
tristate "JZ4740 DMA support"
- depends on MACH_JZ4740
+ depends on MACH_JZ4740 || COMPILE_TEST
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
config DMA_JZ4780
tristate "JZ4780 DMA support"
- depends on MACH_JZ4780
+ depends on MACH_JZ4780 || COMPILE_TEST
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
@@ -130,14 +130,14 @@ config DMA_JZ4780
config DMA_OMAP
tristate "OMAP DMA support"
- depends on ARCH_OMAP
+ depends on ARCH_OMAP || COMPILE_TEST
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
- select TI_DMA_CROSSBAR if SOC_DRA7XX
+ select TI_DMA_CROSSBAR if (SOC_DRA7XX || COMPILE_TEST)
config DMA_SA11X0
tristate "SA-11x0 DMA support"
- depends on ARCH_SA1100
+ depends on ARCH_SA1100 || COMPILE_TEST
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
@@ -150,7 +150,6 @@ config DMA_SUN4I
depends on MACH_SUN4I || MACH_SUN5I || MACH_SUN7I
default (MACH_SUN4I || MACH_SUN5I || MACH_SUN7I)
select DMA_ENGINE
- select DMA_OF
select DMA_VIRTUAL_CHANNELS
help
Enable support for the DMA controller present in the sun4i,
@@ -167,7 +166,7 @@ config DMA_SUN6I
config EP93XX_DMA
bool "Cirrus Logic EP93xx DMA support"
- depends on ARCH_EP93XX
+ depends on ARCH_EP93XX || COMPILE_TEST
select DMA_ENGINE
help
Enable support for the Cirrus Logic EP93xx M2P/M2M DMA controller.
@@ -297,16 +296,16 @@ config LPC18XX_DMAMUX
config MMP_PDMA
bool "MMP PDMA support"
- depends on (ARCH_MMP || ARCH_PXA)
+ depends on ARCH_MMP || ARCH_PXA || COMPILE_TEST
select DMA_ENGINE
help
Support the MMP PDMA engine for PXA and MMP platform.
config MMP_TDMA
bool "MMP Two-Channel DMA support"
- depends on ARCH_MMP
+ depends on ARCH_MMP || COMPILE_TEST
select DMA_ENGINE
- select MMP_SRAM
+ select MMP_SRAM if ARCH_MMP
help
Support the MMP Two-Channel DMA engine.
This engine used for MMP Audio DMA and pxa910 SQU.
@@ -316,7 +315,6 @@ config MOXART_DMA
tristate "MOXART DMA support"
depends on ARCH_MOXART
select DMA_ENGINE
- select DMA_OF
select DMA_VIRTUAL_CHANNELS
help
Enable support for the MOXA ART SoC DMA controller.
@@ -439,9 +437,8 @@ config STE_DMA40
config STM32_DMA
bool "STMicroelectronics STM32 DMA support"
- depends on ARCH_STM32
+ depends on ARCH_STM32 || COMPILE_TEST
select DMA_ENGINE
- select DMA_OF
select DMA_VIRTUAL_CHANNELS
help
Enable support for the on-chip DMA controller on STMicroelectronics
@@ -451,7 +448,7 @@ config STM32_DMA
config S3C24XX_DMAC
bool "Samsung S3C24XX DMA support"
- depends on ARCH_S3C24XX
+ depends on ARCH_S3C24XX || COMPILE_TEST
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
@@ -483,10 +480,9 @@ config TEGRA20_APB_DMA
config TEGRA210_ADMA
bool "NVIDIA Tegra210 ADMA support"
- depends on ARCH_TEGRA_210_SOC
+ depends on (ARCH_TEGRA_210_SOC || COMPILE_TEST) && PM_CLK
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
- select PM_CLK
help
Support for the NVIDIA Tegra210 ADMA controller driver. The
DMA controller has multiple DMA channels and is used to service
@@ -497,7 +493,7 @@ config TEGRA210_ADMA
config TIMB_DMA
tristate "Timberdale FPGA DMA support"
- depends on MFD_TIMBERDALE
+ depends on MFD_TIMBERDALE || COMPILE_TEST
select DMA_ENGINE
help
Enable support for the Timberdale FPGA DMA engine.
@@ -515,10 +511,10 @@ config TI_DMA_CROSSBAR
config TI_EDMA
bool "TI EDMA support"
- depends on ARCH_DAVINCI || ARCH_OMAP || ARCH_KEYSTONE
+ depends on ARCH_DAVINCI || ARCH_OMAP || ARCH_KEYSTONE || COMPILE_TEST
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
- select TI_DMA_CROSSBAR if ARCH_OMAP
+ select TI_DMA_CROSSBAR if (ARCH_OMAP || COMPILE_TEST)
default n
help
Enable support for the TI EDMA controller. This DMA
@@ -561,7 +557,7 @@ config XILINX_ZYNQMP_DMA
config ZX_DMA
tristate "ZTE ZX296702 DMA support"
- depends on ARCH_ZX
+ depends on ARCH_ZX || COMPILE_TEST
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 53d22eb..a4c8f80 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -473,15 +473,11 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
/* for cyclic transfers,
* no need to replay callback function while stopping */
if (!atc_chan_is_cyclic(atchan)) {
- dma_async_tx_callback callback = txd->callback;
- void *param = txd->callback_param;
-
/*
* The API requires that no submissions are done from a
* callback, so we don't need to drop the lock here
*/
- if (callback)
- callback(param);
+ dmaengine_desc_get_callback_invoke(txd, NULL);
}
dma_run_dependencies(txd);
@@ -598,15 +594,12 @@ static void atc_handle_cyclic(struct at_dma_chan *atchan)
{
struct at_desc *first = atc_first_active(atchan);
struct dma_async_tx_descriptor *txd = &first->txd;
- dma_async_tx_callback callback = txd->callback;
- void *param = txd->callback_param;
dev_vdbg(chan2dev(&atchan->chan_common),
"new cyclic period llp 0x%08x\n",
channel_readl(atchan, DSCR));
- if (callback)
- callback(param);
+ dmaengine_desc_get_callback_invoke(txd, NULL);
}
/*-- IRQ & Tasklet ---------------------------------------------------*/
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index e434ffe..2badc57 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -1572,8 +1572,8 @@ static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);
txd = &desc->tx_dma_desc;
- if (txd->callback && (txd->flags & DMA_PREP_INTERRUPT))
- txd->callback(txd->callback_param);
+ if (txd->flags & DMA_PREP_INTERRUPT)
+ dmaengine_desc_get_callback_invoke(txd, NULL);
}
static void at_xdmac_tasklet(unsigned long data)
@@ -1616,8 +1616,8 @@ static void at_xdmac_tasklet(unsigned long data)
if (!at_xdmac_chan_is_cyclic(atchan)) {
dma_cookie_complete(txd);
- if (txd->callback && (txd->flags & DMA_PREP_INTERRUPT))
- txd->callback(txd->callback_param);
+ if (txd->flags & DMA_PREP_INTERRUPT)
+ dmaengine_desc_get_callback_invoke(txd, NULL);
}
dma_run_dependencies(txd);
diff --git a/drivers/dma/bestcomm/bestcomm.c b/drivers/dma/bestcomm/bestcomm.c
index 7ce8437..7a67b83 100644
--- a/drivers/dma/bestcomm/bestcomm.c
+++ b/drivers/dma/bestcomm/bestcomm.c
@@ -82,7 +82,7 @@ bcom_task_alloc(int bd_count, int bd_size, int priv_size)
/* Get IRQ of that task */
tsk->irq = irq_of_parse_and_map(bcom_eng->ofnode, tsk->tasknum);
- if (tsk->irq == NO_IRQ)
+ if (!tsk->irq)
goto error;
/* Init the BDs, if needed */
@@ -104,7 +104,7 @@ bcom_task_alloc(int bd_count, int bd_size, int priv_size)
error:
if (tsk) {
- if (tsk->irq != NO_IRQ)
+ if (tsk->irq)
irq_dispose_mapping(tsk->irq);
bcom_sram_free(tsk->bd);
kfree(tsk->cookie);
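
The NO_IRQ conversions in this series rely on irq_of_parse_and_map() returning 0 when no mapping exists, so a plain truth test is the portable check. A minimal sketch of the idiom (function name is illustrative, not from the patch):

#include <linux/of_irq.h>
#include <linux/errno.h>

static int example_map_irq(struct device_node *np, int index)
{
	unsigned int irq = irq_of_parse_and_map(np, index);

	/* 0 means "no interrupt"; NO_IRQ is being removed tree-wide */
	if (!irq)
		return -ENODEV;

	return irq;
}
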
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index e4acd63..74794c9 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -1319,10 +1319,10 @@ static void coh901318_list_print(struct coh901318_chan *cohc,
int i = 0;
while (l) {
- dev_vdbg(COHC_2_DEV(cohc), "i %d, lli %p, ctrl 0x%x, src 0x%x"
- ", dst 0x%x, link 0x%x virt_link_addr 0x%p\n",
- i, l, l->control, l->src_addr, l->dst_addr,
- l->link_addr, l->virt_link_addr);
+ dev_vdbg(COHC_2_DEV(cohc), "i %d, lli %p, ctrl 0x%x, src 0x%pad"
+ ", dst 0x%pad, link 0x%pad virt_link_addr 0x%p\n",
+ i, l, l->control, &l->src_addr, &l->dst_addr,
+ &l->link_addr, l->virt_link_addr);
i++;
l = l->virt_link_addr;
}
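
The printk conversions above use %pad, which takes a pointer to a dma_addr_t and prints it at the correct width whether dma_addr_t is 32 or 64 bits; passing the value itself with %x is what triggered the warnings being fixed here. A hedged sketch (device and variable names assumed):

#include <linux/device.h>
#include <linux/types.h>

static void example_log_xfer(struct device *dev, dma_addr_t src,
			     dma_addr_t dst, size_t len)
{
	/* %pad dereferences its argument, so pass &src, not src */
	dev_vdbg(dev, "src %pad dst %pad len %zu\n", &src, &dst, len);
}
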
@@ -1335,7 +1335,7 @@ static void coh901318_list_print(struct coh901318_chan *cohc,
static struct coh901318_base *debugfs_dma_base;
static struct dentry *dma_dentry;
-static int coh901318_debugfs_read(struct file *file, char __user *buf,
+static ssize_t coh901318_debugfs_read(struct file *file, char __user *buf,
size_t count, loff_t *f_pos)
{
u64 started_channels = debugfs_dma_base->pm.started_channels;
@@ -1352,9 +1352,10 @@ static int coh901318_debugfs_read(struct file *file, char __user *buf,
tmp += sprintf(tmp, "DMA -- enabled dma channels\n");
- for (i = 0; i < U300_DMA_CHANNELS; i++)
- if (started_channels & (1 << i))
+ for (i = 0; i < U300_DMA_CHANNELS; i++) {
+ if (started_channels & (1ULL << i))
tmp += sprintf(tmp, "channel %d\n", i);
+ }
tmp += sprintf(tmp, "Pool alloc nbr %d\n", pool_count);
@@ -1553,15 +1554,8 @@ coh901318_desc_submit(struct coh901318_chan *cohc, struct coh901318_desc *desc)
static struct coh901318_desc *
coh901318_first_active_get(struct coh901318_chan *cohc)
{
- struct coh901318_desc *d;
-
- if (list_empty(&cohc->active))
- return NULL;
-
- d = list_first_entry(&cohc->active,
- struct coh901318_desc,
- node);
- return d;
+ return list_first_entry_or_null(&cohc->active, struct coh901318_desc,
+ node);
}
static void
@@ -1579,15 +1573,8 @@ coh901318_desc_queue(struct coh901318_chan *cohc, struct coh901318_desc *desc)
static struct coh901318_desc *
coh901318_first_queued(struct coh901318_chan *cohc)
{
- struct coh901318_desc *d;
-
- if (list_empty(&cohc->queue))
- return NULL;
-
- d = list_first_entry(&cohc->queue,
- struct coh901318_desc,
- node);
- return d;
+ return list_first_entry_or_null(&cohc->queue, struct coh901318_desc,
+ node);
}
static inline u32 coh901318_get_bytes_in_lli(struct coh901318_lli *in_lli)
@@ -1766,7 +1753,7 @@ static int coh901318_resume(struct dma_chan *chan)
bool coh901318_filter_id(struct dma_chan *chan, void *chan_id)
{
- unsigned int ch_nr = (unsigned int) chan_id;
+ unsigned long ch_nr = (unsigned long) chan_id;
if (ch_nr == to_coh901318_chan(chan)->id)
return true;
@@ -1888,8 +1875,7 @@ static void dma_tasklet(unsigned long data)
struct coh901318_chan *cohc = (struct coh901318_chan *) data;
struct coh901318_desc *cohd_fin;
unsigned long flags;
- dma_async_tx_callback callback;
- void *callback_param;
+ struct dmaengine_desc_callback cb;
dev_vdbg(COHC_2_DEV(cohc), "[%s] chan_id %d"
" nbr_active_done %ld\n", __func__,
@@ -1904,8 +1890,7 @@ static void dma_tasklet(unsigned long data)
goto err;
/* locate callback to client */
- callback = cohd_fin->desc.callback;
- callback_param = cohd_fin->desc.callback_param;
+ dmaengine_desc_get_callback(&cohd_fin->desc, &cb);
/* sign this job as completed on the channel */
dma_cookie_complete(&cohd_fin->desc);
@@ -1920,8 +1905,7 @@ static void dma_tasklet(unsigned long data)
spin_unlock_irqrestore(&cohc->lock, flags);
/* Call the callback when we're done */
- if (callback)
- callback(callback_param);
+ dmaengine_desc_callback_invoke(&cb, NULL);
spin_lock_irqsave(&cohc->lock, flags);
@@ -2247,8 +2231,8 @@ coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
spin_lock_irqsave(&cohc->lock, flg);
dev_vdbg(COHC_2_DEV(cohc),
- "[%s] channel %d src 0x%x dest 0x%x size %d\n",
- __func__, cohc->id, src, dest, size);
+ "[%s] channel %d src 0x%pad dest 0x%pad size %zu\n",
+ __func__, cohc->id, &src, &dest, size);
if (flags & DMA_PREP_INTERRUPT)
/* Trigger interrupt after last lli */
@@ -2744,8 +2728,8 @@ static int __init coh901318_probe(struct platform_device *pdev)
goto err_register_of_dma;
platform_set_drvdata(pdev, base);
- dev_info(&pdev->dev, "Initialized COH901318 DMA on virtual base 0x%08x\n",
- (u32) base->virtbase);
+ dev_info(&pdev->dev, "Initialized COH901318 DMA on virtual base 0x%p\n",
+ base->virtbase);
return err;
diff --git a/drivers/dma/coh901318_lli.c b/drivers/dma/coh901318_lli.c
index 702112d..d612b2e 100644
--- a/drivers/dma/coh901318_lli.c
+++ b/drivers/dma/coh901318_lli.c
@@ -75,7 +75,7 @@ coh901318_lli_alloc(struct coh901318_pool *pool, unsigned int len)
lli = head;
lli->phy_this = phy;
lli->link_addr = 0x00000000;
- lli->virt_link_addr = 0x00000000U;
+ lli->virt_link_addr = NULL;
for (i = 1; i < len; i++) {
lli_prev = lli;
@@ -88,7 +88,7 @@ coh901318_lli_alloc(struct coh901318_pool *pool, unsigned int len)
DEBUGFS_POOL_COUNTER_ADD(pool, 1);
lli->phy_this = phy;
lli->link_addr = 0x00000000;
- lli->virt_link_addr = 0x00000000U;
+ lli->virt_link_addr = NULL;
lli_prev->link_addr = phy;
lli_prev->virt_link_addr = lli;
diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
index 4b23174..bac5f02 100644
--- a/drivers/dma/cppi41.c
+++ b/drivers/dma/cppi41.c
@@ -108,6 +108,8 @@ struct cppi41_channel {
unsigned td_queued:1;
unsigned td_seen:1;
unsigned td_desc_seen:1;
+
+ struct list_head node; /* Node for pending list */
};
struct cppi41_desc {
@@ -146,6 +148,9 @@ struct cppi41_dd {
const struct chan_queues *queues_tx;
struct chan_queues td_queue;
+ struct list_head pending; /* Pending queued transfers */
+ spinlock_t lock; /* Lock for pending list */
+
/* context for suspend/resume */
unsigned int dma_tdfdq;
};
@@ -331,7 +336,11 @@ static irqreturn_t cppi41_irq(int irq, void *data)
c->residue = pd_trans_len(c->desc->pd6) - len;
dma_cookie_complete(&c->txd);
- c->txd.callback(c->txd.callback_param);
+ dmaengine_desc_get_callback_invoke(&c->txd, NULL);
+
+ /* Paired with cppi41_dma_issue_pending */
+ pm_runtime_mark_last_busy(cdd->ddev.dev);
+ pm_runtime_put_autosuspend(cdd->ddev.dev);
}
}
return IRQ_HANDLED;
@@ -349,6 +358,12 @@ static dma_cookie_t cppi41_tx_submit(struct dma_async_tx_descriptor *tx)
static int cppi41_dma_alloc_chan_resources(struct dma_chan *chan)
{
struct cppi41_channel *c = to_cpp41_chan(chan);
+ struct cppi41_dd *cdd = c->cdd;
+ int error;
+
+ error = pm_runtime_get_sync(cdd->ddev.dev);
+ if (error < 0)
+ return error;
dma_cookie_init(chan);
dma_async_tx_descriptor_init(&c->txd, chan);
@@ -357,11 +372,26 @@ static int cppi41_dma_alloc_chan_resources(struct dma_chan *chan)
if (!c->is_tx)
cppi_writel(c->q_num, c->gcr_reg + RXHPCRA0);
+ pm_runtime_mark_last_busy(cdd->ddev.dev);
+ pm_runtime_put_autosuspend(cdd->ddev.dev);
+
return 0;
}
static void cppi41_dma_free_chan_resources(struct dma_chan *chan)
{
+ struct cppi41_channel *c = to_cpp41_chan(chan);
+ struct cppi41_dd *cdd = c->cdd;
+ int error;
+
+ error = pm_runtime_get_sync(cdd->ddev.dev);
+ if (error < 0)
+ return;
+
+ WARN_ON(!list_empty(&cdd->pending));
+
+ pm_runtime_mark_last_busy(cdd->ddev.dev);
+ pm_runtime_put_autosuspend(cdd->ddev.dev);
}
static enum dma_status cppi41_dma_tx_status(struct dma_chan *chan,
@@ -386,21 +416,6 @@ static void push_desc_queue(struct cppi41_channel *c)
u32 desc_phys;
u32 reg;
- desc_phys = lower_32_bits(c->desc_phys);
- desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
- WARN_ON(cdd->chan_busy[desc_num]);
- cdd->chan_busy[desc_num] = c;
-
- reg = (sizeof(struct cppi41_desc) - 24) / 4;
- reg |= desc_phys;
- cppi_writel(reg, cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num));
-}
-
-static void cppi41_dma_issue_pending(struct dma_chan *chan)
-{
- struct cppi41_channel *c = to_cpp41_chan(chan);
- u32 reg;
-
c->residue = 0;
reg = GCR_CHAN_ENABLE;
@@ -418,7 +433,46 @@ static void cppi41_dma_issue_pending(struct dma_chan *chan)
* before starting the dma engine.
*/
__iowmb();
- push_desc_queue(c);
+
+ desc_phys = lower_32_bits(c->desc_phys);
+ desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
+ WARN_ON(cdd->chan_busy[desc_num]);
+ cdd->chan_busy[desc_num] = c;
+
+ reg = (sizeof(struct cppi41_desc) - 24) / 4;
+ reg |= desc_phys;
+ cppi_writel(reg, cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num));
+}
+
+static void pending_desc(struct cppi41_channel *c)
+{
+ struct cppi41_dd *cdd = c->cdd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cdd->lock, flags);
+ list_add_tail(&c->node, &cdd->pending);
+ spin_unlock_irqrestore(&cdd->lock, flags);
+}
+
+static void cppi41_dma_issue_pending(struct dma_chan *chan)
+{
+ struct cppi41_channel *c = to_cpp41_chan(chan);
+ struct cppi41_dd *cdd = c->cdd;
+ int error;
+
+ /* PM runtime paired with dmaengine_desc_get_callback_invoke */
+ error = pm_runtime_get(cdd->ddev.dev);
+ if ((error != -EINPROGRESS) && error < 0) {
+ dev_err(cdd->ddev.dev, "Failed to pm_runtime_get: %i\n",
+ error);
+
+ return;
+ }
+
+ if (likely(pm_runtime_active(cdd->ddev.dev)))
+ push_desc_queue(c);
+ else
+ pending_desc(c);
}
static u32 get_host_pd0(u32 length)
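
The issue_pending() rework above boils down to one pattern: take an asynchronous runtime-PM reference, push straight to the queue manager if the device is already powered, and otherwise park the channel on a list that cppi41_runtime_resume() drains. A distilled sketch under assumed names (the pending list lives on the controller in the real driver):

#include <linux/pm_runtime.h>
#include <linux/list.h>

struct example_chan {
	struct device *dev;
	struct list_head node;		/* links into the pending list */
	struct list_head *pending;	/* controller-wide pending list */
};

void example_hw_push(struct example_chan *c);	/* assumed hardware push */

static void example_issue_pending(struct example_chan *c)
{
	/* async get: -EINPROGRESS just means resume is underway */
	int ret = pm_runtime_get(c->dev);

	if (ret < 0 && ret != -EINPROGRESS)
		return;

	if (pm_runtime_active(c->dev))
		example_hw_push(c);		/* powered: queue now */
	else
		list_add_tail(&c->node, c->pending); /* flushed on resume */
}

The reference taken here is dropped in the completion interrupt with pm_runtime_put_autosuspend(), as the irq handler hunk above shows.
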
@@ -940,12 +994,18 @@ static int cppi41_dma_probe(struct platform_device *pdev)
cdd->ctrl_mem = of_iomap(dev->of_node, 1);
cdd->sched_mem = of_iomap(dev->of_node, 2);
cdd->qmgr_mem = of_iomap(dev->of_node, 3);
+ spin_lock_init(&cdd->lock);
+ INIT_LIST_HEAD(&cdd->pending);
+
+ platform_set_drvdata(pdev, cdd);
if (!cdd->usbss_mem || !cdd->ctrl_mem || !cdd->sched_mem ||
!cdd->qmgr_mem)
return -ENXIO;
pm_runtime_enable(dev);
+ pm_runtime_set_autosuspend_delay(dev, 100);
+ pm_runtime_use_autosuspend(dev);
ret = pm_runtime_get_sync(dev);
if (ret < 0)
goto err_get_sync;
@@ -985,7 +1045,9 @@ static int cppi41_dma_probe(struct platform_device *pdev)
if (ret)
goto err_of;
- platform_set_drvdata(pdev, cdd);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
return 0;
err_of:
dma_async_device_unregister(&cdd->ddev);
@@ -996,7 +1058,8 @@ err_irq:
err_chans:
deinit_cppi41(dev, cdd);
err_init_cppi:
- pm_runtime_put(dev);
+ pm_runtime_dont_use_autosuspend(dev);
+ pm_runtime_put_sync(dev);
err_get_sync:
pm_runtime_disable(dev);
iounmap(cdd->usbss_mem);
@@ -1021,13 +1084,13 @@ static int cppi41_dma_remove(struct platform_device *pdev)
iounmap(cdd->ctrl_mem);
iounmap(cdd->sched_mem);
iounmap(cdd->qmgr_mem);
- pm_runtime_put(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+ pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int cppi41_suspend(struct device *dev)
+static int __maybe_unused cppi41_suspend(struct device *dev)
{
struct cppi41_dd *cdd = dev_get_drvdata(dev);
@@ -1038,7 +1101,7 @@ static int cppi41_suspend(struct device *dev)
return 0;
}
-static int cppi41_resume(struct device *dev)
+static int __maybe_unused cppi41_resume(struct device *dev)
{
struct cppi41_dd *cdd = dev_get_drvdata(dev);
struct cppi41_channel *c;
@@ -1062,9 +1125,38 @@ static int cppi41_resume(struct device *dev)
return 0;
}
-#endif
-static SIMPLE_DEV_PM_OPS(cppi41_pm_ops, cppi41_suspend, cppi41_resume);
+static int __maybe_unused cppi41_runtime_suspend(struct device *dev)
+{
+ struct cppi41_dd *cdd = dev_get_drvdata(dev);
+
+ WARN_ON(!list_empty(&cdd->pending));
+
+ return 0;
+}
+
+static int __maybe_unused cppi41_runtime_resume(struct device *dev)
+{
+ struct cppi41_dd *cdd = dev_get_drvdata(dev);
+ struct cppi41_channel *c, *_c;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cdd->lock, flags);
+ list_for_each_entry_safe(c, _c, &cdd->pending, node) {
+ push_desc_queue(c);
+ list_del(&c->node);
+ }
+ spin_unlock_irqrestore(&cdd->lock, flags);
+
+ return 0;
+}
+
+static const struct dev_pm_ops cppi41_pm_ops = {
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(cppi41_suspend, cppi41_resume)
+ SET_RUNTIME_PM_OPS(cppi41_runtime_suspend,
+ cppi41_runtime_resume,
+ NULL)
+};
static struct platform_driver cpp41_dma_driver = {
.probe = cppi41_dma_probe,
diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c
index 9689b36..d50273f 100644
--- a/drivers/dma/dma-jz4740.c
+++ b/drivers/dma/dma-jz4740.c
@@ -21,8 +21,6 @@
#include <linux/irq.h>
#include <linux/clk.h>
-#include <asm/mach-jz4740/dma.h>
-
#include "virt-dma.h"
#define JZ_DMA_NR_CHANS 6
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
index dade7c4..7373b7a 100644
--- a/drivers/dma/dma-jz4780.c
+++ b/drivers/dma/dma-jz4780.c
@@ -324,8 +324,10 @@ static struct dma_async_tx_descriptor *jz4780_dma_prep_slave_sg(
sg_dma_address(&sgl[i]),
sg_dma_len(&sgl[i]),
direction);
- if (err < 0)
+ if (err < 0) {
+ jz4780_dma_desc_free(&jzchan->desc->vdesc);
return NULL;
+ }
desc->desc[i].dcm |= JZ_DMA_DCM_TIE;
@@ -368,8 +370,10 @@ static struct dma_async_tx_descriptor *jz4780_dma_prep_dma_cyclic(
for (i = 0; i < periods; i++) {
err = jz4780_dma_setup_hwdesc(jzchan, &desc->desc[i], buf_addr,
period_len, direction);
- if (err < 0)
+ if (err < 0) {
+ jz4780_dma_desc_free(&jzchan->desc->vdesc);
return NULL;
+ }
buf_addr += period_len;
@@ -396,7 +400,7 @@ static struct dma_async_tx_descriptor *jz4780_dma_prep_dma_cyclic(
return vchan_tx_prep(&jzchan->vchan, &desc->vdesc, flags);
}
-struct dma_async_tx_descriptor *jz4780_dma_prep_dma_memcpy(
+static struct dma_async_tx_descriptor *jz4780_dma_prep_dma_memcpy(
struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
size_t len, unsigned long flags)
{
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 8c9f45f..6b53526 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -997,6 +997,13 @@ int dma_async_device_register(struct dma_device *device)
}
chan->client_count = 0;
}
+
+ if (!chancnt) {
+ dev_err(device->dev, "%s: device has no channels!\n", __func__);
+ rc = -ENODEV;
+ goto err_out;
+ }
+
device->chancnt = chancnt;
mutex_lock(&dma_list_mutex);
diff --git a/drivers/dma/dmaengine.h b/drivers/dma/dmaengine.h
index 17f983a..882ff94 100644
--- a/drivers/dma/dmaengine.h
+++ b/drivers/dma/dmaengine.h
@@ -86,4 +86,88 @@ static inline void dma_set_residue(struct dma_tx_state *state, u32 residue)
state->residue = residue;
}
+struct dmaengine_desc_callback {
+ dma_async_tx_callback callback;
+ dma_async_tx_callback_result callback_result;
+ void *callback_param;
+};
+
+/**
+ * dmaengine_desc_get_callback - get the passed in callback function
+ * @tx: tx descriptor
+ * @cb: temp struct to hold the callback info
+ *
+ * Fill the passed in cb struct with what's available in the passed in
+ * tx descriptor struct
+ * No locking is required.
+ */
+static inline void
+dmaengine_desc_get_callback(struct dma_async_tx_descriptor *tx,
+ struct dmaengine_desc_callback *cb)
+{
+ cb->callback = tx->callback;
+ cb->callback_result = tx->callback_result;
+ cb->callback_param = tx->callback_param;
+}
+
+/**
+ * dmaengine_desc_callback_invoke - call the callback function in cb struct
+ * @cb: temp struct that is holding the callback info
+ * @result: transaction result
+ *
+ * Call the callback function provided in the cb struct with the parameter
+ * in the cb struct.
+ * Locking is dependent on the driver.
+ */
+static inline void
+dmaengine_desc_callback_invoke(struct dmaengine_desc_callback *cb,
+ const struct dmaengine_result *result)
+{
+ struct dmaengine_result dummy_result = {
+ .result = DMA_TRANS_NOERROR,
+ .residue = 0
+ };
+
+ if (cb->callback_result) {
+ if (!result)
+ result = &dummy_result;
+ cb->callback_result(cb->callback_param, result);
+ } else if (cb->callback) {
+ cb->callback(cb->callback_param);
+ }
+}
+
+/**
+ * dmaengine_desc_get_callback_invoke - get the callback in tx descriptor and
+ * then immediately call the callback.
+ * @tx: dma async tx descriptor
+ * @result: transaction result
+ *
+ * Call dmaengine_desc_get_callback() and dmaengine_desc_callback_invoke()
+ * in a single function since no work is necessary in between for the driver.
+ * Locking is dependent on the driver.
+ */
+static inline void
+dmaengine_desc_get_callback_invoke(struct dma_async_tx_descriptor *tx,
+ const struct dmaengine_result *result)
+{
+ struct dmaengine_desc_callback cb;
+
+ dmaengine_desc_get_callback(tx, &cb);
+ dmaengine_desc_callback_invoke(&cb, result);
+}
+
+/**
+ * dmaengine_desc_callback_valid - verify the callback is valid in cb
+ * @cb: callback info struct
+ *
+ * Return a bool that verifies whether callback in cb is valid or not.
+ * No locking is required.
+ */
+static inline bool
+dmaengine_desc_callback_valid(struct dmaengine_desc_callback *cb)
+{
+ return (cb->callback) ? true : false;
+}
+
#endif
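
These helpers are the heart of the series: a driver snapshots the callback pair while holding its lock, then invokes it after dropping the lock, and a NULL result makes dmaengine_desc_callback_invoke() fall back to the legacy callback (or a dummy DMA_TRANS_NOERROR result for callback_result users). A minimal sketch of the conversion pattern most drivers in this diff follow; the lock handling is an assumption, not taken from any one driver:

#include <linux/spinlock.h>
#include "dmaengine.h"

static void example_complete_desc(struct dma_async_tx_descriptor *txd,
				  spinlock_t *lock)
{
	struct dmaengine_desc_callback cb;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	dma_cookie_complete(txd);
	dmaengine_desc_get_callback(txd, &cb);	/* snapshot under the lock */
	spin_unlock_irqrestore(lock, flags);

	/* NULL result: plain callback, or callback_result with NOERROR */
	dmaengine_desc_callback_invoke(&cb, NULL);
}
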
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 1245db5..cf76fc6 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -56,10 +56,10 @@ module_param(sg_buffers, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(sg_buffers,
"Number of scatter gather buffers (default: 1)");
-static unsigned int dmatest = 1;
+static unsigned int dmatest;
module_param(dmatest, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dmatest,
- "dmatest 0-memcpy 1-slave_sg (default: 1)");
+ "dmatest 0-memcpy 1-slave_sg (default: 0)");
static unsigned int xor_sources = 3;
module_param(xor_sources, uint, S_IRUGO | S_IWUSR);
@@ -426,7 +426,9 @@ static int dmatest_func(void *data)
int src_cnt;
int dst_cnt;
int i;
- ktime_t ktime;
+ ktime_t ktime, start, diff;
+ ktime_t filltime = ktime_set(0, 0);
+ ktime_t comparetime = ktime_set(0, 0);
s64 runtime = 0;
unsigned long long total_len = 0;
@@ -503,7 +505,7 @@ static int dmatest_func(void *data)
total_tests++;
/* honor alignment restrictions */
- if (thread->type == DMA_MEMCPY)
+ if (thread->type == DMA_MEMCPY || thread->type == DMA_SG)
align = dev->copy_align;
else if (thread->type == DMA_XOR)
align = dev->xor_align;
@@ -531,6 +533,7 @@ static int dmatest_func(void *data)
src_off = 0;
dst_off = 0;
} else {
+ start = ktime_get();
src_off = dmatest_random() % (params->buf_size - len + 1);
dst_off = dmatest_random() % (params->buf_size - len + 1);
@@ -541,6 +544,9 @@ static int dmatest_func(void *data)
params->buf_size);
dmatest_init_dsts(thread->dsts, dst_off, len,
params->buf_size);
+
+ diff = ktime_sub(ktime_get(), start);
+ filltime = ktime_add(filltime, diff);
}
um = dmaengine_get_unmap_data(dev->dev, src_cnt+dst_cnt,
@@ -683,6 +689,7 @@ static int dmatest_func(void *data)
continue;
}
+ start = ktime_get();
pr_debug("%s: verifying source buffer...\n", current->comm);
error_count = dmatest_verify(thread->srcs, 0, src_off,
0, PATTERN_SRC, true);
@@ -703,6 +710,9 @@ static int dmatest_func(void *data)
params->buf_size, dst_off + len,
PATTERN_DST, false);
+ diff = ktime_sub(ktime_get(), start);
+ comparetime = ktime_add(comparetime, diff);
+
if (error_count) {
result("data error", total_tests, src_off, dst_off,
len, error_count);
@@ -712,7 +722,10 @@ static int dmatest_func(void *data)
dst_off, len, 0);
}
}
- runtime = ktime_us_delta(ktime_get(), ktime);
+ ktime = ktime_sub(ktime_get(), ktime);
+ ktime = ktime_sub(ktime, comparetime);
+ ktime = ktime_sub(ktime, filltime);
+ runtime = ktime_to_us(ktime);
ret = 0;
err_dstbuf:
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index edf053f..12eedd4 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -270,20 +270,19 @@ static void
dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
bool callback_required)
{
- dma_async_tx_callback callback = NULL;
- void *param = NULL;
struct dma_async_tx_descriptor *txd = &desc->txd;
struct dw_desc *child;
unsigned long flags;
+ struct dmaengine_desc_callback cb;
dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);
spin_lock_irqsave(&dwc->lock, flags);
dma_cookie_complete(txd);
- if (callback_required) {
- callback = txd->callback;
- param = txd->callback_param;
- }
+ if (callback_required)
+ dmaengine_desc_get_callback(txd, &cb);
+ else
+ memset(&cb, 0, sizeof(cb));
/* async_tx_ack */
list_for_each_entry(child, &desc->tx_list, desc_node)
@@ -292,8 +291,7 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
dwc_desc_put(dwc, desc);
spin_unlock_irqrestore(&dwc->lock, flags);
- if (callback)
- callback(param);
+ dmaengine_desc_callback_invoke(&cb, NULL);
}
static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 3d277fa..e18a580 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -263,22 +263,29 @@ static const struct edmacc_param dummy_paramset = {
#define EDMA_BINDING_LEGACY 0
#define EDMA_BINDING_TPCC 1
+static const u32 edma_binding_type[] = {
+ [EDMA_BINDING_LEGACY] = EDMA_BINDING_LEGACY,
+ [EDMA_BINDING_TPCC] = EDMA_BINDING_TPCC,
+};
+
static const struct of_device_id edma_of_ids[] = {
{
.compatible = "ti,edma3",
- .data = (void *)EDMA_BINDING_LEGACY,
+ .data = &edma_binding_type[EDMA_BINDING_LEGACY],
},
{
.compatible = "ti,edma3-tpcc",
- .data = (void *)EDMA_BINDING_TPCC,
+ .data = &edma_binding_type[EDMA_BINDING_TPCC],
},
{}
};
+MODULE_DEVICE_TABLE(of, edma_of_ids);
static const struct of_device_id edma_tptc_of_ids[] = {
{ .compatible = "ti,edma3-tptc", },
{}
};
+MODULE_DEVICE_TABLE(of, edma_tptc_of_ids);
static inline unsigned int edma_read(struct edma_cc *ecc, int offset)
{
@@ -405,18 +412,12 @@ static inline void edma_param_or(struct edma_cc *ecc, int offset, int param_no,
edma_or(ecc, EDMA_PARM + offset + (param_no << 5), or);
}
-static inline void set_bits(int offset, int len, unsigned long *p)
+static inline void edma_set_bits(int offset, int len, unsigned long *p)
{
for (; len > 0; len--)
set_bit(offset + (len - 1), p);
}
-static inline void clear_bits(int offset, int len, unsigned long *p)
-{
- for (; len > 0; len--)
- clear_bit(offset + (len - 1), p);
-}
-
static void edma_assign_priority_to_queue(struct edma_cc *ecc, int queue_no,
int priority)
{
@@ -464,13 +465,15 @@ static void edma_write_slot(struct edma_cc *ecc, unsigned slot,
memcpy_toio(ecc->base + PARM_OFFSET(slot), param, PARM_SIZE);
}
-static void edma_read_slot(struct edma_cc *ecc, unsigned slot,
+static int edma_read_slot(struct edma_cc *ecc, unsigned slot,
struct edmacc_param *param)
{
slot = EDMA_CHAN_SLOT(slot);
if (slot >= ecc->num_slots)
- return;
+ return -EINVAL;
memcpy_fromio(param, ecc->base + PARM_OFFSET(slot), PARM_SIZE);
+
+ return 0;
}
/**
@@ -1476,13 +1479,15 @@ static void edma_error_handler(struct edma_chan *echan)
struct edma_cc *ecc = echan->ecc;
struct device *dev = echan->vchan.chan.device->dev;
struct edmacc_param p;
+ int err;
if (!echan->edesc)
return;
spin_lock(&echan->vchan.lock);
- edma_read_slot(ecc, echan->slot[0], &p);
+ err = edma_read_slot(ecc, echan->slot[0], &p);
+
/*
* Issue later based on missed flag which will be sure
* to happen as:
@@ -1495,7 +1500,7 @@ static void edma_error_handler(struct edma_chan *echan)
* lead to some nasty recursion when we are in a NULL
* slot. So we avoid doing so and set the missed flag.
*/
- if (p.a_b_cnt == 0 && p.ccnt == 0) {
+ if (err || (p.a_b_cnt == 0 && p.ccnt == 0)) {
dev_dbg(dev, "Error on null slot, setting miss\n");
echan->missed = 1;
} else {
@@ -2019,8 +2024,7 @@ static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev,
{
struct edma_soc_info *info;
struct property *prop;
- size_t sz;
- int ret;
+ int sz, ret;
info = devm_kzalloc(dev, sizeof(struct edma_soc_info), GFP_KERNEL);
if (!info)
@@ -2182,7 +2186,7 @@ static int edma_probe(struct platform_device *pdev)
const struct of_device_id *match;
match = of_match_node(edma_of_ids, node);
- if (match && (u32)match->data == EDMA_BINDING_TPCC)
+ if (match && (*(u32 *)match->data) == EDMA_BINDING_TPCC)
legacy_mode = false;
info = edma_setup_info_from_dt(dev, legacy_mode);
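
The of_device_id change above avoids casting the .data pointer to u32, which truncates (and warns) on 64-bit builds; instead the table stores a pointer to a const u32 and the probe path dereferences it. Hedged sketch with assumed names:

#include <linux/of.h>

static const u32 example_binding_tpcc = 1;

static const struct of_device_id example_of_ids[] = {
	{ .compatible = "ti,edma3-tpcc", .data = &example_binding_tpcc },
	{ /* sentinel */ }
};

static bool example_is_tpcc(struct device_node *node)
{
	const struct of_device_id *match = of_match_node(example_of_ids, node);

	/* dereference the stored pointer instead of casting it to u32 */
	return match && *(const u32 *)match->data == 1;
}
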
@@ -2260,7 +2264,7 @@ static int edma_probe(struct platform_device *pdev)
for (i = 0; rsv_slots[i][0] != -1; i++) {
off = rsv_slots[i][0];
ln = rsv_slots[i][1];
- set_bits(off, ln, ecc->slot_inuse);
+ edma_set_bits(off, ln, ecc->slot_inuse);
}
}
}
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index 21f08cc..d37e8dd 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -262,10 +262,8 @@ static void ep93xx_dma_set_active(struct ep93xx_dma_chan *edmac,
static struct ep93xx_dma_desc *
ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
{
- if (list_empty(&edmac->active))
- return NULL;
-
- return list_first_entry(&edmac->active, struct ep93xx_dma_desc, node);
+ return list_first_entry_or_null(&edmac->active,
+ struct ep93xx_dma_desc, node);
}
/**
@@ -739,10 +737,10 @@ static void ep93xx_dma_tasklet(unsigned long data)
{
struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data;
struct ep93xx_dma_desc *desc, *d;
- dma_async_tx_callback callback = NULL;
- void *callback_param = NULL;
+ struct dmaengine_desc_callback cb;
LIST_HEAD(list);
+ memset(&cb, 0, sizeof(cb));
spin_lock_irq(&edmac->lock);
/*
* If dma_terminate_all() was called before we get to run, the active
@@ -757,8 +755,7 @@ static void ep93xx_dma_tasklet(unsigned long data)
dma_cookie_complete(&desc->txd);
list_splice_init(&edmac->active, &list);
}
- callback = desc->txd.callback;
- callback_param = desc->txd.callback_param;
+ dmaengine_desc_get_callback(&desc->txd, &cb);
}
spin_unlock_irq(&edmac->lock);
@@ -771,8 +768,7 @@ static void ep93xx_dma_tasklet(unsigned long data)
ep93xx_dma_desc_put(edmac, desc);
}
- if (callback)
- callback(callback_param);
+ dmaengine_desc_callback_invoke(&cb, NULL);
}
static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id)
@@ -1047,11 +1043,11 @@ ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
first = NULL;
for_each_sg(sgl, sg, sg_len, i) {
- size_t sg_len = sg_dma_len(sg);
+ size_t len = sg_dma_len(sg);
- if (sg_len > DMA_MAX_CHAN_BYTES) {
- dev_warn(chan2dev(edmac), "too big transfer size %d\n",
- sg_len);
+ if (len > DMA_MAX_CHAN_BYTES) {
+ dev_warn(chan2dev(edmac), "too big transfer size %zu\n",
+ len);
goto fail;
}
@@ -1068,7 +1064,7 @@ ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
desc->src_addr = edmac->runtime_addr;
desc->dst_addr = sg_dma_address(sg);
}
- desc->size = sg_len;
+ desc->size = len;
if (!first)
first = desc;
@@ -1125,7 +1121,7 @@ ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
}
if (period_len > DMA_MAX_CHAN_BYTES) {
- dev_warn(chan2dev(edmac), "too big period length %d\n",
+ dev_warn(chan2dev(edmac), "too big period length %zu\n",
period_len);
return NULL;
}
diff --git a/drivers/dma/fsl_raid.c b/drivers/dma/fsl_raid.c
index aad167e..40c58ae 100644
--- a/drivers/dma/fsl_raid.c
+++ b/drivers/dma/fsl_raid.c
@@ -134,16 +134,8 @@ static void fsl_re_issue_pending(struct dma_chan *chan)
static void fsl_re_desc_done(struct fsl_re_desc *desc)
{
- dma_async_tx_callback callback;
- void *callback_param;
-
dma_cookie_complete(&desc->async_tx);
-
- callback = desc->async_tx.callback;
- callback_param = desc->async_tx.callback_param;
- if (callback)
- callback(callback_param);
-
+ dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL);
dma_descriptor_unmap(&desc->async_tx);
}
@@ -670,7 +662,7 @@ static int fsl_re_chan_probe(struct platform_device *ofdev,
/* read irq property from dts */
chan->irq = irq_of_parse_and_map(np, 0);
- if (chan->irq == NO_IRQ) {
+ if (!chan->irq) {
dev_err(dev, "No IRQ defined for JR %d\n", q);
ret = -ENODEV;
goto err_free;
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 911b717..87f6ab2 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -517,11 +517,7 @@ static dma_cookie_t fsldma_run_tx_complete_actions(struct fsldma_chan *chan,
ret = txd->cookie;
/* Run the link descriptor callback function */
- if (txd->callback) {
- chan_dbg(chan, "LD %p callback\n", desc);
- txd->callback(txd->callback_param);
- }
-
+ dmaengine_desc_get_callback_invoke(txd, NULL);
dma_descriptor_unmap(txd);
}
@@ -1153,7 +1149,7 @@ static void fsldma_free_irqs(struct fsldma_device *fdev)
struct fsldma_chan *chan;
int i;
- if (fdev->irq != NO_IRQ) {
+ if (fdev->irq) {
dev_dbg(fdev->dev, "free per-controller IRQ\n");
free_irq(fdev->irq, fdev);
return;
@@ -1161,7 +1157,7 @@ static void fsldma_free_irqs(struct fsldma_device *fdev)
for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
chan = fdev->chan[i];
- if (chan && chan->irq != NO_IRQ) {
+ if (chan && chan->irq) {
chan_dbg(chan, "free per-channel IRQ\n");
free_irq(chan->irq, chan);
}
@@ -1175,7 +1171,7 @@ static int fsldma_request_irqs(struct fsldma_device *fdev)
int i;
/* if we have a per-controller IRQ, use that */
- if (fdev->irq != NO_IRQ) {
+ if (fdev->irq) {
dev_dbg(fdev->dev, "request per-controller IRQ\n");
ret = request_irq(fdev->irq, fsldma_ctrl_irq, IRQF_SHARED,
"fsldma-controller", fdev);
@@ -1188,7 +1184,7 @@ static int fsldma_request_irqs(struct fsldma_device *fdev)
if (!chan)
continue;
- if (chan->irq == NO_IRQ) {
+ if (!chan->irq) {
chan_err(chan, "interrupts property missing in device tree\n");
ret = -ENODEV;
goto out_unwind;
@@ -1211,7 +1207,7 @@ out_unwind:
if (!chan)
continue;
- if (chan->irq == NO_IRQ)
+ if (!chan->irq)
continue;
free_irq(chan->irq, chan);
@@ -1311,7 +1307,7 @@ static int fsl_dma_chan_probe(struct fsldma_device *fdev,
list_add_tail(&chan->common.device_node, &fdev->common.channels);
dev_info(fdev->dev, "#%d (%s), irq %d\n", chan->id, compatible,
- chan->irq != NO_IRQ ? chan->irq : fdev->irq);
+ chan->irq ? chan->irq : fdev->irq);
return 0;
@@ -1351,7 +1347,7 @@ static int fsldma_of_probe(struct platform_device *op)
if (!fdev->regs) {
dev_err(&op->dev, "unable to ioremap registers\n");
err = -ENOMEM;
- goto out_free_fdev;
+ goto out_free;
}
/* map the channel IRQ if it exists, but don't hookup the handler yet */
@@ -1416,6 +1412,8 @@ static int fsldma_of_probe(struct platform_device *op)
out_free_fdev:
irq_dispose_mapping(fdev->irq);
+ iounmap(fdev->regs);
+out_free:
kfree(fdev);
out_return:
return err;
diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c
index c5f21ef..29d04ca 100644
--- a/drivers/dma/hsu/hsu.c
+++ b/drivers/dma/hsu/hsu.c
@@ -200,10 +200,9 @@ EXPORT_SYMBOL_GPL(hsu_dma_get_status);
* is not a normal timeout interrupt, ie. hsu_dma_get_status() returned 0.
*
* Return:
- * IRQ_NONE for invalid channel number, IRQ_HANDLED otherwise.
+ * 0 for invalid channel number, 1 otherwise.
*/
-irqreturn_t hsu_dma_do_irq(struct hsu_dma_chip *chip, unsigned short nr,
- u32 status)
+int hsu_dma_do_irq(struct hsu_dma_chip *chip, unsigned short nr, u32 status)
{
struct hsu_dma_chan *hsuc;
struct hsu_dma_desc *desc;
@@ -211,7 +210,7 @@ irqreturn_t hsu_dma_do_irq(struct hsu_dma_chip *chip, unsigned short nr,
/* Sanity check */
if (nr >= chip->hsu->nr_channels)
- return IRQ_NONE;
+ return 0;
hsuc = &chip->hsu->chan[nr];
@@ -230,7 +229,7 @@ irqreturn_t hsu_dma_do_irq(struct hsu_dma_chip *chip, unsigned short nr,
}
spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
- return IRQ_HANDLED;
+ return 1;
}
EXPORT_SYMBOL_GPL(hsu_dma_do_irq);
diff --git a/drivers/dma/hsu/pci.c b/drivers/dma/hsu/pci.c
index 9916058..b51639f 100644
--- a/drivers/dma/hsu/pci.c
+++ b/drivers/dma/hsu/pci.c
@@ -29,7 +29,7 @@ static irqreturn_t hsu_pci_irq(int irq, void *dev)
u32 dmaisr;
u32 status;
unsigned short i;
- irqreturn_t ret = IRQ_NONE;
+ int ret = 0;
int err;
dmaisr = readl(chip->regs + HSU_PCI_DMAISR);
@@ -37,14 +37,14 @@ static irqreturn_t hsu_pci_irq(int irq, void *dev)
if (dmaisr & 0x1) {
err = hsu_dma_get_status(chip, i, &status);
if (err > 0)
- ret |= IRQ_HANDLED;
+ ret |= 1;
else if (err == 0)
ret |= hsu_dma_do_irq(chip, i, status);
}
dmaisr >>= 1;
}
- return ret;
+ return IRQ_RETVAL(ret);
}
static int hsu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
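
With hsu_dma_do_irq() now returning a plain 0/1 "handled" flag instead of an irqreturn_t, the PCI ISR accumulates the flags and converts once at the boundary with IRQ_RETVAL(), which yields IRQ_HANDLED for nonzero and IRQ_NONE otherwise. Minimal sketch (helper name assumed):

#include <linux/interrupt.h>

int example_do_irq(void *dev);	/* returns 0 (not ours) or 1 (handled) */

static irqreturn_t example_isr(int irq, void *dev)
{
	int handled = 0;

	handled |= example_do_irq(dev);

	return IRQ_RETVAL(handled);	/* IRQ_HANDLED if nonzero */
}
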
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index a960608..ab0fb80 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -663,9 +663,7 @@ static void imxdma_tasklet(unsigned long data)
out:
spin_unlock_irqrestore(&imxdma->lock, flags);
- if (desc->desc.callback)
- desc->desc.callback(desc->desc.callback_param);
-
+ dmaengine_desc_get_callback_invoke(&desc->desc, NULL);
}
static int imxdma_terminate_all(struct dma_chan *chan)
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 03ec76f..a6bffbc 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -184,7 +184,7 @@
struct sdma_mode_count {
u32 count : 16; /* size of the buffer pointed by this BD */
u32 status : 8; /* E,R,I,C,W,D status bits stored here */
- u32 command : 8; /* command mostlky used for channel 0 */
+ u32 command : 8; /* command mostly used for channel 0 */
};
/*
@@ -479,6 +479,24 @@ static struct sdma_driver_data sdma_imx6q = {
.script_addrs = &sdma_script_imx6q,
};
+static struct sdma_script_start_addrs sdma_script_imx7d = {
+ .ap_2_ap_addr = 644,
+ .uart_2_mcu_addr = 819,
+ .mcu_2_app_addr = 749,
+ .uartsh_2_mcu_addr = 1034,
+ .mcu_2_shp_addr = 962,
+ .app_2_mcu_addr = 685,
+ .shp_2_mcu_addr = 893,
+ .spdif_2_mcu_addr = 1102,
+ .mcu_2_spdif_addr = 1136,
+};
+
+static struct sdma_driver_data sdma_imx7d = {
+ .chnenbl0 = SDMA_CHNENBL0_IMX35,
+ .num_events = 48,
+ .script_addrs = &sdma_script_imx7d,
+};
+
static const struct platform_device_id sdma_devtypes[] = {
{
.name = "imx25-sdma",
@@ -499,6 +517,9 @@ static const struct platform_device_id sdma_devtypes[] = {
.name = "imx6q-sdma",
.driver_data = (unsigned long)&sdma_imx6q,
}, {
+ .name = "imx7d-sdma",
+ .driver_data = (unsigned long)&sdma_imx7d,
+ }, {
/* sentinel */
}
};
@@ -511,6 +532,7 @@ static const struct of_device_id sdma_dt_ids[] = {
{ .compatible = "fsl,imx35-sdma", .data = &sdma_imx35, },
{ .compatible = "fsl,imx31-sdma", .data = &sdma_imx31, },
{ .compatible = "fsl,imx25-sdma", .data = &sdma_imx25, },
+ { .compatible = "fsl,imx7d-sdma", .data = &sdma_imx7d, },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sdma_dt_ids);
@@ -650,8 +672,7 @@ static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
static void sdma_handle_channel_loop(struct sdma_channel *sdmac)
{
- if (sdmac->desc.callback)
- sdmac->desc.callback(sdmac->desc.callback_param);
+ dmaengine_desc_get_callback_invoke(&sdmac->desc, NULL);
}
static void sdma_update_channel_loop(struct sdma_channel *sdmac)
@@ -701,8 +722,8 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
sdmac->status = DMA_COMPLETE;
dma_cookie_complete(&sdmac->desc);
- if (sdmac->desc.callback)
- sdmac->desc.callback(sdmac->desc.callback_param);
+
+ dmaengine_desc_get_callback_invoke(&sdmac->desc, NULL);
}
static void sdma_tasklet(unsigned long data)
@@ -1375,6 +1396,7 @@ static void sdma_issue_pending(struct dma_chan *chan)
#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1 34
#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2 38
#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3 41
+#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V4 42
static void sdma_add_scripts(struct sdma_engine *sdma,
const struct sdma_script_start_addrs *addr)
@@ -1424,6 +1446,9 @@ static void sdma_load_firmware(const struct firmware *fw, void *context)
case 3:
sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3;
break;
+ case 4:
+ sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V4;
+ break;
default:
dev_err(sdma->dev, "unknown firmware version\n");
goto err_firmware;
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index bd09961..49386ce0 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -38,8 +38,54 @@
#include "../dmaengine.h"
+static char *chanerr_str[] = {
+ "DMA Transfer Destination Address Error",
+ "Next Descriptor Address Error",
+ "Descriptor Error",
+ "Chan Address Value Error",
+ "CHANCMD Error",
+ "Chipset Uncorrectable Data Integrity Error",
+ "DMA Uncorrectable Data Integrity Error",
+ "Read Data Error",
+ "Write Data Error",
+ "Descriptor Control Error",
+ "Descriptor Transfer Size Error",
+ "Completion Address Error",
+ "Interrupt Configuration Error",
+ "Super extended descriptor Address Error",
+ "Unaffiliated Error",
+ "CRC or XOR P Error",
+ "XOR Q Error",
+ "Descriptor Count Error",
+ "DIF All F detect Error",
+ "Guard Tag verification Error",
+ "Application Tag verification Error",
+ "Reference Tag verification Error",
+ "Bundle Bit Error",
+ "Result DIF All F detect Error",
+ "Result Guard Tag verification Error",
+ "Result Application Tag verification Error",
+ "Result Reference Tag verification Error",
+ NULL
+};
+
static void ioat_eh(struct ioatdma_chan *ioat_chan);
+static void ioat_print_chanerrs(struct ioatdma_chan *ioat_chan, u32 chanerr)
+{
+ int i;
+
+ for (i = 0; i < 32; i++) {
+ if ((chanerr >> i) & 1) {
+ if (chanerr_str[i]) {
+ dev_err(to_dev(ioat_chan), "Err(%d): %s\n",
+ i, chanerr_str[i]);
+ } else
+ break;
+ }
+ }
+}
+
/**
* ioat_dma_do_interrupt - handler used for single vector interrupt mode
* @irq: interrupt id
@@ -568,12 +614,14 @@ static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
tx = &desc->txd;
if (tx->cookie) {
+ struct dmaengine_result res;
+
dma_cookie_complete(tx);
dma_descriptor_unmap(tx);
- if (tx->callback) {
- tx->callback(tx->callback_param);
- tx->callback = NULL;
- }
+ res.result = DMA_TRANS_NOERROR;
+ dmaengine_desc_get_callback_invoke(tx, NULL);
+ tx->callback = NULL;
+ tx->callback_result = NULL;
}
if (tx->phys == phys_complete)
@@ -622,7 +670,8 @@ static void ioat_cleanup(struct ioatdma_chan *ioat_chan)
if (is_ioat_halted(*ioat_chan->completion)) {
u32 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
- if (chanerr & IOAT_CHANERR_HANDLE_MASK) {
+ if (chanerr &
+ (IOAT_CHANERR_HANDLE_MASK | IOAT_CHANERR_RECOVER_MASK)) {
mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
ioat_eh(ioat_chan);
}
@@ -652,6 +701,61 @@ static void ioat_restart_channel(struct ioatdma_chan *ioat_chan)
__ioat_restart_chan(ioat_chan);
}
+
+static void ioat_abort_descs(struct ioatdma_chan *ioat_chan)
+{
+ struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
+ struct ioat_ring_ent *desc;
+ u16 active;
+ int idx = ioat_chan->tail, i;
+
+ /*
+ * We assume that the failed descriptor has been processed.
+ * Now we are just returning all the remaining submitted
+ * descriptors to abort.
+ */
+ active = ioat_ring_active(ioat_chan);
+
+ /* we skip the failed descriptor that tail points to */
+ for (i = 1; i < active; i++) {
+ struct dma_async_tx_descriptor *tx;
+
+ smp_read_barrier_depends();
+ prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1));
+ desc = ioat_get_ring_ent(ioat_chan, idx + i);
+
+ tx = &desc->txd;
+ if (tx->cookie) {
+ struct dmaengine_result res;
+
+ dma_cookie_complete(tx);
+ dma_descriptor_unmap(tx);
+ res.result = DMA_TRANS_ABORTED;
+ dmaengine_desc_get_callback_invoke(tx, &res);
+ tx->callback = NULL;
+ tx->callback_result = NULL;
+ }
+
+ /* skip extended descriptors */
+ if (desc_has_ext(desc)) {
+ WARN_ON(i + 1 >= active);
+ i++;
+ }
+
+ /* cleanup super extended descriptors */
+ if (desc->sed) {
+ ioat_free_sed(ioat_dma, desc->sed);
+ desc->sed = NULL;
+ }
+ }
+
+ smp_mb(); /* finish all descriptor reads before incrementing tail */
+ ioat_chan->tail = idx + active;
+
+ desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
+ ioat_chan->last_completion = *ioat_chan->completion = desc->txd.phys;
+}
+
static void ioat_eh(struct ioatdma_chan *ioat_chan)
{
struct pci_dev *pdev = to_pdev(ioat_chan);
@@ -662,6 +766,8 @@ static void ioat_eh(struct ioatdma_chan *ioat_chan)
u32 err_handled = 0;
u32 chanerr_int;
u32 chanerr;
+ bool abort = false;
+ struct dmaengine_result res;
/* cleanup so tail points to descriptor that caused the error */
if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
@@ -697,30 +803,55 @@ static void ioat_eh(struct ioatdma_chan *ioat_chan)
break;
}
+ if (chanerr & IOAT_CHANERR_RECOVER_MASK) {
+ if (chanerr & IOAT_CHANERR_READ_DATA_ERR) {
+ res.result = DMA_TRANS_READ_FAILED;
+ err_handled |= IOAT_CHANERR_READ_DATA_ERR;
+ } else if (chanerr & IOAT_CHANERR_WRITE_DATA_ERR) {
+ res.result = DMA_TRANS_WRITE_FAILED;
+ err_handled |= IOAT_CHANERR_WRITE_DATA_ERR;
+ }
+
+ abort = true;
+ } else
+ res.result = DMA_TRANS_NOERROR;
+
/* fault on unhandled error or spurious halt */
if (chanerr ^ err_handled || chanerr == 0) {
dev_err(to_dev(ioat_chan), "%s: fatal error (%x:%x)\n",
__func__, chanerr, err_handled);
+ dev_err(to_dev(ioat_chan), "Errors handled:\n");
+ ioat_print_chanerrs(ioat_chan, err_handled);
+ dev_err(to_dev(ioat_chan), "Errors not handled:\n");
+ ioat_print_chanerrs(ioat_chan, (chanerr & ~err_handled));
+
BUG();
- } else { /* cleanup the faulty descriptor */
- tx = &desc->txd;
- if (tx->cookie) {
- dma_cookie_complete(tx);
- dma_descriptor_unmap(tx);
- if (tx->callback) {
- tx->callback(tx->callback_param);
- tx->callback = NULL;
- }
- }
}
- writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
- pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int);
+ /* cleanup the faulty descriptor since we are continuing */
+ tx = &desc->txd;
+ if (tx->cookie) {
+ dma_cookie_complete(tx);
+ dma_descriptor_unmap(tx);
+ dmaengine_desc_get_callback_invoke(tx, &res);
+ tx->callback = NULL;
+ tx->callback_result = NULL;
+ }
/* mark faulting descriptor as complete */
*ioat_chan->completion = desc->txd.phys;
spin_lock_bh(&ioat_chan->prep_lock);
+ /* we need abort all descriptors */
+ if (abort) {
+ ioat_abort_descs(ioat_chan);
+ /* clean up the channel, we could be in weird state */
+ ioat_reset_hw(ioat_chan);
+ }
+
+ writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+ pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int);
+
ioat_restart_channel(ioat_chan);
spin_unlock_bh(&ioat_chan->prep_lock);
}
@@ -753,10 +884,28 @@ void ioat_timer_event(unsigned long data)
chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
dev_err(to_dev(ioat_chan), "%s: Channel halted (%x)\n",
__func__, chanerr);
- if (test_bit(IOAT_RUN, &ioat_chan->state))
- BUG_ON(is_ioat_bug(chanerr));
- else /* we never got off the ground */
- return;
+ dev_err(to_dev(ioat_chan), "Errors:\n");
+ ioat_print_chanerrs(ioat_chan, chanerr);
+
+ if (test_bit(IOAT_RUN, &ioat_chan->state)) {
+ spin_lock_bh(&ioat_chan->cleanup_lock);
+ spin_lock_bh(&ioat_chan->prep_lock);
+ set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
+ spin_unlock_bh(&ioat_chan->prep_lock);
+
+ ioat_abort_descs(ioat_chan);
+ dev_warn(to_dev(ioat_chan), "Reset channel...\n");
+ ioat_reset_hw(ioat_chan);
+ dev_warn(to_dev(ioat_chan), "Restart channel...\n");
+ ioat_restart_channel(ioat_chan);
+
+ spin_lock_bh(&ioat_chan->prep_lock);
+ clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
+ spin_unlock_bh(&ioat_chan->prep_lock);
+ spin_unlock_bh(&ioat_chan->cleanup_lock);
+ }
+
+ return;
}
spin_lock_bh(&ioat_chan->cleanup_lock);
@@ -780,14 +929,26 @@ void ioat_timer_event(unsigned long data)
u32 chanerr;
chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
- dev_warn(to_dev(ioat_chan), "Restarting channel...\n");
- dev_warn(to_dev(ioat_chan), "CHANSTS: %#Lx CHANERR: %#x\n",
- status, chanerr);
- dev_warn(to_dev(ioat_chan), "Active descriptors: %d\n",
- ioat_ring_active(ioat_chan));
+ dev_err(to_dev(ioat_chan), "CHANSTS: %#Lx CHANERR: %#x\n",
+ status, chanerr);
+ dev_err(to_dev(ioat_chan), "Errors:\n");
+ ioat_print_chanerrs(ioat_chan, chanerr);
+
+ dev_dbg(to_dev(ioat_chan), "Active descriptors: %d\n",
+ ioat_ring_active(ioat_chan));
spin_lock_bh(&ioat_chan->prep_lock);
+ set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
+ spin_unlock_bh(&ioat_chan->prep_lock);
+
+ ioat_abort_descs(ioat_chan);
+ dev_warn(to_dev(ioat_chan), "Resetting channel...\n");
+ ioat_reset_hw(ioat_chan);
+ dev_warn(to_dev(ioat_chan), "Restarting channel...\n");
ioat_restart_channel(ioat_chan);
+
+ spin_lock_bh(&ioat_chan->prep_lock);
+ clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
spin_unlock_bh(&ioat_chan->prep_lock);
spin_unlock_bh(&ioat_chan->cleanup_lock);
return;
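
The ioat error path now reports per-descriptor outcomes through the dmaengine_result plumbing added in dmaengine.h above. A client that wants to see DMA_TRANS_ABORTED, DMA_TRANS_READ_FAILED, or DMA_TRANS_WRITE_FAILED sets callback_result instead of callback; a hedged consumer sketch (struct example_req and its fields are assumptions):

#include <linux/dmaengine.h>
#include <linux/completion.h>
#include <linux/errno.h>

struct example_req {
	struct completion done;
	int status;
};

static void example_done(void *param, const struct dmaengine_result *result)
{
	struct example_req *req = param;

	/* result->residue holds untransferred bytes on failure/abort */
	req->status = (result->result == DMA_TRANS_NOERROR) ? 0 : -EIO;
	complete(&req->done);
}

/* at submit time, instead of setting txd->callback:	*/
/*	txd->callback_result = example_done;		*/
/*	txd->callback_param  = req;			*/
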
diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h
index 7053498..48fa4cf 100644
--- a/drivers/dma/ioat/registers.h
+++ b/drivers/dma/ioat/registers.h
@@ -240,6 +240,8 @@
#define IOAT_CHANERR_DESCRIPTOR_COUNT_ERR 0x40000
#define IOAT_CHANERR_HANDLE_MASK (IOAT_CHANERR_XOR_P_OR_CRC_ERR | IOAT_CHANERR_XOR_Q_ERR)
+#define IOAT_CHANERR_RECOVER_MASK (IOAT_CHANERR_READ_DATA_ERR | \
+ IOAT_CHANERR_WRITE_DATA_ERR)
#define IOAT_CHANERR_MASK_OFFSET 0x2C /* 32-bit Channel Error Register */
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index f039cfa..a410657 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -71,8 +71,7 @@ iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
/* call the callback (must not sleep or submit new
* operations to this channel)
*/
- if (tx->callback)
- tx->callback(tx->callback_param);
+ dmaengine_desc_get_callback_invoke(tx, NULL);
dma_descriptor_unmap(tx);
if (desc->group_head)
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index b54f62d..ed76044 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -1160,11 +1160,10 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
struct scatterlist **sg, *sgnext, *sgnew = NULL;
/* Next transfer descriptor */
struct idmac_tx_desc *desc, *descnew;
- dma_async_tx_callback callback;
- void *callback_param;
bool done = false;
u32 ready0, ready1, curbuf, err;
unsigned long flags;
+ struct dmaengine_desc_callback cb;
/* IDMAC has cleared the respective BUFx_RDY bit, we manage the buffer */
@@ -1278,12 +1277,12 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
if (likely(sgnew) &&
ipu_submit_buffer(ichan, descnew, sgnew, ichan->active_buffer) < 0) {
- callback = descnew->txd.callback;
- callback_param = descnew->txd.callback_param;
+ dmaengine_desc_get_callback(&descnew->txd, &cb);
+
list_del_init(&descnew->list);
spin_unlock(&ichan->lock);
- if (callback)
- callback(callback_param);
+
+ dmaengine_desc_callback_invoke(&cb, NULL);
spin_lock(&ichan->lock);
}
@@ -1292,13 +1291,12 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
if (done)
dma_cookie_complete(&desc->txd);
- callback = desc->txd.callback;
- callback_param = desc->txd.callback_param;
+ dmaengine_desc_get_callback(&desc->txd, &cb);
spin_unlock(&ichan->lock);
- if (done && (desc->txd.flags & DMA_PREP_INTERRUPT) && callback)
- callback(callback_param);
+ if (done && (desc->txd.flags & DMA_PREP_INTERRUPT))
+ dmaengine_desc_callback_invoke(&cb, NULL);
return IRQ_HANDLED;
}
diff --git a/drivers/dma/mic_x100_dma.c b/drivers/dma/mic_x100_dma.c
index 1502b24..8182558 100644
--- a/drivers/dma/mic_x100_dma.c
+++ b/drivers/dma/mic_x100_dma.c
@@ -104,10 +104,8 @@ static void mic_dma_cleanup(struct mic_dma_chan *ch)
tx = &ch->tx_array[last_tail];
if (tx->cookie) {
dma_cookie_complete(tx);
- if (tx->callback) {
- tx->callback(tx->callback_param);
- tx->callback = NULL;
- }
+ dmaengine_desc_get_callback_invoke(tx, NULL);
+ tx->callback = NULL;
}
last_tail = mic_dma_hw_ring_inc(last_tail);
}
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index f4b25fb..eb3a1f4 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -864,19 +864,15 @@ static void dma_do_tasklet(unsigned long data)
struct mmp_pdma_desc_sw *desc, *_desc;
LIST_HEAD(chain_cleanup);
unsigned long flags;
+ struct dmaengine_desc_callback cb;
if (chan->cyclic_first) {
- dma_async_tx_callback cb = NULL;
- void *cb_data = NULL;
-
spin_lock_irqsave(&chan->desc_lock, flags);
desc = chan->cyclic_first;
- cb = desc->async_tx.callback;
- cb_data = desc->async_tx.callback_param;
+ dmaengine_desc_get_callback(&desc->async_tx, &cb);
spin_unlock_irqrestore(&chan->desc_lock, flags);
- if (cb)
- cb(cb_data);
+ dmaengine_desc_callback_invoke(&cb, NULL);
return;
}
@@ -921,8 +917,8 @@ static void dma_do_tasklet(unsigned long data)
/* Remove from the list of transactions */
list_del(&desc->node);
/* Run the link descriptor callback function */
- if (txd->callback)
- txd->callback(txd->callback_param);
+ dmaengine_desc_get_callback(txd, &cb);
+ dmaengine_desc_callback_invoke(&cb, NULL);
dma_pool_free(chan->desc_pool, desc, txd->phys);
}
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index b3441f5..13c68b6 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -349,9 +349,7 @@ static void dma_do_tasklet(unsigned long data)
{
struct mmp_tdma_chan *tdmac = (struct mmp_tdma_chan *)data;
- if (tdmac->desc.callback)
- tdmac->desc.callback(tdmac->desc.callback_param);
-
+ dmaengine_desc_get_callback_invoke(&tdmac->desc, NULL);
}
static void mmp_tdma_free_descriptor(struct mmp_tdma_chan *tdmac)
@@ -433,7 +431,7 @@ static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic(
if (period_len > TDMA_MAX_XFER_BYTES) {
dev_err(tdmac->dev,
- "maximum period size exceeded: %d > %d\n",
+ "maximum period size exceeded: %zu > %d\n",
period_len, TDMA_MAX_XFER_BYTES);
goto err_out;
}
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index fa86592..dde7134 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -411,8 +411,7 @@ static void mpc_dma_process_completed(struct mpc_dma *mdma)
list_for_each_entry(mdesc, &list, node) {
desc = &mdesc->desc;
- if (desc->callback)
- desc->callback(desc->callback_param);
+ dmaengine_desc_get_callback_invoke(desc, NULL);
last_cookie = desc->cookie;
dma_run_dependencies(desc);
@@ -926,7 +925,7 @@ static int mpc_dma_probe(struct platform_device *op)
}
mdma->irq = irq_of_parse_and_map(dn, 0);
- if (mdma->irq == NO_IRQ) {
+ if (!mdma->irq) {
dev_err(dev, "Error mapping IRQ!\n");
retval = -EINVAL;
goto err;
@@ -935,7 +934,7 @@ static int mpc_dma_probe(struct platform_device *op)
if (of_device_is_compatible(dn, "fsl,mpc8308-dma")) {
mdma->is_mpc8308 = 1;
mdma->irq2 = irq_of_parse_and_map(dn, 1);
- if (mdma->irq2 == NO_IRQ) {
+ if (!mdma->irq2) {
dev_err(dev, "Error mapping IRQ!\n");
retval = -EINVAL;
goto err_dispose1;
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index f4c9f98..f8b5e74 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -209,10 +209,7 @@ mv_desc_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
/* call the callback (must not sleep or submit new
* operations to this channel)
*/
- if (desc->async_tx.callback)
- desc->async_tx.callback(
- desc->async_tx.callback_param);
-
+ dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL);
dma_descriptor_unmap(&desc->async_tx);
}
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index 60de352..50e64e1 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -326,8 +326,7 @@ static void mxs_dma_tasklet(unsigned long data)
{
struct mxs_dma_chan *mxs_chan = (struct mxs_dma_chan *) data;
- if (mxs_chan->desc.callback)
- mxs_chan->desc.callback(mxs_chan->desc.callback_param);
+ dmaengine_desc_get_callback_invoke(&mxs_chan->desc, NULL);
}
static int mxs_dma_irq_to_chan(struct mxs_dma_engine *mxs_dma, int irq)
diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c
index 08c45c1..09de715 100644
--- a/drivers/dma/nbpfaxi.c
+++ b/drivers/dma/nbpfaxi.c
@@ -1102,8 +1102,7 @@ static void nbpf_chan_tasklet(unsigned long data)
{
struct nbpf_channel *chan = (struct nbpf_channel *)data;
struct nbpf_desc *desc, *tmp;
- dma_async_tx_callback callback;
- void *param;
+ struct dmaengine_desc_callback cb;
while (!list_empty(&chan->done)) {
bool found = false, must_put, recycling = false;
@@ -1151,14 +1150,12 @@ static void nbpf_chan_tasklet(unsigned long data)
must_put = false;
}
- callback = desc->async_tx.callback;
- param = desc->async_tx.callback_param;
+ dmaengine_desc_get_callback(&desc->async_tx, &cb);
/* ack and callback completed descriptor */
spin_unlock_irq(&chan->lock);
- if (callback)
- callback(param);
+ dmaengine_desc_callback_invoke(&cb, NULL);
if (must_put)
nbpf_desc_put(desc);
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index 113605f..df95727d 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -357,14 +357,13 @@ static void pdc_chain_complete(struct pch_dma_chan *pd_chan,
struct pch_dma_desc *desc)
{
struct dma_async_tx_descriptor *txd = &desc->txd;
- dma_async_tx_callback callback = txd->callback;
- void *param = txd->callback_param;
+ struct dmaengine_desc_callback cb;
+ dmaengine_desc_get_callback(txd, &cb);
list_splice_init(&desc->tx_list, &pd_chan->free_list);
list_move(&desc->desc_node, &pd_chan->free_list);
- if (callback)
- callback(param);
+ dmaengine_desc_callback_invoke(&cb, NULL);
}
static void pdc_complete_all(struct pch_dma_chan *pd_chan)
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 4fc3ffb..1ecd467 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2039,14 +2039,12 @@ static void pl330_tasklet(unsigned long data)
}
while (!list_empty(&pch->completed_list)) {
- dma_async_tx_callback callback;
- void *callback_param;
+ struct dmaengine_desc_callback cb;
desc = list_first_entry(&pch->completed_list,
struct dma_pl330_desc, node);
- callback = desc->txd.callback;
- callback_param = desc->txd.callback_param;
+ dmaengine_desc_get_callback(&desc->txd, &cb);
if (pch->cyclic) {
desc->status = PREP;
@@ -2064,9 +2062,9 @@ static void pl330_tasklet(unsigned long data)
dma_descriptor_unmap(&desc->txd);
- if (callback) {
+ if (dmaengine_desc_callback_valid(&cb)) {
spin_unlock_irqrestore(&pch->lock, flags);
- callback(callback_param);
+ dmaengine_desc_callback_invoke(&cb, NULL);
spin_lock_irqsave(&pch->lock, flags);
}
}
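
pl330 keeps an explicit validity check (as do rcar-dmac and xilinx_dma below) because it must drop its spinlock around the invocation; testing first avoids a pointless unlock/relock cycle when no callback was set. The snapshot-then-invoke shape, condensed from the hunk above:

	dmaengine_desc_get_callback(&desc->txd, &cb);	/* under the lock */
	if (dmaengine_desc_callback_valid(&cb)) {
		spin_unlock_irqrestore(&pch->lock, flags);
		dmaengine_desc_callback_invoke(&cb, NULL); /* client code runs unlocked */
		spin_lock_irqsave(&pch->lock, flags);
	}
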
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index da3688b..d45da34 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -1485,10 +1485,7 @@ static dma_cookie_t ppc440spe_adma_run_tx_complete_actions(
/* call the callback (must not sleep or submit new
* operations to this channel)
*/
- if (desc->async_tx.callback)
- desc->async_tx.callback(
- desc->async_tx.callback_param);
-
+ dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL);
dma_descriptor_unmap(&desc->async_tx);
}
@@ -3891,7 +3888,7 @@ static int ppc440spe_adma_setup_irqs(struct ppc440spe_adma_device *adev,
np = ofdev->dev.of_node;
if (adev->id != PPC440SPE_XOR_ID) {
adev->err_irq = irq_of_parse_and_map(np, 1);
- if (adev->err_irq == NO_IRQ) {
+ if (!adev->err_irq) {
dev_warn(adev->dev, "no err irq resource?\n");
*initcode = PPC_ADMA_INIT_IRQ2;
adev->err_irq = -ENXIO;
@@ -3902,7 +3899,7 @@ static int ppc440spe_adma_setup_irqs(struct ppc440spe_adma_device *adev,
}
adev->irq = irq_of_parse_and_map(np, 0);
- if (adev->irq == NO_IRQ) {
+ if (!adev->irq) {
dev_err(adev->dev, "no irq resource\n");
*initcode = PPC_ADMA_INIT_IRQ1;
ret = -ENXIO;
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
index b2374cd..e244e10 100644
--- a/drivers/dma/qcom/hidma.c
+++ b/drivers/dma/qcom/hidma.c
@@ -111,6 +111,7 @@ static void hidma_process_completed(struct hidma_chan *mchan)
struct dma_async_tx_descriptor *desc;
dma_cookie_t last_cookie;
struct hidma_desc *mdesc;
+ struct hidma_desc *next;
unsigned long irqflags;
struct list_head list;
@@ -122,28 +123,36 @@ static void hidma_process_completed(struct hidma_chan *mchan)
spin_unlock_irqrestore(&mchan->lock, irqflags);
/* Execute callbacks and run dependencies */
- list_for_each_entry(mdesc, &list, node) {
+ list_for_each_entry_safe(mdesc, next, &list, node) {
enum dma_status llstat;
+ struct dmaengine_desc_callback cb;
+ struct dmaengine_result result;
desc = &mdesc->desc;
+ last_cookie = desc->cookie;
spin_lock_irqsave(&mchan->lock, irqflags);
dma_cookie_complete(desc);
spin_unlock_irqrestore(&mchan->lock, irqflags);
llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch);
- if (desc->callback && (llstat == DMA_COMPLETE))
- desc->callback(desc->callback_param);
+ dmaengine_desc_get_callback(desc, &cb);
- last_cookie = desc->cookie;
dma_run_dependencies(desc);
- }
- /* Free descriptors */
- spin_lock_irqsave(&mchan->lock, irqflags);
- list_splice_tail_init(&list, &mchan->free);
- spin_unlock_irqrestore(&mchan->lock, irqflags);
+ spin_lock_irqsave(&mchan->lock, irqflags);
+ list_move(&mdesc->node, &mchan->free);
+
+		if (llstat == DMA_COMPLETE) {
+			mchan->last_success = last_cookie;
+			result.result = DMA_TRANS_NOERROR;
+		} else {
+			result.result = DMA_TRANS_ABORTED;
+		}
+
+ spin_unlock_irqrestore(&mchan->lock, irqflags);
+ dmaengine_desc_callback_invoke(&cb, &result);
+ }
}
/*
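
The completion loop above switches to list_for_each_entry_safe() because each iteration now list_move()s the current node onto mchan->free; the _safe variant samples the next pointer before the loop body runs, so detaching the current entry cannot break the traversal. A sketch of the requirement:

	list_for_each_entry_safe(mdesc, next, &list, node) {
		/* 'next' was read before this body executes, so moving
		 * 'mdesc' off 'list' does not derail the iteration;
		 * plain list_for_each_entry() would chase a stale link */
		list_move(&mdesc->node, &mchan->free);
	}
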
@@ -238,6 +247,19 @@ static void hidma_issue_pending(struct dma_chan *dmach)
hidma_ll_start(dmadev->lldev);
}
+static inline bool hidma_txn_is_success(dma_cookie_t cookie,
+ dma_cookie_t last_success, dma_cookie_t last_used)
+{
+ if (last_success <= last_used) {
+ if ((cookie <= last_success) || (cookie > last_used))
+ return true;
+ } else {
+ if ((cookie <= last_success) && (cookie > last_used))
+ return true;
+ }
+ return false;
+}
+
static enum dma_status hidma_tx_status(struct dma_chan *dmach,
dma_cookie_t cookie,
struct dma_tx_state *txstate)
@@ -246,8 +268,13 @@ static enum dma_status hidma_tx_status(struct dma_chan *dmach,
enum dma_status ret;
ret = dma_cookie_status(dmach, cookie, txstate);
- if (ret == DMA_COMPLETE)
- return ret;
+ if (ret == DMA_COMPLETE) {
+ bool is_success;
+
+ is_success = hidma_txn_is_success(cookie, mchan->last_success,
+ dmach->cookie);
+ return is_success ? ret : DMA_ERROR;
+ }
if (mchan->paused && (ret == DMA_IN_PROGRESS)) {
unsigned long flags;
@@ -398,6 +425,7 @@ static int hidma_terminate_channel(struct dma_chan *chan)
hidma_process_completed(mchan);
spin_lock_irqsave(&mchan->lock, irqflags);
+ mchan->last_success = 0;
list_splice_init(&mchan->active, &list);
list_splice_init(&mchan->prepared, &list);
list_splice_init(&mchan->completed, &list);
@@ -413,14 +441,9 @@ static int hidma_terminate_channel(struct dma_chan *chan)
/* return all user requests */
list_for_each_entry_safe(mdesc, tmp, &list, node) {
struct dma_async_tx_descriptor *txd = &mdesc->desc;
- dma_async_tx_callback callback = mdesc->desc.callback;
- void *param = mdesc->desc.callback_param;
dma_descriptor_unmap(txd);
-
- if (callback)
- callback(param);
-
+ dmaengine_desc_get_callback_invoke(txd, NULL);
dma_run_dependencies(txd);
/* move myself to free_list */
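
hidma_txn_is_success() above treats the cookie space as a ring: last_success is the newest cookie whose hardware status came back DMA_COMPLETE, and last_used the newest cookie handed out. A cookie is reported successful only if it falls outside the half-open window (last_success, last_used] of aborted or unaccounted transactions; the second branch covers counter wrap-around. Illustrative values:

	/* non-wrapped: last_success = 5, last_used = 8 */
	hidma_txn_is_success(3, 5, 8);	/* true:  completed at or before 5 */
	hidma_txn_is_success(7, 5, 8);	/* false: in the aborted window (5, 8] */

	/* wrapped: last_success = 5, last_used = 2 */
	hidma_txn_is_success(4, 5, 2);	/* true:  inside the completed range (2, 5] */
	hidma_txn_is_success(1, 5, 2);	/* false: recycled, not yet confirmed */
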
diff --git a/drivers/dma/qcom/hidma.h b/drivers/dma/qcom/hidma.h
index db413a5..e52e207 100644
--- a/drivers/dma/qcom/hidma.h
+++ b/drivers/dma/qcom/hidma.h
@@ -72,7 +72,6 @@ struct hidma_lldev {
u32 tre_write_offset; /* TRE write location */
struct tasklet_struct task; /* task delivering notifications */
- struct tasklet_struct rst_task; /* task to reset HW */
DECLARE_KFIFO_PTR(handoff_fifo,
struct hidma_tre *); /* pending TREs FIFO */
};
@@ -89,6 +88,7 @@ struct hidma_chan {
bool allocated;
char dbg_name[16];
u32 dma_sig;
+ dma_cookie_t last_success;
/*
* active descriptor on this channel
diff --git a/drivers/dma/qcom/hidma_ll.c b/drivers/dma/qcom/hidma_ll.c
index ad20dfb..3224f24 100644
--- a/drivers/dma/qcom/hidma_ll.c
+++ b/drivers/dma/qcom/hidma_ll.c
@@ -381,27 +381,6 @@ static int hidma_ll_reset(struct hidma_lldev *lldev)
}
/*
- * Abort all transactions and perform a reset.
- */
-static void hidma_ll_abort(unsigned long arg)
-{
- struct hidma_lldev *lldev = (struct hidma_lldev *)arg;
- u8 err_code = HIDMA_EVRE_STATUS_ERROR;
- u8 err_info = 0xFF;
- int rc;
-
- hidma_cleanup_pending_tre(lldev, err_info, err_code);
-
- /* reset the channel for recovery */
- rc = hidma_ll_setup(lldev);
- if (rc) {
- dev_err(lldev->dev, "channel reinitialize failed after error\n");
- return;
- }
- writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
-}
-
-/*
* The interrupt handler for HIDMA will try to consume as many pending
* EVRE from the event queue as possible. Each EVRE has an associated
* TRE that holds the user interface parameters. EVRE reports the
@@ -454,13 +433,18 @@ irqreturn_t hidma_ll_inthandler(int chirq, void *arg)
while (cause) {
if (cause & HIDMA_ERR_INT_MASK) {
- dev_err(lldev->dev, "error 0x%x, resetting...\n",
+ dev_err(lldev->dev, "error 0x%x, disabling...\n",
cause);
/* Clear out pending interrupts */
writel(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
- tasklet_schedule(&lldev->rst_task);
+ /* No further submissions. */
+ hidma_ll_disable(lldev);
+
+			/* Driver completes the txn and notifies the client. */
+ hidma_cleanup_pending_tre(lldev, 0xFF,
+ HIDMA_EVRE_STATUS_ERROR);
goto out;
}
@@ -808,7 +792,6 @@ struct hidma_lldev *hidma_ll_init(struct device *dev, u32 nr_tres,
return NULL;
spin_lock_init(&lldev->lock);
- tasklet_init(&lldev->rst_task, hidma_ll_abort, (unsigned long)lldev);
tasklet_init(&lldev->task, hidma_ll_tre_complete, (unsigned long)lldev);
lldev->initialized = 1;
writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
@@ -831,7 +814,6 @@ int hidma_ll_uninit(struct hidma_lldev *lldev)
required_bytes = sizeof(struct hidma_tre) * lldev->nr_tres;
tasklet_kill(&lldev->task);
- tasklet_kill(&lldev->rst_task);
memset(lldev->trepool, 0, required_bytes);
lldev->trepool = NULL;
lldev->pending_tre_count = 0;
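
Net effect of the hidma_ll changes: the dedicated reset tasklet is gone, and a reported channel error is now handled synchronously in the interrupt handler. Condensed from the hunks above:

	if (cause & HIDMA_ERR_INT_MASK) {
		/* ack the error interrupt */
		writel(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);

		/* stop the channel; no further submissions are consumed */
		hidma_ll_disable(lldev);

		/* complete every pending TRE with an error status, so the
		 * upper layer reports DMA_TRANS_ABORTED / DMA_ERROR via
		 * the last_success bookkeeping added in hidma.c */
		hidma_cleanup_pending_tre(lldev, 0xFF, HIDMA_EVRE_STATUS_ERROR);
		goto out;
	}
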
diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c
index ce67075..3c579ab 100644
--- a/drivers/dma/s3c24xx-dma.c
+++ b/drivers/dma/s3c24xx-dma.c
@@ -823,11 +823,11 @@ static struct dma_async_tx_descriptor *s3c24xx_dma_prep_memcpy(
struct s3c24xx_sg *dsg;
int src_mod, dest_mod;
- dev_dbg(&s3cdma->pdev->dev, "prepare memcpy of %d bytes from %s\n",
+ dev_dbg(&s3cdma->pdev->dev, "prepare memcpy of %zu bytes from %s\n",
len, s3cchan->name);
if ((len & S3C24XX_DCON_TC_MASK) != len) {
- dev_err(&s3cdma->pdev->dev, "memcpy size %d to large\n", len);
+		dev_err(&s3cdma->pdev->dev, "memcpy size %zu too large\n", len);
return NULL;
}
@@ -1301,6 +1301,9 @@ static int s3c24xx_dma_probe(struct platform_device *pdev)
s3cdma->slave.device_prep_dma_cyclic = s3c24xx_dma_prep_dma_cyclic;
s3cdma->slave.device_config = s3c24xx_dma_set_runtime_config;
s3cdma->slave.device_terminate_all = s3c24xx_dma_terminate_all;
+ s3cdma->slave.filter.map = pdata->slave_map;
+ s3cdma->slave.filter.mapcnt = pdata->slavecnt;
+ s3cdma->slave.filter.fn = s3c24xx_dma_filter;
/* Register as many memcpy channels as there are physical channels */
ret = s3c24xx_dma_init_virtual_channels(s3cdma, &s3cdma->memcpy,
@@ -1418,7 +1421,7 @@ bool s3c24xx_dma_filter(struct dma_chan *chan, void *param)
s3cchan = to_s3c24xx_dma_chan(chan);
- return s3cchan->id == (int)param;
+ return s3cchan->id == (uintptr_t)param;
}
EXPORT_SYMBOL(s3c24xx_dma_filter);
diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c
index 43db255..1adeb32 100644
--- a/drivers/dma/sa11x0-dma.c
+++ b/drivers/dma/sa11x0-dma.c
@@ -463,7 +463,7 @@ static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan,
dma_addr_t addr = sa11x0_dma_pos(p);
unsigned i;
- dev_vdbg(d->slave.dev, "tx_status: addr:%x\n", addr);
+ dev_vdbg(d->slave.dev, "tx_status: addr:%pad\n", &addr);
for (i = 0; i < txd->sglen; i++) {
dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x\n",
@@ -491,7 +491,7 @@ static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan,
}
spin_unlock_irqrestore(&c->vc.lock, flags);
- dev_vdbg(d->slave.dev, "tx_status: bytes 0x%zx\n", state->residue);
+ dev_vdbg(d->slave.dev, "tx_status: bytes 0x%x\n", state->residue);
return ret;
}
@@ -551,8 +551,8 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
if (len > DMA_MAX_SIZE)
j += DIV_ROUND_UP(len, DMA_MAX_SIZE & ~DMA_ALIGN) - 1;
if (addr & DMA_ALIGN) {
- dev_dbg(chan->device->dev, "vchan %p: bad buffer alignment: %08x\n",
- &c->vc, addr);
+ dev_dbg(chan->device->dev, "vchan %p: bad buffer alignment: %pad\n",
+ &c->vc, &addr);
return NULL;
}
}
@@ -599,7 +599,7 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
txd->size = size;
txd->sglen = j;
- dev_dbg(chan->device->dev, "vchan %p: txd %p: size %u nr %u\n",
+ dev_dbg(chan->device->dev, "vchan %p: txd %p: size %zu nr %u\n",
&c->vc, &txd->vd, txd->size, txd->sglen);
return vchan_tx_prep(&c->vc, &txd->vd, flags);
@@ -693,8 +693,8 @@ static int sa11x0_dma_device_config(struct dma_chan *chan,
if (maxburst == 8)
ddar |= DDAR_BS;
- dev_dbg(c->vc.chan.device->dev, "vchan %p: dma_slave_config addr %x width %u burst %u\n",
- &c->vc, addr, width, maxburst);
+ dev_dbg(c->vc.chan.device->dev, "vchan %p: dma_slave_config addr %pad width %u burst %u\n",
+ &c->vc, &addr, width, maxburst);
c->ddar = ddar | (addr & 0xf0000000) | (addr & 0x003ffffc) << 6;
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 0dd9538..d1defa4 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -1389,21 +1389,18 @@ static irqreturn_t rcar_dmac_isr_channel_thread(int irq, void *dev)
{
struct rcar_dmac_chan *chan = dev;
struct rcar_dmac_desc *desc;
+ struct dmaengine_desc_callback cb;
spin_lock_irq(&chan->lock);
/* For cyclic transfers notify the user after every chunk. */
if (chan->desc.running && chan->desc.running->cyclic) {
- dma_async_tx_callback callback;
- void *callback_param;
-
desc = chan->desc.running;
- callback = desc->async_tx.callback;
- callback_param = desc->async_tx.callback_param;
+ dmaengine_desc_get_callback(&desc->async_tx, &cb);
- if (callback) {
+ if (dmaengine_desc_callback_valid(&cb)) {
spin_unlock_irq(&chan->lock);
- callback(callback_param);
+ dmaengine_desc_callback_invoke(&cb, NULL);
spin_lock_irq(&chan->lock);
}
}
@@ -1418,14 +1415,15 @@ static irqreturn_t rcar_dmac_isr_channel_thread(int irq, void *dev)
dma_cookie_complete(&desc->async_tx);
list_del(&desc->node);
- if (desc->async_tx.callback) {
+ dmaengine_desc_get_callback(&desc->async_tx, &cb);
+ if (dmaengine_desc_callback_valid(&cb)) {
spin_unlock_irq(&chan->lock);
/*
* We own the only reference to this descriptor, we can
* safely dereference it without holding the channel
* lock.
*/
- desc->async_tx.callback(desc->async_tx.callback_param);
+ dmaengine_desc_callback_invoke(&cb, NULL);
spin_lock_irq(&chan->lock);
}
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index 10fcaba..12fa48e 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -330,10 +330,11 @@ static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
bool head_acked = false;
dma_cookie_t cookie = 0;
dma_async_tx_callback callback = NULL;
- void *param = NULL;
+ struct dmaengine_desc_callback cb;
unsigned long flags;
LIST_HEAD(cyclic_list);
+ memset(&cb, 0, sizeof(cb));
spin_lock_irqsave(&schan->chan_lock, flags);
list_for_each_entry_safe(desc, _desc, &schan->ld_queue, node) {
struct dma_async_tx_descriptor *tx = &desc->async_tx;
@@ -367,8 +368,8 @@ static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
/* Call callback on the last chunk */
if (desc->mark == DESC_COMPLETED && tx->callback) {
desc->mark = DESC_WAITING;
+ dmaengine_desc_get_callback(tx, &cb);
callback = tx->callback;
- param = tx->callback_param;
dev_dbg(schan->dev, "descriptor #%d@%p on %d callback\n",
tx->cookie, tx, schan->id);
BUG_ON(desc->chunks != 1);
@@ -430,8 +431,7 @@ static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
spin_unlock_irqrestore(&schan->chan_lock, flags);
- if (callback)
- callback(param);
+ dmaengine_desc_callback_invoke(&cb, NULL);
return callback;
}
@@ -885,9 +885,9 @@ bool shdma_reset(struct shdma_dev *sdev)
/* Complete all */
list_for_each_entry(sdesc, &dl, node) {
struct dma_async_tx_descriptor *tx = &sdesc->async_tx;
+
sdesc->mark = DESC_IDLE;
- if (tx->callback)
- tx->callback(tx->callback_param);
+ dmaengine_desc_get_callback_invoke(tx, NULL);
}
spin_lock(&schan->chan_lock);
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
index d8bc3f2..a96e4a4 100644
--- a/drivers/dma/sirf-dma.c
+++ b/drivers/dma/sirf-dma.c
@@ -360,9 +360,7 @@ static void sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma)
list_for_each_entry(sdesc, &list, node) {
desc = &sdesc->desc;
- if (desc->callback)
- desc->callback(desc->callback_param);
-
+ dmaengine_desc_get_callback_invoke(desc, NULL);
last_cookie = desc->cookie;
dma_run_dependencies(desc);
}
@@ -388,8 +386,7 @@ static void sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma)
desc = &sdesc->desc;
while (happened_cyclic != schan->completed_cyclic) {
- if (desc->callback)
- desc->callback(desc->callback_param);
+ dmaengine_desc_get_callback_invoke(desc, NULL);
schan->completed_cyclic++;
}
}
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 8b18e44..08f3d7b 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -941,15 +941,7 @@ static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
{
- struct d40_desc *d;
-
- if (list_empty(&d40c->active))
- return NULL;
-
- d = list_first_entry(&d40c->active,
- struct d40_desc,
- node);
- return d;
+ return list_first_entry_or_null(&d40c->active, struct d40_desc, node);
}
/* remove desc from current queue and add it to the pending_queue */
@@ -962,36 +954,18 @@ static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
static struct d40_desc *d40_first_pending(struct d40_chan *d40c)
{
- struct d40_desc *d;
-
- if (list_empty(&d40c->pending_queue))
- return NULL;
-
- d = list_first_entry(&d40c->pending_queue,
- struct d40_desc,
- node);
- return d;
+ return list_first_entry_or_null(&d40c->pending_queue, struct d40_desc,
+ node);
}
static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
{
- struct d40_desc *d;
-
- if (list_empty(&d40c->queue))
- return NULL;
-
- d = list_first_entry(&d40c->queue,
- struct d40_desc,
- node);
- return d;
+ return list_first_entry_or_null(&d40c->queue, struct d40_desc, node);
}
static struct d40_desc *d40_first_done(struct d40_chan *d40c)
{
- if (list_empty(&d40c->done))
- return NULL;
-
- return list_first_entry(&d40c->done, struct d40_desc, node);
+ return list_first_entry_or_null(&d40c->done, struct d40_desc, node);
}
static int d40_psize_2_burst_size(bool is_log, int psize)
@@ -1596,8 +1570,7 @@ static void dma_tasklet(unsigned long data)
struct d40_desc *d40d;
unsigned long flags;
bool callback_active;
- dma_async_tx_callback callback;
- void *callback_param;
+ struct dmaengine_desc_callback cb;
spin_lock_irqsave(&d40c->lock, flags);
@@ -1624,8 +1597,7 @@ static void dma_tasklet(unsigned long data)
/* Callback to client */
callback_active = !!(d40d->txd.flags & DMA_PREP_INTERRUPT);
- callback = d40d->txd.callback;
- callback_param = d40d->txd.callback_param;
+ dmaengine_desc_get_callback(&d40d->txd, &cb);
if (!d40d->cyclic) {
if (async_tx_test_ack(&d40d->txd)) {
@@ -1646,8 +1618,8 @@ static void dma_tasklet(unsigned long data)
spin_unlock_irqrestore(&d40c->lock, flags);
- if (callback_active && callback)
- callback(callback_param);
+ if (callback_active)
+ dmaengine_desc_callback_invoke(&cb, NULL);
return;
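
The four ste_dma40 getters above (and vchan_next_desc in virt-dma.h below) collapse the open-coded empty-check-plus-list_first_entry idiom into list_first_entry_or_null(). Its <linux/list.h> definition is essentially:

#define list_first_entry_or_null(ptr, type, member) \
	(!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL)
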
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
index 047476a..307547f 100644
--- a/drivers/dma/stm32-dma.c
+++ b/drivers/dma/stm32-dma.c
@@ -954,7 +954,7 @@ static void stm32_dma_desc_free(struct virt_dma_desc *vdesc)
kfree(container_of(vdesc, struct stm32_dma_desc, vdesc));
}
-void stm32_dma_set_config(struct stm32_dma_chan *chan,
+static void stm32_dma_set_config(struct stm32_dma_chan *chan,
struct stm32_dma_cfg *cfg)
{
stm32_dma_clear_reg(&chan->chan_reg);
diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c
index 3835fcd..8346199 100644
--- a/drivers/dma/sun6i-dma.c
+++ b/drivers/dma/sun6i-dma.c
@@ -1011,6 +1011,12 @@ static struct sun6i_dma_config sun8i_a23_dma_cfg = {
.nr_max_vchans = 37,
};
+static struct sun6i_dma_config sun8i_a83t_dma_cfg = {
+ .nr_max_channels = 8,
+ .nr_max_requests = 28,
+ .nr_max_vchans = 39,
+};
+
/*
* The H3 has 12 physical channels, a maximum DRQ port id of 27,
* and a total of 34 usable source and destination endpoints.
@@ -1025,6 +1031,7 @@ static struct sun6i_dma_config sun8i_h3_dma_cfg = {
static const struct of_device_id sun6i_dma_match[] = {
{ .compatible = "allwinner,sun6i-a31-dma", .data = &sun6i_a31_dma_cfg },
{ .compatible = "allwinner,sun8i-a23-dma", .data = &sun8i_a23_dma_cfg },
+ { .compatible = "allwinner,sun8i-a83t-dma", .data = &sun8i_a83t_dma_cfg },
{ .compatible = "allwinner,sun8i-h3-dma", .data = &sun8i_h3_dma_cfg },
{ /* sentinel */ }
};
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index 6ab9eb9..3722b9d 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -655,8 +655,7 @@ static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc,
static void tegra_dma_tasklet(unsigned long data)
{
struct tegra_dma_channel *tdc = (struct tegra_dma_channel *)data;
- dma_async_tx_callback callback = NULL;
- void *callback_param = NULL;
+ struct dmaengine_desc_callback cb;
struct tegra_dma_desc *dma_desc;
unsigned long flags;
int cb_count;
@@ -666,13 +665,12 @@ static void tegra_dma_tasklet(unsigned long data)
dma_desc = list_first_entry(&tdc->cb_desc,
typeof(*dma_desc), cb_node);
list_del(&dma_desc->cb_node);
- callback = dma_desc->txd.callback;
- callback_param = dma_desc->txd.callback_param;
+ dmaengine_desc_get_callback(&dma_desc->txd, &cb);
cb_count = dma_desc->cb_count;
dma_desc->cb_count = 0;
spin_unlock_irqrestore(&tdc->lock, flags);
- while (cb_count-- && callback)
- callback(callback_param);
+ while (cb_count--)
+ dmaengine_desc_callback_invoke(&cb, NULL);
spin_lock_irqsave(&tdc->lock, flags);
}
spin_unlock_irqrestore(&tdc->lock, flags);
diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c
index 5ae294b..3f24aeb 100644
--- a/drivers/dma/ti-dma-crossbar.c
+++ b/drivers/dma/ti-dma-crossbar.c
@@ -18,15 +18,19 @@
#define TI_XBAR_DRA7 0
#define TI_XBAR_AM335X 1
+static const u32 ti_xbar_type[] = {
+ [TI_XBAR_DRA7] = TI_XBAR_DRA7,
+ [TI_XBAR_AM335X] = TI_XBAR_AM335X,
+};
static const struct of_device_id ti_dma_xbar_match[] = {
{
.compatible = "ti,dra7-dma-crossbar",
- .data = (void *)TI_XBAR_DRA7,
+ .data = &ti_xbar_type[TI_XBAR_DRA7],
},
{
.compatible = "ti,am335x-edma-crossbar",
- .data = (void *)TI_XBAR_AM335X,
+ .data = &ti_xbar_type[TI_XBAR_AM335X],
},
{},
};
@@ -190,9 +194,6 @@ static int ti_am335x_xbar_probe(struct platform_device *pdev)
#define TI_DRA7_XBAR_OUTPUTS 127
#define TI_DRA7_XBAR_INPUTS 256
-#define TI_XBAR_EDMA_OFFSET 0
-#define TI_XBAR_SDMA_OFFSET 1
-
struct ti_dra7_xbar_data {
void __iomem *iomem;
@@ -280,18 +281,25 @@ static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec,
return map;
}
+#define TI_XBAR_EDMA_OFFSET 0
+#define TI_XBAR_SDMA_OFFSET 1
+static const u32 ti_dma_offset[] = {
+ [TI_XBAR_EDMA_OFFSET] = 0,
+ [TI_XBAR_SDMA_OFFSET] = 1,
+};
+
static const struct of_device_id ti_dra7_master_match[] = {
{
.compatible = "ti,omap4430-sdma",
- .data = (void *)TI_XBAR_SDMA_OFFSET,
+ .data = &ti_dma_offset[TI_XBAR_SDMA_OFFSET],
},
{
.compatible = "ti,edma3",
- .data = (void *)TI_XBAR_EDMA_OFFSET,
+ .data = &ti_dma_offset[TI_XBAR_EDMA_OFFSET],
},
{
.compatible = "ti,edma3-tpcc",
- .data = (void *)TI_XBAR_EDMA_OFFSET,
+ .data = &ti_dma_offset[TI_XBAR_EDMA_OFFSET],
},
{},
};
@@ -311,7 +319,7 @@ static int ti_dra7_xbar_probe(struct platform_device *pdev)
struct property *prop;
struct resource *res;
u32 safe_val;
- size_t sz;
+ int sz;
void __iomem *iomem;
int i, ret;
@@ -395,7 +403,7 @@ static int ti_dra7_xbar_probe(struct platform_device *pdev)
xbar->dmarouter.dev = &pdev->dev;
xbar->dmarouter.route_free = ti_dra7_xbar_free;
- xbar->dma_offset = (u32)match->data;
+ xbar->dma_offset = *(u32 *)match->data;
mutex_init(&xbar->mutex);
platform_set_drvdata(pdev, xbar);
@@ -428,7 +436,7 @@ static int ti_dma_xbar_probe(struct platform_device *pdev)
if (unlikely(!match))
return -EINVAL;
- switch ((u32)match->data) {
+ switch (*(u32 *)match->data) {
case TI_XBAR_DRA7:
ret = ti_dra7_xbar_probe(pdev);
break;
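
The crossbar conversion replaces stuffing a small integer into the of_device_id .data pointer, then casting it back with (u32), which warns on 64-bit builds where pointers are wider than u32, with pointing .data at a static const u32 and dereferencing it. A minimal sketch of the pattern with hypothetical names (my_*):

static const u32 my_variant[] = { 0, 1 };

static const struct of_device_id my_match[] = {
	{ .compatible = "vendor,ip-v1", .data = &my_variant[0] },
	{ .compatible = "vendor,ip-v2", .data = &my_variant[1] },
	{ /* sentinel */ }
};

	/* in probe(): */
	const struct of_device_id *match = of_match_node(my_match, np);
	u32 variant = *(const u32 *)match->data;	/* no int/pointer cast */
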
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index e82745a..896bafb 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -226,8 +226,7 @@ static void __td_start_dma(struct timb_dma_chan *td_chan)
static void __td_finish(struct timb_dma_chan *td_chan)
{
- dma_async_tx_callback callback;
- void *param;
+ struct dmaengine_desc_callback cb;
struct dma_async_tx_descriptor *txd;
struct timb_dma_desc *td_desc;
@@ -252,8 +251,7 @@ static void __td_finish(struct timb_dma_chan *td_chan)
dma_cookie_complete(txd);
td_chan->ongoing = false;
- callback = txd->callback;
- param = txd->callback_param;
+ dmaengine_desc_get_callback(txd, &cb);
list_move(&td_desc->desc_node, &td_chan->free_list);
@@ -262,8 +260,7 @@ static void __td_finish(struct timb_dma_chan *td_chan)
* The API requires that no submissions are done from a
* callback, so we don't need to drop the lock here
*/
- if (callback)
- callback(param);
+ dmaengine_desc_callback_invoke(&cb, NULL);
}
static u32 __td_ier_mask(struct timb_dma *td)
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index 7632290..4d8c7b9 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -403,16 +403,14 @@ static void
txx9dmac_descriptor_complete(struct txx9dmac_chan *dc,
struct txx9dmac_desc *desc)
{
- dma_async_tx_callback callback;
- void *param;
+ struct dmaengine_desc_callback cb;
struct dma_async_tx_descriptor *txd = &desc->txd;
dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n",
txd->cookie, desc);
dma_cookie_complete(txd);
- callback = txd->callback;
- param = txd->callback_param;
+ dmaengine_desc_get_callback(txd, &cb);
txx9dmac_sync_desc_for_cpu(dc, desc);
list_splice_init(&desc->tx_list, &dc->free_list);
@@ -423,8 +421,7 @@ txx9dmac_descriptor_complete(struct txx9dmac_chan *dc,
* The API requires that no submissions are done from a
* callback, so we don't need to drop the lock here
*/
- if (callback)
- callback(param);
+ dmaengine_desc_callback_invoke(&cb, NULL);
dma_run_dependencies(txd);
}
diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c
index a35c211..e47fc9b 100644
--- a/drivers/dma/virt-dma.c
+++ b/drivers/dma/virt-dma.c
@@ -87,8 +87,7 @@ static void vchan_complete(unsigned long arg)
{
struct virt_dma_chan *vc = (struct virt_dma_chan *)arg;
struct virt_dma_desc *vd;
- dma_async_tx_callback cb = NULL;
- void *cb_data = NULL;
+ struct dmaengine_desc_callback cb;
LIST_HEAD(head);
spin_lock_irq(&vc->lock);
@@ -96,18 +95,17 @@ static void vchan_complete(unsigned long arg)
vd = vc->cyclic;
if (vd) {
vc->cyclic = NULL;
- cb = vd->tx.callback;
- cb_data = vd->tx.callback_param;
+ dmaengine_desc_get_callback(&vd->tx, &cb);
+ } else {
+ memset(&cb, 0, sizeof(cb));
}
spin_unlock_irq(&vc->lock);
- if (cb)
- cb(cb_data);
+ dmaengine_desc_callback_invoke(&cb, NULL);
while (!list_empty(&head)) {
vd = list_first_entry(&head, struct virt_dma_desc, node);
- cb = vd->tx.callback;
- cb_data = vd->tx.callback_param;
+ dmaengine_desc_get_callback(&vd->tx, &cb);
list_del(&vd->node);
if (dmaengine_desc_test_reuse(&vd->tx))
@@ -115,8 +113,7 @@ static void vchan_complete(unsigned long arg)
else
vc->desc_free(vd);
- if (cb)
- cb(cb_data);
+ dmaengine_desc_callback_invoke(&cb, NULL);
}
}
diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h
index d9731ca..3f776a4 100644
--- a/drivers/dma/virt-dma.h
+++ b/drivers/dma/virt-dma.h
@@ -45,6 +45,8 @@ static inline struct virt_dma_chan *to_virt_chan(struct dma_chan *chan)
void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head);
void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev);
struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *, dma_cookie_t);
+extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *);
+extern int vchan_tx_desc_free(struct dma_async_tx_descriptor *);
/**
* vchan_tx_prep - prepare a descriptor
@@ -55,8 +57,6 @@ struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *, dma_cookie_t);
static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan *vc,
struct virt_dma_desc *vd, unsigned long tx_flags)
{
- extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *);
- extern int vchan_tx_desc_free(struct dma_async_tx_descriptor *);
unsigned long flags;
dma_async_tx_descriptor_init(&vd->tx, &vc->chan);
@@ -123,10 +123,8 @@ static inline void vchan_cyclic_callback(struct virt_dma_desc *vd)
*/
static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
{
- if (list_empty(&vc->desc_issued))
- return NULL;
-
- return list_first_entry(&vc->desc_issued, struct virt_dma_desc, node);
+ return list_first_entry_or_null(&vc->desc_issued,
+ struct virt_dma_desc, node);
}
/**
diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c
index 9cb93c5..d66ed11 100644
--- a/drivers/dma/xgene-dma.c
+++ b/drivers/dma/xgene-dma.c
@@ -608,8 +608,7 @@ static void xgene_dma_run_tx_complete_actions(struct xgene_dma_chan *chan,
dma_cookie_complete(tx);
/* Run the link descriptor callback function */
- if (tx->callback)
- tx->callback(tx->callback_param);
+ dmaengine_desc_get_callback_invoke(tx, NULL);
dma_descriptor_unmap(tx);
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 4e223d0..8288fe4 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -755,8 +755,7 @@ static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
spin_lock_irqsave(&chan->lock, flags);
list_for_each_entry_safe(desc, next, &chan->done_list, node) {
- dma_async_tx_callback callback;
- void *callback_param;
+ struct dmaengine_desc_callback cb;
if (desc->cyclic) {
xilinx_dma_chan_handle_cyclic(chan, desc, &flags);
@@ -767,11 +766,10 @@ static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
list_del(&desc->node);
/* Run the link descriptor callback function */
- callback = desc->async_tx.callback;
- callback_param = desc->async_tx.callback_param;
- if (callback) {
+ dmaengine_desc_get_callback(&desc->async_tx, &cb);
+ if (dmaengine_desc_callback_valid(&cb)) {
spin_unlock_irqrestore(&chan->lock, flags);
- callback(callback_param);
+ dmaengine_desc_callback_invoke(&cb, NULL);
spin_lock_irqsave(&chan->lock, flags);
}
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index d5c5894..8601c10 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -102,13 +102,16 @@ struct ntb_queue_entry {
void *buf;
unsigned int len;
unsigned int flags;
+ int retries;
+ int errors;
+ unsigned int tx_index;
+ unsigned int rx_index;
struct ntb_transport_qp *qp;
union {
struct ntb_payload_header __iomem *tx_hdr;
struct ntb_payload_header *rx_hdr;
};
- unsigned int index;
};
struct ntb_rx_info {
@@ -259,6 +262,12 @@ enum {
static void ntb_transport_rxc_db(unsigned long data);
static const struct ntb_ctx_ops ntb_transport_ops;
static struct ntb_client ntb_transport_client;
+static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
+ struct ntb_queue_entry *entry);
+static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset);
+static int ntb_async_rx_submit(struct ntb_queue_entry *entry, void *offset);
+static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset);
+
static int ntb_transport_bus_match(struct device *dev,
struct device_driver *drv)
@@ -1229,7 +1238,7 @@ static void ntb_complete_rxc(struct ntb_transport_qp *qp)
break;
entry->rx_hdr->flags = 0;
- iowrite32(entry->index, &qp->rx_info->entry);
+ iowrite32(entry->rx_index, &qp->rx_info->entry);
cb_data = entry->cb_data;
len = entry->len;
@@ -1247,10 +1256,36 @@ static void ntb_complete_rxc(struct ntb_transport_qp *qp)
spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);
}
-static void ntb_rx_copy_callback(void *data)
+static void ntb_rx_copy_callback(void *data,
+ const struct dmaengine_result *res)
{
struct ntb_queue_entry *entry = data;
+ /* we need to check DMA results if we are using DMA */
+ if (res) {
+ enum dmaengine_tx_result dma_err = res->result;
+
+ switch (dma_err) {
+ case DMA_TRANS_READ_FAILED:
+ case DMA_TRANS_WRITE_FAILED:
+ entry->errors++;
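+			/* fall through */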
+ case DMA_TRANS_ABORTED:
+ {
+ struct ntb_transport_qp *qp = entry->qp;
+			/* use the index captured at submit time; qp->rx_index
+			 * may have advanced before this callback runs */
+			void *offset = qp->rx_buff + qp->rx_max_frame *
+				       entry->rx_index;
+
+ ntb_memcpy_rx(entry, offset);
+ qp->rx_memcpy++;
+ return;
+ }
+
+ case DMA_TRANS_NOERROR:
+ default:
+ break;
+ }
+ }
+
entry->flags |= DESC_DONE_FLAG;
ntb_complete_rxc(entry->qp);
@@ -1266,10 +1301,10 @@ static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
/* Ensure that the data is fully copied out before clearing the flag */
wmb();
- ntb_rx_copy_callback(entry);
+ ntb_rx_copy_callback(entry, NULL);
}
-static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
+static int ntb_async_rx_submit(struct ntb_queue_entry *entry, void *offset)
{
struct dma_async_tx_descriptor *txd;
struct ntb_transport_qp *qp = entry->qp;
@@ -1282,13 +1317,6 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
int retries = 0;
len = entry->len;
-
- if (!chan)
- goto err;
-
- if (len < copy_bytes)
- goto err;
-
device = chan->device;
pay_off = (size_t)offset & ~PAGE_MASK;
buff_off = (size_t)buf & ~PAGE_MASK;
@@ -1316,7 +1344,8 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
unmap->from_cnt = 1;
for (retries = 0; retries < DMA_RETRIES; retries++) {
- txd = device->device_prep_dma_memcpy(chan, unmap->addr[1],
+ txd = device->device_prep_dma_memcpy(chan,
+ unmap->addr[1],
unmap->addr[0], len,
DMA_PREP_INTERRUPT);
if (txd)
@@ -1331,7 +1360,7 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
goto err_get_unmap;
}
- txd->callback = ntb_rx_copy_callback;
+ txd->callback_result = ntb_rx_copy_callback;
txd->callback_param = entry;
dma_set_unmap(txd, unmap);
@@ -1345,13 +1374,38 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
qp->rx_async++;
- return;
+ return 0;
err_set_unmap:
dmaengine_unmap_put(unmap);
err_get_unmap:
dmaengine_unmap_put(unmap);
err:
+ return -ENXIO;
+}
+
+static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
+{
+ struct ntb_transport_qp *qp = entry->qp;
+ struct dma_chan *chan = qp->rx_dma_chan;
+ int res;
+
+ if (!chan)
+ goto err;
+
+ if (entry->len < copy_bytes)
+ goto err;
+
+ res = ntb_async_rx_submit(entry, offset);
+ if (res < 0)
+ goto err;
+
+ if (!entry->retries)
+ qp->rx_async++;
+
+ return;
+
+err:
ntb_memcpy_rx(entry, offset);
qp->rx_memcpy++;
}
@@ -1397,7 +1451,7 @@ static int ntb_process_rxc(struct ntb_transport_qp *qp)
}
entry->rx_hdr = hdr;
- entry->index = qp->rx_index;
+ entry->rx_index = qp->rx_index;
if (hdr->len > entry->len) {
dev_dbg(&qp->ndev->pdev->dev,
@@ -1467,12 +1521,39 @@ static void ntb_transport_rxc_db(unsigned long data)
}
}
-static void ntb_tx_copy_callback(void *data)
+static void ntb_tx_copy_callback(void *data,
+ const struct dmaengine_result *res)
{
struct ntb_queue_entry *entry = data;
struct ntb_transport_qp *qp = entry->qp;
struct ntb_payload_header __iomem *hdr = entry->tx_hdr;
+ /* we need to check DMA results if we are using DMA */
+ if (res) {
+ enum dmaengine_tx_result dma_err = res->result;
+
+ switch (dma_err) {
+ case DMA_TRANS_READ_FAILED:
+ case DMA_TRANS_WRITE_FAILED:
+ entry->errors++;
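+			/* fall through */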
+ case DMA_TRANS_ABORTED:
+ {
+ void __iomem *offset =
+ qp->tx_mw + qp->tx_max_frame *
+ entry->tx_index;
+
+ /* resubmit via CPU */
+ ntb_memcpy_tx(entry, offset);
+ qp->tx_memcpy++;
+ return;
+ }
+
+ case DMA_TRANS_NOERROR:
+ default:
+ break;
+ }
+ }
+
iowrite32(entry->flags | DESC_DONE_FLAG, &hdr->flags);
ntb_peer_db_set(qp->ndev, BIT_ULL(qp->qp_num));
@@ -1507,40 +1588,25 @@ static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset)
/* Ensure that the data is fully copied out before setting the flags */
wmb();
- ntb_tx_copy_callback(entry);
+ ntb_tx_copy_callback(entry, NULL);
}
-static void ntb_async_tx(struct ntb_transport_qp *qp,
- struct ntb_queue_entry *entry)
+static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
+ struct ntb_queue_entry *entry)
{
- struct ntb_payload_header __iomem *hdr;
struct dma_async_tx_descriptor *txd;
struct dma_chan *chan = qp->tx_dma_chan;
struct dma_device *device;
+ size_t len = entry->len;
+ void *buf = entry->buf;
size_t dest_off, buff_off;
struct dmaengine_unmap_data *unmap;
dma_addr_t dest;
dma_cookie_t cookie;
- void __iomem *offset;
- size_t len = entry->len;
- void *buf = entry->buf;
int retries = 0;
- offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index;
- hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
- entry->tx_hdr = hdr;
-
- iowrite32(entry->len, &hdr->len);
- iowrite32((u32)qp->tx_pkts, &hdr->ver);
-
- if (!chan)
- goto err;
-
- if (len < copy_bytes)
- goto err;
-
device = chan->device;
- dest = qp->tx_mw_phys + qp->tx_max_frame * qp->tx_index;
+ dest = qp->tx_mw_phys + qp->tx_max_frame * entry->tx_index;
buff_off = (size_t)buf & ~PAGE_MASK;
dest_off = (size_t)dest & ~PAGE_MASK;
@@ -1560,8 +1626,9 @@ static void ntb_async_tx(struct ntb_transport_qp *qp,
unmap->to_cnt = 1;
for (retries = 0; retries < DMA_RETRIES; retries++) {
- txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0],
- len, DMA_PREP_INTERRUPT);
+ txd = device->device_prep_dma_memcpy(chan, dest,
+ unmap->addr[0], len,
+ DMA_PREP_INTERRUPT);
if (txd)
break;
@@ -1574,7 +1641,7 @@ static void ntb_async_tx(struct ntb_transport_qp *qp,
goto err_get_unmap;
}
- txd->callback = ntb_tx_copy_callback;
+ txd->callback_result = ntb_tx_copy_callback;
txd->callback_param = entry;
dma_set_unmap(txd, unmap);
@@ -1585,14 +1652,48 @@ static void ntb_async_tx(struct ntb_transport_qp *qp,
dmaengine_unmap_put(unmap);
dma_async_issue_pending(chan);
- qp->tx_async++;
- return;
+ return 0;
err_set_unmap:
dmaengine_unmap_put(unmap);
err_get_unmap:
dmaengine_unmap_put(unmap);
err:
+ return -ENXIO;
+}
+
+static void ntb_async_tx(struct ntb_transport_qp *qp,
+ struct ntb_queue_entry *entry)
+{
+ struct ntb_payload_header __iomem *hdr;
+ struct dma_chan *chan = qp->tx_dma_chan;
+ void __iomem *offset;
+ int res;
+
+ entry->tx_index = qp->tx_index;
+ offset = qp->tx_mw + qp->tx_max_frame * entry->tx_index;
+ hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
+ entry->tx_hdr = hdr;
+
+ iowrite32(entry->len, &hdr->len);
+ iowrite32((u32)qp->tx_pkts, &hdr->ver);
+
+ if (!chan)
+ goto err;
+
+ if (entry->len < copy_bytes)
+ goto err;
+
+ res = ntb_async_tx_submit(qp, entry);
+ if (res < 0)
+ goto err;
+
+ if (!entry->retries)
+ qp->tx_async++;
+
+ return;
+
+err:
ntb_memcpy_tx(entry, offset);
qp->tx_memcpy++;
}
@@ -1928,6 +2029,9 @@ int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
entry->buf = data;
entry->len = len;
entry->flags = 0;
+ entry->retries = 0;
+ entry->errors = 0;
+ entry->rx_index = 0;
ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_pend_q);
@@ -1970,6 +2074,9 @@ int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
entry->buf = data;
entry->len = len;
entry->flags = 0;
+ entry->errors = 0;
+ entry->retries = 0;
+ entry->tx_index = 0;
rc = ntb_process_tx(qp, entry);
if (rc)
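
With ntb_transport converted, a dmaengine client opts into error reporting by setting txd->callback_result instead of the legacy txd->callback; the engine then passes a struct dmaengine_result carrying an enum dmaengine_tx_result plus a residue count. A minimal hypothetical client sketch (my_* names are illustrative); ntb checks res for NULL only because it also invokes its callbacks directly on the CPU-copy fallback path, whereas the engine always supplies a result:

static void my_memcpy_done(void *param, const struct dmaengine_result *res)
{
	struct completion *done = param;

	if (res->result != DMA_TRANS_NOERROR)
		pr_warn("dma memcpy failed: %d (residue %u)\n",
			res->result, res->residue);
	complete(done);
}

	/* at submit time: */
	txd->callback_result = my_memcpy_done;
	txd->callback_param = &done;
	cookie = dmaengine_submit(txd);
	dma_async_issue_pending(chan);
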
diff --git a/drivers/tty/serial/8250/8250_mid.c b/drivers/tty/serial/8250/8250_mid.c
index 339de9c..121a7f2 100644
--- a/drivers/tty/serial/8250/8250_mid.c
+++ b/drivers/tty/serial/8250/8250_mid.c
@@ -99,27 +99,27 @@ static int dnv_handle_irq(struct uart_port *p)
struct uart_8250_port *up = up_to_u8250p(p);
unsigned int fisr = serial_port_in(p, INTEL_MID_UART_DNV_FISR);
u32 status;
- int ret = IRQ_NONE;
+ int ret = 0;
int err;
if (fisr & BIT(2)) {
err = hsu_dma_get_status(&mid->dma_chip, 1, &status);
if (err > 0) {
serial8250_rx_dma_flush(up);
- ret |= IRQ_HANDLED;
+ ret |= 1;
} else if (err == 0)
ret |= hsu_dma_do_irq(&mid->dma_chip, 1, status);
}
if (fisr & BIT(1)) {
err = hsu_dma_get_status(&mid->dma_chip, 0, &status);
if (err > 0)
- ret |= IRQ_HANDLED;
+ ret |= 1;
else if (err == 0)
ret |= hsu_dma_do_irq(&mid->dma_chip, 0, status);
}
if (fisr & BIT(0))
ret |= serial8250_handle_irq(p, serial_port_in(p, UART_IIR));
- return ret;
+ return IRQ_RETVAL(ret);
}
#define DNV_DMA_CHAN_OFFSET 0x80
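
The dnv_handle_irq() change works because hsu_dma_do_irq() evidently now returns a plain int (0 or 1; its conversion falls outside this section) rather than an irqreturn_t, so the handler accumulates handled-status into an int and converts exactly once on exit. From <linux/irqreturn.h>:

#define IRQ_RETVAL(x)	((x) ? IRQ_HANDLED : IRQ_NONE)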