author    John W. Linville <linville@tuxdriver.com>  2012-04-12 17:49:28 (GMT)
committer John W. Linville <linville@tuxdriver.com>  2012-04-12 17:49:28 (GMT)
commit    8065248069097dddf9945acfb2081025e9618c16 (patch)
tree      eddf3fb0372ba0f65c01382d386942ea8d18932d /drivers/dma/mxs-dma.c
parent    e66a8ddff72e85605f2212a0ebc666c7e9116641 (diff)
parent    b4838d12e1f3cb48c2489a0b08733b5dbf848297 (diff)
download  linux-fsl-qoriq-8065248069097dddf9945acfb2081025e9618c16.tar.xz
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless
Diffstat (limited to 'drivers/dma/mxs-dma.c')
-rw-r--r--  drivers/dma/mxs-dma.c  60
1 file changed, 37 insertions, 23 deletions
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index b06cd4c..c81ef7e 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -22,12 +22,14 @@
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
+#include <linux/fsl/mxs-dma.h>
#include <asm/irq.h>
#include <mach/mxs.h>
-#include <mach/dma.h>
#include <mach/common.h>
+#include "dmaengine.h"
+
/*
* NOTE: The term "PIO" throughout the mxs-dma implementation means
* PIO mode of mxs apbh-dma and apbx-dma. With this working mode,
@@ -111,7 +113,6 @@ struct mxs_dma_chan {
struct mxs_dma_ccw *ccw;
dma_addr_t ccw_phys;
int desc_count;
- dma_cookie_t last_completed;
enum dma_status status;
unsigned int flags;
#define MXS_DMA_SG_LOOP (1 << 0)
@@ -193,19 +194,6 @@ static void mxs_dma_resume_chan(struct mxs_dma_chan *mxs_chan)
mxs_chan->status = DMA_IN_PROGRESS;
}
-static dma_cookie_t mxs_dma_assign_cookie(struct mxs_dma_chan *mxs_chan)
-{
- dma_cookie_t cookie = mxs_chan->chan.cookie;
-
- if (++cookie < 0)
- cookie = 1;
-
- mxs_chan->chan.cookie = cookie;
- mxs_chan->desc.cookie = cookie;
-
- return cookie;
-}
-
static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan)
{
return container_of(chan, struct mxs_dma_chan, chan);
@@ -217,7 +205,7 @@ static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx)
mxs_dma_enable_chan(mxs_chan);
- return mxs_dma_assign_cookie(mxs_chan);
+ return dma_cookie_assign(tx);
}
static void mxs_dma_tasklet(unsigned long data)
@@ -274,7 +262,7 @@ static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id)
stat1 &= ~(1 << channel);
if (mxs_chan->status == DMA_SUCCESS)
- mxs_chan->last_completed = mxs_chan->desc.cookie;
+ dma_cookie_complete(&mxs_chan->desc);
/* schedule tasklet on this channel */
tasklet_schedule(&mxs_chan->tasklet);
@@ -349,10 +337,32 @@ static void mxs_dma_free_chan_resources(struct dma_chan *chan)
clk_disable_unprepare(mxs_dma->clk);
}
+/*
+ * How to use the flags for ->device_prep_slave_sg() :
+ * [1] If there is only one DMA command in the DMA chain, the code should be:
+ * ......
+ * ->device_prep_slave_sg(DMA_CTRL_ACK);
+ * ......
+ * [2] If there are two DMA commands in the DMA chain, the code should be
+ * ......
+ * ->device_prep_slave_sg(0);
+ * ......
+ * ->device_prep_slave_sg(DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ * ......
+ * [3] If there are more than two DMA commands in the DMA chain, the code
+ * should be:
+ * ......
+ * ->device_prep_slave_sg(0); // First
+ * ......
+ * ->device_prep_slave_sg(DMA_PREP_INTERRUPT [| DMA_CTRL_ACK]);
+ * ......
+ * ->device_prep_slave_sg(DMA_PREP_INTERRUPT | DMA_CTRL_ACK); // Last
+ * ......
+ */
static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl,
unsigned int sg_len, enum dma_transfer_direction direction,
- unsigned long append)
+ unsigned long flags, void *context)
{
struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
@@ -360,6 +370,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
struct scatterlist *sg;
int i, j;
u32 *pio;
+ bool append = flags & DMA_PREP_INTERRUPT;
int idx = append ? mxs_chan->desc_count : 0;
if (mxs_chan->status == DMA_IN_PROGRESS && !append)
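
The comment block added in the hunk above documents the flag convention but only sketches the calls. Purely as an illustration (not part of this patch), a client driver chaining three DMA commands through the generic dmaengine wrappers might look roughly like the sketch below; the function name and scatterlist arguments are hypothetical and error handling is abbreviated.

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

/* Illustrative sketch only -- not part of the patch shown here. */
static int example_issue_three_cmds(struct dma_chan *chan,
				    struct scatterlist *sg_cmd,
				    struct scatterlist *sg_data,
				    struct scatterlist *sg_status)
{
	struct dma_async_tx_descriptor *desc;

	/* First command in the chain: no flags, as in case [3] above */
	desc = dmaengine_prep_slave_sg(chan, sg_cmd, 1, DMA_MEM_TO_DEV, 0);
	if (!desc)
		return -EINVAL;

	/* Middle command: DMA_PREP_INTERRUPT appends to the existing chain */
	desc = dmaengine_prep_slave_sg(chan, sg_data, 1, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT);
	if (!desc)
		return -EINVAL;

	/* Last command: DMA_CTRL_ACK additionally requests CCW_WAIT4END */
	desc = dmaengine_prep_slave_sg(chan, sg_status, 1, DMA_DEV_TO_MEM,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EINVAL;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}

The remaining hunks below make the driver honor this convention: DMA_PREP_INTERRUPT selects append mode, and CCW_WAIT4END is now set only when DMA_CTRL_ACK is passed.
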
@@ -386,7 +397,6 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
ccw->bits |= CCW_CHAIN;
ccw->bits &= ~CCW_IRQ;
ccw->bits &= ~CCW_DEC_SEM;
- ccw->bits &= ~CCW_WAIT4END;
} else {
idx = 0;
}
@@ -401,7 +411,8 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
ccw->bits = 0;
ccw->bits |= CCW_IRQ;
ccw->bits |= CCW_DEC_SEM;
- ccw->bits |= CCW_WAIT4END;
+ if (flags & DMA_CTRL_ACK)
+ ccw->bits |= CCW_WAIT4END;
ccw->bits |= CCW_HALT_ON_TERM;
ccw->bits |= CCW_TERM_FLUSH;
ccw->bits |= BF_CCW(sg_len, PIO_NUM);
@@ -432,7 +443,8 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
ccw->bits &= ~CCW_CHAIN;
ccw->bits |= CCW_IRQ;
ccw->bits |= CCW_DEC_SEM;
- ccw->bits |= CCW_WAIT4END;
+ if (flags & DMA_CTRL_ACK)
+ ccw->bits |= CCW_WAIT4END;
}
}
}
@@ -447,7 +459,8 @@ err_out:
static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
- size_t period_len, enum dma_transfer_direction direction)
+ size_t period_len, enum dma_transfer_direction direction,
+ void *context)
{
struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
@@ -538,7 +551,7 @@ static enum dma_status mxs_dma_tx_status(struct dma_chan *chan,
dma_cookie_t last_used;
last_used = chan->cookie;
- dma_set_tx_state(txstate, mxs_chan->last_completed, last_used, 0);
+ dma_set_tx_state(txstate, chan->completed_cookie, last_used, 0);
return mxs_chan->status;
}
@@ -630,6 +643,7 @@ static int __init mxs_dma_probe(struct platform_device *pdev)
mxs_chan->mxs_dma = mxs_dma;
mxs_chan->chan.device = &mxs_dma->dma_device;
+ dma_cookie_init(&mxs_chan->chan);
tasklet_init(&mxs_chan->tasklet, mxs_dma_tasklet,
(unsigned long) mxs_chan);
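
For context on the cookie changes above: the open-coded mxs_dma_assign_cookie()/last_completed bookkeeping is replaced by the generic cookie helpers from drivers/dma/dmaengine.h (hence the new #include "dmaengine.h"). A minimal sketch of that pattern, with hypothetical driver function names, looks roughly like this:

#include <linux/dmaengine.h>
#include "dmaengine.h"	/* private cookie helpers under drivers/dma/ */

/* Hypothetical driver hooks showing where each helper is used. */
static dma_cookie_t example_tx_submit(struct dma_async_tx_descriptor *tx)
{
	/* Assigns the next cookie to both tx and its channel,
	 * replacing the driver-local increment-and-wrap logic. */
	return dma_cookie_assign(tx);
}

static void example_irq_complete(struct dma_async_tx_descriptor *desc)
{
	/* Records desc->cookie as the channel's completed_cookie. */
	dma_cookie_complete(desc);
}

static void example_channel_init(struct dma_chan *chan)
{
	/* Initializes chan->cookie and chan->completed_cookie. */
	dma_cookie_init(chan);
}

The tx_status path then reads chan->completed_cookie directly, as in the dma_set_tx_state() hunk above, instead of a per-driver last_completed field.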