From 3565fe53334cd3f0d59ff5db0872de9370775a19 Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Mon, 30 May 2016 00:41:48 +0000 Subject: dmaengine: rcar-dmac: use list_add() on rcar_dmac_desc_put() For each descriptor, in addition to the memory used by the descriptors structure itself, the driver allocates a list of chunks as well as a buffer for hardware descriptors. Descriptors themselves are preallocated, and allocation of the chunks and buffer is performed the first time the descriptor is used. The memory isn't freed when the transfer is completed, as the chunks and buffer will be needed again when the descriptor is reused internally, so the driver keeps the memory around. If only a few descriptors are used concurrently, the current list_add_tail() implementation will result in all preallocated descriptors being used before going back to the first one, and will thus allocate chunks and a buffer for all preallocated descriptors. Using list_add() will put the complete descriptor at the head of the list of available descriptors, so the next transfer will be more likely to reuse a descriptor that already has associated memory instead of one that has never been used before. Signed-off-by: Kuninori Morimoto Signed-off-by: Vinod Koul diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c index dfb1792..433e982 100644 --- a/drivers/dma/sh/rcar-dmac.c +++ b/drivers/dma/sh/rcar-dmac.c @@ -510,7 +510,7 @@ static void rcar_dmac_desc_put(struct rcar_dmac_chan *chan, spin_lock_irqsave(&chan->lock, flags); list_splice_tail_init(&desc->chunks, &chan->desc.chunks_free); - list_add_tail(&desc->node, &chan->desc.free); + list_add(&desc->node, &chan->desc.free); spin_unlock_irqrestore(&chan->lock, flags); } -- cgit v0.10.2 From 92d794dfb699bd7243d9f56656cc1850a48b9f04 Mon Sep 17 00:00:00 2001 From: Kedareswara rao Appana Date: Wed, 18 May 2016 13:17:30 +0530 Subject: dmaengine: vdma: Add support for cyclic dma mode This patch adds support for AXI DMA cyclic dma mode. In cyclic mode, DMA fetches and processes the same BDs without interruption. The DMA continues to fetch and process until it is stopped or reset. Signed-off-by: Kedareswara rao Appana Signed-off-by: Vinod Koul diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c index df91185..3edd454 100644 --- a/drivers/dma/xilinx/xilinx_vdma.c +++ b/drivers/dma/xilinx/xilinx_vdma.c @@ -157,6 +157,7 @@ /* AXI DMA Specific Masks/Bit fields */ #define XILINX_DMA_MAX_TRANS_LEN GENMASK(22, 0) #define XILINX_DMA_CR_COALESCE_MAX GENMASK(23, 16) +#define XILINX_DMA_CR_CYCLIC_BD_EN_MASK BIT(4) #define XILINX_DMA_CR_COALESCE_SHIFT 16 #define XILINX_DMA_BD_SOP BIT(27) #define XILINX_DMA_BD_EOP BIT(26) @@ -278,11 +279,13 @@ struct xilinx_cdma_tx_segment { * @async_tx: Async transaction descriptor * @segments: TX segments list * @node: Node in the channel descriptors list + * @cyclic: Check for cyclic transfers. */ struct xilinx_dma_tx_descriptor { struct dma_async_tx_descriptor async_tx; struct list_head segments; struct list_head node; + bool cyclic; }; /** @@ -302,6 +305,7 @@ struct xilinx_dma_tx_descriptor { * @direction: Transfer direction * @num_frms: Number of frames * @has_sg: Support scatter transfers + * @cyclic: Check for cyclic transfers. 
* @genlock: Support genlock mode * @err: Channel has errors * @tasklet: Cleanup work after irq @@ -312,6 +316,7 @@ struct xilinx_dma_tx_descriptor { * @desc_submitcount: Descriptor h/w submitted count * @residue: Residue for AXI DMA * @seg_v: Statically allocated segments base + * @cyclic_seg_v: Statically allocated segment base for cyclic transfers * @start_transfer: Differentiate b/w DMA IP's transfer */ struct xilinx_dma_chan { @@ -330,6 +335,7 @@ struct xilinx_dma_chan { enum dma_transfer_direction direction; int num_frms; bool has_sg; + bool cyclic; bool genlock; bool err; struct tasklet_struct tasklet; @@ -340,6 +346,7 @@ struct xilinx_dma_chan { u32 desc_submitcount; u32 residue; struct xilinx_axidma_tx_segment *seg_v; + struct xilinx_axidma_tx_segment *cyclic_seg_v; void (*start_transfer)(struct xilinx_dma_chan *chan); }; @@ -660,13 +667,37 @@ static void xilinx_dma_free_chan_resources(struct dma_chan *dchan) dev_dbg(chan->dev, "Free all channel resources.\n"); xilinx_dma_free_descriptors(chan); - if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) + if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { + xilinx_dma_free_tx_segment(chan, chan->cyclic_seg_v); xilinx_dma_free_tx_segment(chan, chan->seg_v); + } dma_pool_destroy(chan->desc_pool); chan->desc_pool = NULL; } /** + * xilinx_dma_chan_handle_cyclic - Cyclic dma callback + * @chan: Driver specific dma channel + * @desc: dma transaction descriptor + * @flags: flags for spin lock + */ +static void xilinx_dma_chan_handle_cyclic(struct xilinx_dma_chan *chan, + struct xilinx_dma_tx_descriptor *desc, + unsigned long *flags) +{ + dma_async_tx_callback callback; + void *callback_param; + + callback = desc->async_tx.callback; + callback_param = desc->async_tx.callback_param; + if (callback) { + spin_unlock_irqrestore(&chan->lock, *flags); + callback(callback_param); + spin_lock_irqsave(&chan->lock, *flags); + } +} + +/** * xilinx_dma_chan_desc_cleanup - Clean channel descriptors * @chan: Driver specific DMA channel */ @@ -681,6 +712,11 @@ static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan) dma_async_tx_callback callback; void *callback_param; + if (desc->cyclic) { + xilinx_dma_chan_handle_cyclic(chan, desc, &flags); + break; + } + /* Remove from the list of running transactions */ list_del(&desc->node); @@ -757,7 +793,7 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan) return -ENOMEM; } - if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) + if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { /* * For AXI DMA case after submitting a pending_list, keep * an extra segment allocated so that the "next descriptor" @@ -768,6 +804,15 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan) */ chan->seg_v = xilinx_axidma_alloc_tx_segment(chan); + /* + * For cyclic DMA mode we need to program the tail Descriptor + * register with a value which is not a part of the BD chain + * so allocating a desc segment during channel allocation for + * programming tail descriptor. 
+ */ + chan->cyclic_seg_v = xilinx_axidma_alloc_tx_segment(chan); + } + dma_cookie_init(dchan); if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { @@ -1157,8 +1202,12 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan) /* Start the transfer */ if (chan->has_sg) { - dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC, - tail_segment->phys); + if (chan->cyclic) + dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC, + chan->cyclic_seg_v->phys); + else + dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC, + tail_segment->phys); } else { struct xilinx_axidma_tx_segment *segment; struct xilinx_axidma_desc_hw *hw; @@ -1209,7 +1258,8 @@ static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan) list_for_each_entry_safe(desc, next, &chan->active_list, node) { list_del(&desc->node); - dma_cookie_complete(&desc->async_tx); + if (!desc->cyclic) + dma_cookie_complete(&desc->async_tx); list_add_tail(&desc->node, &chan->done_list); } } @@ -1397,6 +1447,11 @@ static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx) unsigned long flags; int err; + if (chan->cyclic) { + xilinx_dma_free_tx_descriptor(chan, desc); + return -EBUSY; + } + if (chan->err) { /* * If reset fails, need to hard reset the system. @@ -1414,6 +1469,9 @@ static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx) /* Put this transaction onto the tail of the pending queue */ append_desc_queue(chan, desc); + if (desc->cyclic) + chan->cyclic = true; + spin_unlock_irqrestore(&chan->lock, flags); return cookie; @@ -1669,12 +1727,112 @@ error: } /** + * xilinx_dma_prep_dma_cyclic - prepare descriptors for a DMA_SLAVE transaction + * @chan: DMA channel + * @sgl: scatterlist to transfer to/from + * @sg_len: number of entries in @scatterlist + * @direction: DMA direction + * @flags: transfer ack flags + */ +static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic( + struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len, + size_t period_len, enum dma_transfer_direction direction, + unsigned long flags) +{ + struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); + struct xilinx_dma_tx_descriptor *desc; + struct xilinx_axidma_tx_segment *segment, *head_segment, *prev = NULL; + size_t copy, sg_used; + unsigned int num_periods; + int i; + u32 reg; + + num_periods = buf_len / period_len; + + if (!is_slave_direction(direction)) + return NULL; + + /* Allocate a transaction descriptor. */ + desc = xilinx_dma_alloc_tx_descriptor(chan); + if (!desc) + return NULL; + + chan->direction = direction; + dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); + desc->async_tx.tx_submit = xilinx_dma_tx_submit; + + for (i = 0; i < num_periods; ++i) { + sg_used = 0; + + while (sg_used < period_len) { + struct xilinx_axidma_desc_hw *hw; + + /* Get a free segment */ + segment = xilinx_axidma_alloc_tx_segment(chan); + if (!segment) + goto error; + + /* + * Calculate the maximum number of bytes to transfer, + * making sure it is less than the hw limit + */ + copy = min_t(size_t, period_len - sg_used, + XILINX_DMA_MAX_TRANS_LEN); + hw = &segment->hw; + hw->buf_addr = buf_addr + sg_used + (period_len * i); + hw->control = copy; + + if (prev) + prev->hw.next_desc = segment->phys; + + prev = segment; + sg_used += copy; + + /* + * Insert the segment into the descriptor segments + * list. 
+ */ + list_add_tail(&segment->node, &desc->segments); + } + } + + head_segment = list_first_entry(&desc->segments, + struct xilinx_axidma_tx_segment, node); + desc->async_tx.phys = head_segment->phys; + + desc->cyclic = true; + reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); + reg |= XILINX_DMA_CR_CYCLIC_BD_EN_MASK; + dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg); + + /* For the last DMA_MEM_TO_DEV transfer, set EOP */ + if (direction == DMA_MEM_TO_DEV) { + segment->hw.control |= XILINX_DMA_BD_SOP; + segment = list_last_entry(&desc->segments, + struct xilinx_axidma_tx_segment, + node); + segment->hw.control |= XILINX_DMA_BD_EOP; + segment->hw.next_desc = (u32) head_segment->phys; + } + + return &desc->async_tx; + +error: + xilinx_dma_free_tx_descriptor(chan, desc); + return NULL; +} + +/** * xilinx_dma_terminate_all - Halt the channel and free descriptors * @chan: Driver specific DMA Channel pointer */ static int xilinx_dma_terminate_all(struct dma_chan *dchan) { struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); + u32 reg; + + if (chan->cyclic) + xilinx_dma_chan_reset(chan); /* Halt the DMA engine */ xilinx_dma_halt(chan); @@ -1682,6 +1840,13 @@ static int xilinx_dma_terminate_all(struct dma_chan *dchan) /* Remove and free all of the descriptors in the lists */ xilinx_dma_free_descriptors(chan); + if (chan->cyclic) { + reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); + reg &= ~XILINX_DMA_CR_CYCLIC_BD_EN_MASK; + dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg); + chan->cyclic = false; + } + return 0; } @@ -2218,7 +2383,10 @@ static int xilinx_dma_probe(struct platform_device *pdev) xdev->common.device_tx_status = xilinx_dma_tx_status; xdev->common.device_issue_pending = xilinx_dma_issue_pending; if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { + dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask); xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg; + xdev->common.device_prep_dma_cyclic = + xilinx_dma_prep_dma_cyclic; /* Residue calculation is supported by only AXI DMA */ xdev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; -- cgit v0.10.2 From 62147866517b3c2b22f52adec2b21bf97ce1e684 Mon Sep 17 00:00:00 2001 From: Kedareswara rao Appana Date: Wed, 18 May 2016 13:17:31 +0530 Subject: dmaengine: vdma: Use dma_pool_zalloc dma_pool_zalloc() combines dma_pool_alloc() and a zeroing memset(), so this patch updates the driver to use dma_pool_zalloc().
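As a minimal sketch of the conversion (illustrative only; pool, segment and phys are placeholder names, and the actual driver change is in the diff below):

        /* Before: allocate from the DMA pool, then zero the memory by hand. */
        segment = dma_pool_alloc(pool, GFP_ATOMIC, &phys);
        if (!segment)
                return NULL;
        memset(segment, 0, sizeof(*segment));

        /* After: dma_pool_zalloc() returns memory that is already zeroed. */
        segment = dma_pool_zalloc(pool, GFP_ATOMIC, &phys);
        if (!segment)
                return NULL;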
Signed-off-by: Kedareswara rao Appana Signed-off-by: Vinod Koul diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c index 3edd454..0f5b38a 100644 --- a/drivers/dma/xilinx/xilinx_vdma.c +++ b/drivers/dma/xilinx/xilinx_vdma.c @@ -498,11 +498,10 @@ xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan) struct xilinx_cdma_tx_segment *segment; dma_addr_t phys; - segment = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &phys); + segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys); if (!segment) return NULL; - memset(segment, 0, sizeof(*segment)); segment->phys = phys; return segment; @@ -520,11 +519,10 @@ xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan) struct xilinx_axidma_tx_segment *segment; dma_addr_t phys; - segment = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &phys); + segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys); if (!segment) return NULL; - memset(segment, 0, sizeof(*segment)); segment->phys = phys; return segment; -- cgit v0.10.2 From a4a1e53df41270b9fcb5f0c42872fd08ab42a74f Mon Sep 17 00:00:00 2001 From: Stefan Roese Date: Wed, 1 Jun 2016 12:43:31 +0200 Subject: dmaengine: mv_xor: Minor coding style fix Remove the space before the "err_free_dma:" label in mv_xor_channel_add(). Signed-off-by: Stefan Roese Cc: Gregory CLEMENT Cc: Marcin Wojtas Reviewed-by: Thomas Petazzoni Signed-off-by: Vinod Koul diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index 25d1dad..55815c1 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c @@ -1055,7 +1055,7 @@ mv_xor_channel_add(struct mv_xor_device *xordev, err_free_irq: free_irq(mv_chan->irq, mv_chan); - err_free_dma: +err_free_dma: dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE, mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool); return ERR_PTR(ret); -- cgit v0.10.2 From 2e4ed0879eb15230cb58c3915ac51620480375c3 Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Tue, 7 Jun 2016 11:19:44 +0300 Subject: dmaengine: edma: Use early completion for intermediate paRAM set in slave_sg The driver limits the physical number of paRAM slots to be used by channels. If the transfer needs more slots (more SGs) then the transfer is broken up into smaller chunks. When a chunk is finished, the driver rewrites the physical slots and continues the transfer. This setup can take some time, and we might miss DMA events. If the intermediate set's completion uses early completion (the interrupt happens when the last slot is issued to the TPTC, not when the transfer is finished by the TPTC), we will have a bit more time to update the paRAM slots and will be less likely to miss events. Signed-off-by: Peter Ujfalusi Signed-off-by: Vinod Koul diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index 8181ed1..7c76b55 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c @@ -1114,14 +1114,17 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg( edesc->absync = ret; edesc->residue += sg_dma_len(sg); - /* If this is the last in a current SG set of transactions, - enable interrupts so that next set is processed */ - if (!((i+1) % MAX_NR_SG)) - edesc->pset[i].param.opt |= TCINTEN; - - /* If this is the last set, enable completion interrupt flag */ if (i == sg_len - 1) + /* Enable completion interrupt */ edesc->pset[i].param.opt |= TCINTEN; + else if (!((i+1) % MAX_NR_SG)) + /* + * Enable early completion interrupt for the + * intermediate set. In this case the driver will be + * notified when the paRAM set is submitted to TC.
This + * will allow more time to set up the next set of slots. + */ + edesc->pset[i].param.opt |= (TCINTEN | TCCMODE); } edesc->residue_stat = edesc->residue; -- cgit v0.10.2 From 0161df13250035e7599f3cce6039bd046b7647dc Mon Sep 17 00:00:00 2001 From: Ben Dooks Date: Tue, 7 Jun 2016 16:50:03 +0100 Subject: dmaengine: ste_dma40_ll: make d40_width_to_bits static Fix warning due to d40_width_to_bits() not being used outside this file. Fixes: drivers/dma/ste_dma40_ll.c:13:4: warning: symbol 'd40_width_to_bits' was not declared. Should it be static? Signed-off-by: Ben Dooks Signed-off-by: Vinod Koul diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c index 27b818d..13b42dd 100644 --- a/drivers/dma/ste_dma40_ll.c +++ b/drivers/dma/ste_dma40_ll.c @@ -10,7 +10,7 @@ #include "ste_dma40_ll.h" -u8 d40_width_to_bits(enum dma_slave_buswidth width) +static u8 d40_width_to_bits(enum dma_slave_buswidth width) { if (width == DMA_SLAVE_BUSWIDTH_1_BYTE) return STEDMA40_ESIZE_8_BIT; -- cgit v0.10.2 From 7978a583b1d4413f4e58b145081a4a93d94398f7 Mon Sep 17 00:00:00 2001 From: Ben Dooks Date: Tue, 7 Jun 2016 16:54:48 +0100 Subject: dmaengine: sirf: fix un-exported struct warnings The sirfsoc_dmadata structs are not used outside the driver, so remove build warnings by making them static. Fixes: drivers/dma/sirf-dma.c:1129:24: warning: symbol 'sirfsoc_dmadata_a6' was not declared. Should it be static? drivers/dma/sirf-dma.c:1134:24: warning: symbol 'sirfsoc_dmadata_a7v1' was not declared. Should it be static? drivers/dma/sirf-dma.c:1139:24: warning: symbol 'sirfsoc_dmadata_a7v2' was not declared. Should it be static? Signed-off-by: Ben Dooks Signed-off-by: Vinod Koul diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c index e48350e..dde392d 100644 --- a/drivers/dma/sirf-dma.c +++ b/drivers/dma/sirf-dma.c @@ -1126,17 +1126,17 @@ static const struct dev_pm_ops sirfsoc_dma_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(sirfsoc_dma_pm_suspend, sirfsoc_dma_pm_resume) }; -struct sirfsoc_dmadata sirfsoc_dmadata_a6 = { +static struct sirfsoc_dmadata sirfsoc_dmadata_a6 = { .exec = sirfsoc_dma_execute_hw_a6, .type = SIRFSOC_DMA_VER_A6, }; -struct sirfsoc_dmadata sirfsoc_dmadata_a7v1 = { +static struct sirfsoc_dmadata sirfsoc_dmadata_a7v1 = { .exec = sirfsoc_dma_execute_hw_a7v1, .type = SIRFSOC_DMA_VER_A7V1, }; -struct sirfsoc_dmadata sirfsoc_dmadata_a7v2 = { +static struct sirfsoc_dmadata sirfsoc_dmadata_a7v2 = { .exec = sirfsoc_dma_execute_hw_a7v2, .type = SIRFSOC_DMA_VER_A7V2, }; -- cgit v0.10.2 From 192dc8c07594c43b6d58242b6bb2db742e3421c0 Mon Sep 17 00:00:00 2001 From: Ben Dooks Date: Tue, 7 Jun 2016 17:09:15 +0100 Subject: dmaengine: at_xdmac: fix un-exported functions The at_xdmac_init_used_desc() and at_xdmac_prep_dma_memset() functions are not exported outside the driver, so make them static to avoid the following warnings: drivers/dma/at_xdmac.c:459:6: warning: symbol 'at_xdmac_init_used_desc' was not declared. Should it be static? drivers/dma/at_xdmac.c:1205:32: warning: symbol 'at_xdmac_prep_dma_memset' was not declared. Should it be static? 
Signed-off-by: Ben Dooks Signed-off-by: Vinod Koul diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index 8e304b1..2503b40 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -456,7 +456,7 @@ static struct at_xdmac_desc *at_xdmac_alloc_desc(struct dma_chan *chan, return desc; } -void at_xdmac_init_used_desc(struct at_xdmac_desc *desc) +static void at_xdmac_init_used_desc(struct at_xdmac_desc *desc) { memset(&desc->lld, 0, sizeof(desc->lld)); INIT_LIST_HEAD(&desc->descs_list); @@ -1202,7 +1202,7 @@ static struct at_xdmac_desc *at_xdmac_memset_create_desc(struct dma_chan *chan, return desc; } -struct dma_async_tx_descriptor * +static struct dma_async_tx_descriptor * at_xdmac_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value, size_t len, unsigned long flags) { -- cgit v0.10.2 From 636372281f514af1910f5df5e83bc72b56b4bd15 Mon Sep 17 00:00:00 2001 From: Ben Dooks Date: Tue, 7 Jun 2016 17:14:56 +0100 Subject: dmaengine: bcm2835: fix unexported function The bcm2835_dma_prep_dma_memcpy() function is not exported outside the driver, so make it static to avoid the following warning: drivers/dma/bcm2835-dma.c:616:32: warning: symbol 'bcm2835_dma_prep_dma_memcpy' was not declared. Should it be static? Signed-off-by: Ben Dooks Reviewed-by: Eric Anholt Signed-off-by: Vinod Koul diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c index 6149b27..9ecb942 100644 --- a/drivers/dma/bcm2835-dma.c +++ b/drivers/dma/bcm2835-dma.c @@ -613,7 +613,7 @@ static void bcm2835_dma_issue_pending(struct dma_chan *chan) spin_unlock_irqrestore(&c->vc.lock, flags); } -struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_memcpy( +static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_memcpy( struct dma_chan *chan, dma_addr_t dst, dma_addr_t src, size_t len, unsigned long flags) { -- cgit v0.10.2 From e167a0b6dc49cb3694fea888560aa462532951dd Mon Sep 17 00:00:00 2001 From: Kedareswara rao Appana Date: Thu, 9 Jun 2016 11:32:12 +0530 Subject: dmaengine: vdma: Fix compilation warning in cyclic dma mode This patch fixes the below compilation warning. drivers/dma/xilinx/xilinx_vdma.c: In function 'xilinx_dma_prep_dma_cyclic': drivers/dma/xilinx/xilinx_vdma.c:1808:23: warning: 'segment' may be used uninitialized in this function [-Wmaybe-uninitialized] segment->hw.control |= XILINX_DMA_BD_SOP; The start of packet (SOP) flag should be set on the first segment in the desc chain, not on the last segment of the chain.
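Condensed, the corrected layout for a cyclic MM2S ring looks like this (a sketch using the driver's own field names; 'tail' is an illustrative local variable):

        /* SOP goes on the first BD of the chain... */
        head_segment->hw.control |= XILINX_DMA_BD_SOP;

        /* ...EOP on the last BD, which also links back to the head to close the ring. */
        tail = list_last_entry(&desc->segments,
                               struct xilinx_axidma_tx_segment, node);
        tail->hw.control |= XILINX_DMA_BD_EOP;
        tail->hw.next_desc = (u32)head_segment->phys;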
Signed-off-by: Kedareswara rao Appana Reported-by: Stephen Rothwell Signed-off-by: Vinod Koul diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c index 0f5b38a..0af0cf4 100644 --- a/drivers/dma/xilinx/xilinx_vdma.c +++ b/drivers/dma/xilinx/xilinx_vdma.c @@ -1805,7 +1805,7 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic( /* For the last DMA_MEM_TO_DEV transfer, set EOP */ if (direction == DMA_MEM_TO_DEV) { - segment->hw.control |= XILINX_DMA_BD_SOP; + head_segment->hw.control |= XILINX_DMA_BD_SOP; segment = list_last_entry(&desc->segments, struct xilinx_axidma_tx_segment, node); -- cgit v0.10.2 From f67c3bdabbab47555232f1b883d4758792dd2bc4 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 13 Jun 2016 17:07:33 +0200 Subject: dmaengine: xilinx-vdma: add some sanity checks The newly added xilinx_dma_prep_dma_cyclic function sometimes causes a gcc warning about the use of the 'segment' variable in case we never run into the inner loop of the function: dma/xilinx/xilinx_vdma.c: In function 'xilinx_dma_prep_dma_cyclic': dma/xilinx/xilinx_vdma.c:1808:23: error: 'segment' may be used uninitialized in this function [-Werror=maybe-uninitialized] segment->hw.control |= XILINX_DMA_BD_SOP; This can only happen if the period len is zero (which would cause other problems earlier), or if the buffer is shorter than a period. Neither of them should ever happen, but by adding an explicit check for these two cases, we can abort in a more controlled way, and the compiler is able to see that we never use uninitialized data. Signed-off-by: Arnd Bergmann Signed-off-by: Vinod Koul diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c index 0af0cf4..914268b 100644 --- a/drivers/dma/xilinx/xilinx_vdma.c +++ b/drivers/dma/xilinx/xilinx_vdma.c @@ -1745,8 +1745,14 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic( int i; u32 reg; + if (!period_len) + return NULL; + num_periods = buf_len / period_len; + if (!num_periods) + return NULL; + if (!is_slave_direction(direction)) return NULL; -- cgit v0.10.2 From 7b0e00d912f24d08d3ffee377cb478529194f75f Mon Sep 17 00:00:00 2001 From: Thierry Reding Date: Tue, 14 Jun 2016 16:18:46 +0200 Subject: dmaengine: tegra: Remove some whitespace funkiness There are some places where whitespace is used in very funky ways. Fix the most serious ones to make the code easier on the eye. Signed-off-by: Thierry Reding Acked-by: Jon Hunter Signed-off-by: Vinod Koul diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c index 01e316f..80c490f 100644 --- a/drivers/dma/tegra20-apb-dma.c +++ b/drivers/dma/tegra20-apb-dma.c @@ -484,7 +484,7 @@ static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc, * load new configuration.
*/ tegra_dma_pause(tdc, false); - status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS); + status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS); /* * If interrupt is pending then do nothing as the ISR will handle @@ -905,7 +905,6 @@ static int get_transfer_param(struct tegra_dma_channel *tdc, unsigned long *apb_seq, unsigned long *csr, unsigned int *burst_size, enum dma_slave_buswidth *slave_bw) { - switch (direction) { case DMA_MEM_TO_DEV: *apb_addr = tdc->dma_sconfig.dst_addr; @@ -948,8 +947,8 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg( { struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); struct tegra_dma_desc *dma_desc; - unsigned int i; - struct scatterlist *sg; + unsigned int i; + struct scatterlist *sg; unsigned long csr, ahb_seq, apb_ptr, apb_seq; struct list_head req_list; struct tegra_dma_sg_req *sg_req = NULL; @@ -1062,7 +1061,7 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic( { struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); struct tegra_dma_desc *dma_desc = NULL; - struct tegra_dma_sg_req *sg_req = NULL; + struct tegra_dma_sg_req *sg_req = NULL; unsigned long csr, ahb_seq, apb_ptr, apb_seq; int len; size_t remain_len; @@ -1204,7 +1203,6 @@ static void tegra_dma_free_chan_resources(struct dma_chan *dc) { struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); struct tegra_dma *tdma = tdc->tdma; - struct tegra_dma_desc *dma_desc; struct tegra_dma_sg_req *sg_req; struct list_head dma_desc_list; @@ -1305,7 +1303,7 @@ static const struct tegra_dma_chip_data tegra148_dma_chip_data = { static int tegra_dma_probe(struct platform_device *pdev) { - struct resource *res; + struct resource *res; struct tegra_dma *tdma; int ret; int i; -- cgit v0.10.2 From f0cba685caa30927e3e5f35074c8f6a62aa628ac Mon Sep 17 00:00:00 2001 From: Kedareswara rao Appana Date: Tue, 7 Jun 2016 19:21:15 +0530 Subject: dmaengine: vdma: Add 64 bit addressing support for the axi dma The AXI DMA is a soft IP, which can be programmed to support 32 bit addressing or greater than 32 bit addressing. When the AXI DMA IP is configured for 32 bit address space in simple DMA mode, the buffer address is specified by a single register (18h for the MM2S channel and 48h for the S2MM channel). When configured in SG mode, the current descriptor and tail descriptor are specified by a single register (08h for curdesc and 10h for tail desc for the MM2S channel, and 38h for curdesc and 40h for tail desc for S2MM). When the AXI DMA core is configured for an address space greater than 32 bits, each buffer address or descriptor address is specified by a combination of two registers. The first register specifies the LSB 32 bits of the address, while the next register specifies the MSB 32 bits of the address. For example, 48h will specify the LSB 32 bits while 4Ch will specify the MSB 32 bits of the first start address. So we need to program two registers at a time. This patch adds the 64 bit addressing support for the axidma IP in the driver.
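The two-register programming described above boils down to a split write; a minimal sketch (register offsets taken from the commit message; regs and addr are placeholders, and writel() stands in for the driver's own accessors):

        /* Program a greater-than-32-bit S2MM start address as two 32-bit halves. */
        writel(lower_32_bits(addr), regs + 0x48);       /* LSB 32 bits */
        writel(upper_32_bits(addr), regs + 0x4C);       /* MSB 32 bits */

The patch below wraps this in xilinx_write(), which falls back to a single 32-bit write when the core was synthesized without extended addressing.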
Signed-off-by: Kedareswara rao Appana Signed-off-by: Vinod Koul diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c index 914268b..40509a4 100644 --- a/drivers/dma/xilinx/xilinx_vdma.c +++ b/drivers/dma/xilinx/xilinx_vdma.c @@ -45,6 +45,7 @@ #include #include #include +#include #include "../dmaengine.h" @@ -195,22 +196,22 @@ struct xilinx_vdma_desc_hw { /** * struct xilinx_axidma_desc_hw - Hardware Descriptor for AXI DMA * @next_desc: Next Descriptor Pointer @0x00 - * @pad1: Reserved @0x04 + * @next_desc_msb: MSB of Next Descriptor Pointer @0x04 * @buf_addr: Buffer address @0x08 - * @pad2: Reserved @0x0C - * @pad3: Reserved @0x10 - * @pad4: Reserved @0x14 + * @buf_addr_msb: MSB of Buffer address @0x0C + * @pad1: Reserved @0x10 + * @pad2: Reserved @0x14 * @control: Control field @0x18 * @status: Status field @0x1C * @app: APP Fields @0x20 - 0x30 */ struct xilinx_axidma_desc_hw { u32 next_desc; - u32 pad1; + u32 next_desc_msb; u32 buf_addr; + u32 buf_addr_msb; + u32 pad1; u32 pad2; - u32 pad3; - u32 pad4; u32 control; u32 status; u32 app[XILINX_DMA_NUM_APP_WORDS]; @@ -461,6 +462,34 @@ static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg, writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4); } +static inline void dma_writeq(struct xilinx_dma_chan *chan, u32 reg, u64 value) +{ + lo_hi_writeq(value, chan->xdev->regs + chan->ctrl_offset + reg); +} + +static inline void xilinx_write(struct xilinx_dma_chan *chan, u32 reg, + dma_addr_t addr) +{ + if (chan->ext_addr) + dma_writeq(chan, reg, addr); + else + dma_ctrl_write(chan, reg, addr); +} + +static inline void xilinx_axidma_buf(struct xilinx_dma_chan *chan, + struct xilinx_axidma_desc_hw *hw, + dma_addr_t buf_addr, size_t sg_used, + size_t period_len) +{ + if (chan->ext_addr) { + hw->buf_addr = lower_32_bits(buf_addr + sg_used + period_len); + hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used + + period_len); + } else { + hw->buf_addr = buf_addr + sg_used + period_len; + } +} + /* ----------------------------------------------------------------------------- * Descriptors and segments alloc and free */ @@ -1190,8 +1219,8 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan) } if (chan->has_sg) - dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC, - head_desc->async_tx.phys); + xilinx_write(chan, XILINX_DMA_REG_CURDESC, + head_desc->async_tx.phys); xilinx_dma_start(chan); @@ -1201,11 +1230,11 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan) /* Start the transfer */ if (chan->has_sg) { if (chan->cyclic) - dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC, - chan->cyclic_seg_v->phys); + xilinx_write(chan, XILINX_DMA_REG_TAILDESC, + chan->cyclic_seg_v->phys); else - dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC, - tail_segment->phys); + xilinx_write(chan, XILINX_DMA_REG_TAILDESC, + tail_segment->phys); } else { struct xilinx_axidma_tx_segment *segment; struct xilinx_axidma_desc_hw *hw; @@ -1215,7 +1244,7 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan) node); hw = &segment->hw; - dma_ctrl_write(chan, XILINX_DMA_REG_SRCDSTADDR, hw->buf_addr); + xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR, hw->buf_addr); /* Start the transfer */ dma_ctrl_write(chan, XILINX_DMA_REG_BTT, @@ -1679,7 +1708,8 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg( hw = &segment->hw; /* Fill in the descriptor */ - hw->buf_addr = sg_dma_address(sg) + sg_used; + xilinx_axidma_buf(chan, hw, sg_dma_address(sg), + sg_used, 0); hw->control = copy; @@ 
-1783,7 +1813,8 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic( copy = min_t(size_t, period_len - sg_used, XILINX_DMA_MAX_TRANS_LEN); hw = &segment->hw; - hw->buf_addr = buf_addr + sg_used + (period_len * i); + xilinx_axidma_buf(chan, hw, buf_addr, sg_used, + period_len * i); hw->control = copy; if (prev) -- cgit v0.10.2 From 9791e71a0632d9ebf3995d202ad2707c7460507f Mon Sep 17 00:00:00 2001 From: Kedareswara rao Appana Date: Tue, 7 Jun 2016 19:21:16 +0530 Subject: dmaengine: vdma: Add 64 bit addressing support for the axi cdma The AXI CDMA is a soft IP, which can be programmed to support 32 bit addressing or greater than 32 bit addressing. When the AXI CDMA IP is configured for 32 bit address space in simple DMA mode, the source/destination buffer address is specified by a single register (18h for the source buffer address and 20h for the destination buffer address). When configured in SG mode, the current descriptor and tail descriptor are specified by a single register (08h for curdesc and 10h for tail desc). When the AXI CDMA core is configured for an address space greater than 32 bits, each buffer address or descriptor address is specified by a combination of two registers. The first register specifies the LSB 32 bits of the address, while the next register specifies the MSB 32 bits of the address. For example, 08h will specify the LSB 32 bits while 0Ch will specify the MSB 32 bits of the first start address. So we need to program two registers at a time. This patch adds the 64 bit addressing support to the axicdma IP in the driver. Signed-off-by: Kedareswara rao Appana Signed-off-by: Vinod Koul diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c index 40509a4..40f754b 100644 --- a/drivers/dma/xilinx/xilinx_vdma.c +++ b/drivers/dma/xilinx/xilinx_vdma.c @@ -220,21 +220,21 @@ struct xilinx_axidma_desc_hw { /** * struct xilinx_cdma_desc_hw - Hardware Descriptor * @next_desc: Next Descriptor Pointer @0x00 - * @pad1: Reserved @0x04 + * @next_descmsb: Next Descriptor Pointer MSB @0x04 * @src_addr: Source address @0x08 - * @pad2: Reserved @0x0C + * @src_addrmsb: Source address MSB @0x0C * @dest_addr: Destination address @0x10 - * @pad3: Reserved @0x14 + * @dest_addrmsb: Destination address MSB @0x14 * @control: Control field @0x18 * @status: Status field @0x1C */ struct xilinx_cdma_desc_hw { u32 next_desc; - u32 pad1; + u32 next_desc_msb; u32 src_addr; - u32 pad2; + u32 src_addr_msb; u32 dest_addr; - u32 pad3; + u32 dest_addr_msb; u32 control; u32 status; } __aligned(64); @@ -1137,12 +1137,12 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan) } if (chan->has_sg) { - dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC, - head_desc->async_tx.phys); + xilinx_write(chan, XILINX_DMA_REG_CURDESC, + head_desc->async_tx.phys); /* Update tail ptr register which will start the transfer */ - dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC, - tail_segment->phys); + xilinx_write(chan, XILINX_DMA_REG_TAILDESC, + tail_segment->phys); } else { /* In simple mode */ struct xilinx_cdma_tx_segment *segment; @@ -1154,8 +1154,8 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan) hw = &segment->hw; - dma_ctrl_write(chan, XILINX_CDMA_REG_SRCADDR, hw->src_addr); - dma_ctrl_write(chan, XILINX_CDMA_REG_DSTADDR, hw->dest_addr); + xilinx_write(chan, XILINX_CDMA_REG_SRCADDR, hw->src_addr); + xilinx_write(chan, XILINX_CDMA_REG_DSTADDR, hw->dest_addr); /* Start the transfer */ dma_ctrl_write(chan, XILINX_DMA_REG_BTT, @@ -1626,6 +1626,10 @@ 
xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst, hw->control = len; hw->src_addr = dma_src; hw->dest_addr = dma_dst; + if (chan->ext_addr) { + hw->src_addr_msb = upper_32_bits(dma_src); + hw->dest_addr_msb = upper_32_bits(dma_dst); + } /* Fill the previous next descriptor with current */ prev = list_last_entry(&desc->segments, -- cgit v0.10.2 From 5e2fe1e7c2780651be85cbab0d065112f5fb4649 Mon Sep 17 00:00:00 2001 From: Peter Griffin Date: Tue, 7 Jun 2016 18:38:34 +0100 Subject: dmaengine: fsl-edma: Fix clock handling error paths Currently fsl-edma doesn't clk_disable_unprepare() its clocks on error conditions. This patch adds a fsl_disable_clocks helper for this, and also only disables clocks which were enabled if encountering an error whilst enabling clocks. Signed-off-by: Peter Griffin Signed-off-by: Vinod Koul diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c index be2e62b..7208fc9 100644 --- a/drivers/dma/fsl-edma.c +++ b/drivers/dma/fsl-edma.c @@ -852,6 +852,14 @@ fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma return 0; } +static void fsl_disable_clocks(struct fsl_edma_engine *fsl_edma) +{ + int i; + + for (i = 0; i < DMAMUX_NR; i++) + clk_disable_unprepare(fsl_edma->muxclk[i]); +} + static int fsl_edma_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; @@ -897,6 +905,10 @@ static int fsl_edma_probe(struct platform_device *pdev) ret = clk_prepare_enable(fsl_edma->muxclk[i]); if (ret) { + /* disable only clks which were enabled on error */ + for (; i >= 0; i--) + clk_disable_unprepare(fsl_edma->muxclk[i]); + dev_err(&pdev->dev, "DMAMUX clk block failed.\n"); return ret; } @@ -952,6 +964,7 @@ static int fsl_edma_probe(struct platform_device *pdev) ret = dma_async_device_register(&fsl_edma->dma_dev); if (ret) { dev_err(&pdev->dev, "Can't register Freescale eDMA engine.\n"); + fsl_disable_clocks(fsl_edma); return ret; } @@ -959,6 +972,7 @@ static int fsl_edma_probe(struct platform_device *pdev) if (ret) { dev_err(&pdev->dev, "Can't register Freescale eDMA of_dma.\n"); dma_async_device_unregister(&fsl_edma->dma_dev); + fsl_disable_clocks(fsl_edma); return ret; } @@ -972,13 +986,10 @@ static int fsl_edma_remove(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct fsl_edma_engine *fsl_edma = platform_get_drvdata(pdev); - int i; of_dma_controller_free(np); dma_async_device_unregister(&fsl_edma->dma_dev); - - for (i = 0; i < DMAMUX_NR; i++) - clk_disable_unprepare(fsl_edma->muxclk[i]); + fsl_disable_clocks(fsl_edma); return 0; } -- cgit v0.10.2 From a86144da9d1a439733a5aea526fe7c7b28b31d4a Mon Sep 17 00:00:00 2001 From: Peter Griffin Date: Tue, 7 Jun 2016 18:38:35 +0100 Subject: dmaengine: fsl-edma: print error code in error messages. It is useful to print the error code as part of the error message. Signed-off-by: Peter Griffin Signed-off-by: Vinod Koul diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c index 7208fc9..cc06eea 100644 --- a/drivers/dma/fsl-edma.c +++ b/drivers/dma/fsl-edma.c @@ -963,14 +963,16 @@ static int fsl_edma_probe(struct platform_device *pdev) ret = dma_async_device_register(&fsl_edma->dma_dev); if (ret) { - dev_err(&pdev->dev, "Can't register Freescale eDMA engine.\n"); + dev_err(&pdev->dev, + "Can't register Freescale eDMA engine. 
(%d)\n", ret); fsl_disable_clocks(fsl_edma); return ret; } ret = of_dma_controller_register(np, fsl_edma_xlate, fsl_edma); if (ret) { - dev_err(&pdev->dev, "Can't register Freescale eDMA of_dma.\n"); + dev_err(&pdev->dev, + "Can't register Freescale eDMA of_dma. (%d)\n", ret); dma_async_device_unregister(&fsl_edma->dma_dev); fsl_disable_clocks(fsl_edma); return ret; -- cgit v0.10.2 From 95b0aa3e10b08b9ee20b5f5e19d162d049744a9b Mon Sep 17 00:00:00 2001 From: Peter Griffin Date: Tue, 7 Jun 2016 18:38:36 +0100 Subject: dmaengine: coh901318: Only calculate residue if txstate exists. There is no point in calculating the residue if there is no txstate to store the value. Signed-off-by: Peter Griffin Acked-by: Linus Walleij Signed-off-by: Vinod Koul diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c index c340ca9..c100616 100644 --- a/drivers/dma/coh901318.c +++ b/drivers/dma/coh901318.c @@ -2422,7 +2422,7 @@ coh901318_tx_status(struct dma_chan *chan, dma_cookie_t cookie, enum dma_status ret; ret = dma_cookie_status(chan, cookie, txstate); - if (ret == DMA_COMPLETE) + if (ret == DMA_COMPLETE || !txstate) return ret; dma_set_residue(txstate, coh901318_get_bytes_left(chan)); -- cgit v0.10.2 From e841b80f450ba788c4855af5cc4c535ed9ab7e47 Mon Sep 17 00:00:00 2001 From: Peter Griffin Date: Tue, 7 Jun 2016 18:38:37 +0100 Subject: dmaengine: s3c24xx: Simplify code in s3c24xx_dma_tx_status() Doing so saves a few lines of code in the driver. Signed-off-by: Peter Griffin Signed-off-by: Vinod Koul diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c index 17ccdfd..f7d2c7a 100644 --- a/drivers/dma/s3c24xx-dma.c +++ b/drivers/dma/s3c24xx-dma.c @@ -768,16 +768,12 @@ static enum dma_status s3c24xx_dma_tx_status(struct dma_chan *chan, spin_lock_irqsave(&s3cchan->vc.lock, flags); ret = dma_cookie_status(chan, cookie, txstate); - if (ret == DMA_COMPLETE) { - spin_unlock_irqrestore(&s3cchan->vc.lock, flags); - return ret; - } /* * There's no point calculating the residue if there's * no txstate to store the value. */ - if (!txstate) { + if (ret == DMA_COMPLETE || !txstate) { spin_unlock_irqrestore(&s3cchan->vc.lock, flags); return ret; } -- cgit v0.10.2 From a90e56e5b1779836132645e3352c7d5b60cddca8 Mon Sep 17 00:00:00 2001 From: Peter Griffin Date: Tue, 7 Jun 2016 18:38:38 +0100 Subject: dmaengine: ste_dma40: Only calculate residue if txstate exists. There is no point calculating the residue if there is no txstate to store the value. Signed-off-by: Peter Griffin Acked-by: Linus Walleij Signed-off-by: Vinod Koul diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 6fb8307..378cc47 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -2588,7 +2588,7 @@ static enum dma_status d40_tx_status(struct dma_chan *chan, } ret = dma_cookie_status(chan, cookie, txstate); - if (ret != DMA_COMPLETE) + if (ret != DMA_COMPLETE && txstate) dma_set_residue(txstate, stedma40_residue(chan)); if (d40_is_paused(d40c)) -- cgit v0.10.2 From b9ab9d10d9898cd022fece44a474e5ddb785d639 Mon Sep 17 00:00:00 2001 From: Peter Griffin Date: Tue, 7 Jun 2016 18:38:39 +0100 Subject: dmaengine: sun6i-dma: Only calculate residue if state exists. There is no point in calculating the residue if state does not exist to store the value. 
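All of these residue patches converge on the same tx_status() shape; condensed (a sketch, with bytes_left() standing in for each driver's own residue helper):

        ret = dma_cookie_status(chan, cookie, txstate);
        if (ret == DMA_COMPLETE || !txstate)
                return ret;     /* transfer done, or nowhere to store the residue */

        dma_set_residue(txstate, bytes_left(chan));
        return ret;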
Signed-off-by: Peter Griffin Signed-off-by: Vinod Koul diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c index 5065ca4..3835fcd 100644 --- a/drivers/dma/sun6i-dma.c +++ b/drivers/dma/sun6i-dma.c @@ -865,7 +865,7 @@ static enum dma_status sun6i_dma_tx_status(struct dma_chan *chan, size_t bytes = 0; ret = dma_cookie_status(chan, cookie, state); - if (ret == DMA_COMPLETE) + if (ret == DMA_COMPLETE || !state) return ret; spin_lock_irqsave(&vchan->vc.lock, flags); -- cgit v0.10.2 From 71f7e6cc55003ca6f616ff4094253d28de5d9254 Mon Sep 17 00:00:00 2001 From: Peter Griffin Date: Tue, 7 Jun 2016 18:38:40 +0100 Subject: dmaengine: tegra20-apb-dma: Only calculate residue if txstate exists. There is no point calculating the residue if there is no txstate to store the value. Signed-off-by: Peter Griffin Signed-off-by: Vinod Koul diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c index 01e316f..7f4af8c 100644 --- a/drivers/dma/tegra20-apb-dma.c +++ b/drivers/dma/tegra20-apb-dma.c @@ -814,7 +814,7 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc, unsigned int residual; ret = dma_cookie_status(dc, cookie, txstate); - if (ret == DMA_COMPLETE) + if (ret == DMA_COMPLETE || !txstate) return ret; spin_lock_irqsave(&tdc->lock, flags); -- cgit v0.10.2 From aef94fea97eb77f86159375825a370b45d9f2fec Mon Sep 17 00:00:00 2001 From: Peter Griffin Date: Tue, 7 Jun 2016 18:38:41 +0100 Subject: dmaengine: Remove site specific OOM error messages on kzalloc If kzalloc() fails it will issue its own error message, including a dump_stack(). So remove the site specific error messages. Signed-off-by: Peter Griffin Acked-by: Jon Hunter Acked-by: Linus Walleij Signed-off-by: Vinod Koul diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index 81db1c4..939a7c3 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c @@ -1443,8 +1443,6 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy( dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT); if (!dsg) { pl08x_free_txd(pl08x, txd); - dev_err(&pl08x->adev->dev, "%s no memory for pl080 sg\n", - __func__); return NULL; } list_add_tail(&dsg->node, &txd->dsg_list); @@ -1901,11 +1899,8 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x, */ for (i = 0; i < channels; i++) { chan = kzalloc(sizeof(*chan), GFP_KERNEL); - if (!chan) { - dev_err(&pl08x->adev->dev, - "%s no memory for channel\n", __func__); + if (!chan) return -ENOMEM; - } chan->host = pl08x; chan->state = PL08X_CHAN_IDLE; @@ -2360,9 +2355,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) pl08x->phy_chans = kzalloc((vd->channels * sizeof(*pl08x->phy_chans)), GFP_KERNEL); if (!pl08x->phy_chans) { - dev_err(&adev->dev, "%s failed to allocate " - "physical channel holders\n", - __func__); ret = -ENOMEM; goto out_no_phychans; } diff --git a/drivers/dma/bestcomm/bestcomm.c b/drivers/dma/bestcomm/bestcomm.c index 180fedb..7ce8437 100644 --- a/drivers/dma/bestcomm/bestcomm.c +++ b/drivers/dma/bestcomm/bestcomm.c @@ -397,8 +397,6 @@ static int mpc52xx_bcom_probe(struct platform_device *op) /* Get a clean struct */ bcom_eng = kzalloc(sizeof(struct bcom_engine), GFP_KERNEL); if (!bcom_eng) { - printk(KERN_ERR DRIVER_NAME ": " - "Can't allocate state structure\n"); rv = -ENOMEM; goto error_sramclean; } diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index 8181ed1..3c84cd8 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c @@ -1069,10 +1069,8 @@ static struct dma_async_tx_descriptor 
*edma_prep_slave_sg( edesc = kzalloc(sizeof(*edesc) + sg_len * sizeof(edesc->pset[0]), GFP_ATOMIC); - if (!edesc) { - dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__); + if (!edesc) return NULL; - } edesc->pset_nr = sg_len; edesc->residue = 0; @@ -1173,10 +1171,8 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy( edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]), GFP_ATOMIC); - if (!edesc) { - dev_dbg(dev, "Failed to allocate a descriptor\n"); + if (!edesc) return NULL; - } edesc->pset_nr = nslots; edesc->residue = edesc->residue_stat = len; @@ -1298,10 +1294,8 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic( edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]), GFP_ATOMIC); - if (!edesc) { - dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__); + if (!edesc) return NULL; - } edesc->cyclic = 1; edesc->pset_nr = nslots; @@ -2207,10 +2201,8 @@ static int edma_probe(struct platform_device *pdev) return ret; ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL); - if (!ecc) { - dev_err(dev, "Can't allocate controller\n"); + if (!ecc) return -ENOMEM; - } ecc->dev = dev; ecc->id = pdev->id; diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index a8828ed..911b717 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -1234,7 +1234,6 @@ static int fsl_dma_chan_probe(struct fsldma_device *fdev, /* alloc channel */ chan = kzalloc(sizeof(*chan), GFP_KERNEL); if (!chan) { - dev_err(fdev->dev, "no free memory for DMA channels!\n"); err = -ENOMEM; goto out_return; } @@ -1340,7 +1339,6 @@ static int fsldma_of_probe(struct platform_device *op) fdev = kzalloc(sizeof(*fdev), GFP_KERNEL); if (!fdev) { - dev_err(&op->dev, "No enough memory for 'priv'\n"); err = -ENOMEM; goto out_return; } diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c index 1ba2fd7..35961af 100644 --- a/drivers/dma/k3dma.c +++ b/drivers/dma/k3dma.c @@ -425,10 +425,9 @@ static struct dma_async_tx_descriptor *k3_dma_prep_memcpy( num = DIV_ROUND_UP(len, DMA_MAX_SIZE); ds = kzalloc(sizeof(*ds) + num * sizeof(ds->desc_hw[0]), GFP_ATOMIC); - if (!ds) { - dev_dbg(chan->device->dev, "vchan %p: kzalloc fail\n", &c->vc); + if (!ds) return NULL; - } + ds->desc_hw_lli = __virt_to_phys((unsigned long)&ds->desc_hw[0]); ds->size = len; ds->desc_num = num; @@ -481,10 +480,9 @@ static struct dma_async_tx_descriptor *k3_dma_prep_slave_sg( } ds = kzalloc(sizeof(*ds) + num * sizeof(ds->desc_hw[0]), GFP_ATOMIC); - if (!ds) { - dev_dbg(chan->device->dev, "vchan %p: kzalloc fail\n", &c->vc); + if (!ds) return NULL; - } + ds->desc_hw_lli = __virt_to_phys((unsigned long)&ds->desc_hw[0]); ds->desc_num = num; num = 0; diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c index 3df0422..ba7f412 100644 --- a/drivers/dma/mmp_tdma.c +++ b/drivers/dma/mmp_tdma.c @@ -551,10 +551,9 @@ static int mmp_tdma_chan_init(struct mmp_tdma_device *tdev, /* alloc channel */ tdmac = devm_kzalloc(tdev->dev, sizeof(*tdmac), GFP_KERNEL); - if (!tdmac) { - dev_err(tdev->dev, "no free memory for DMA channels!\n"); + if (!tdmac) return -ENOMEM; - } + if (irq) tdmac->irq = irq; tdmac->dev = tdev->dev; diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c index 631c443..b3a1d9a 100644 --- a/drivers/dma/moxart-dma.c +++ b/drivers/dma/moxart-dma.c @@ -574,10 +574,8 @@ static int moxart_probe(struct platform_device *pdev) struct moxart_dmadev *mdc; mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL); - if (!mdc) { - dev_err(dev, "can't allocate DMA container\n"); + if (!mdc) return 
-ENOMEM; - } irq = irq_of_parse_and_map(node, 0); if (irq == NO_IRQ) { diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c index 2b5a198..9f0e98b 100644 --- a/drivers/dma/nbpfaxi.c +++ b/drivers/dma/nbpfaxi.c @@ -1300,10 +1300,9 @@ static int nbpf_probe(struct platform_device *pdev) nbpf = devm_kzalloc(dev, sizeof(*nbpf) + num_channels * sizeof(nbpf->chan[0]), GFP_KERNEL); - if (!nbpf) { - dev_err(dev, "Memory allocation failed\n"); + if (!nbpf) return -ENOMEM; - } + dma_dev = &nbpf->dma_dev; dma_dev->dev = dev; diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 372b435..c8767d3 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -2828,10 +2828,8 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) /* Allocate a new DMAC and its Channels */ pl330 = devm_kzalloc(&adev->dev, sizeof(*pl330), GFP_KERNEL); - if (!pl330) { - dev_err(&adev->dev, "unable to allocate mem\n"); + if (!pl330) return -ENOMEM; - } pd = &pl330->ddma; pd->dev = &adev->dev; @@ -2890,7 +2888,6 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) pl330->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL); if (!pl330->peripherals) { ret = -ENOMEM; - dev_err(&adev->dev, "unable to allocate pl330->peripherals\n"); goto probe_err2; } diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c index 9217f89..da3688b 100644 --- a/drivers/dma/ppc4xx/adma.c +++ b/drivers/dma/ppc4xx/adma.c @@ -4084,7 +4084,6 @@ static int ppc440spe_adma_probe(struct platform_device *ofdev) /* create a device */ adev = kzalloc(sizeof(*adev), GFP_KERNEL); if (!adev) { - dev_err(&ofdev->dev, "failed to allocate device\n"); initcode = PPC_ADMA_INIT_ALLOC; ret = -ENOMEM; goto err_adev_alloc; @@ -4145,7 +4144,6 @@ static int ppc440spe_adma_probe(struct platform_device *ofdev) /* create a channel */ chan = kzalloc(sizeof(*chan), GFP_KERNEL); if (!chan) { - dev_err(&ofdev->dev, "can't allocate channel structure\n"); initcode = PPC_ADMA_INIT_CHANNEL; ret = -ENOMEM; goto err_chan_alloc; diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c index f7d2c7a..0d2d187 100644 --- a/drivers/dma/s3c24xx-dma.c +++ b/drivers/dma/s3c24xx-dma.c @@ -1101,11 +1101,8 @@ static int s3c24xx_dma_init_virtual_channels(struct s3c24xx_dma_engine *s3cdma, */ for (i = 0; i < channels; i++) { chan = devm_kzalloc(dmadev->dev, sizeof(*chan), GFP_KERNEL); - if (!chan) { - dev_err(dmadev->dev, - "%s no memory for channel\n", __func__); + if (!chan) return -ENOMEM; - } chan->id = i; chan->host = s3cdma; diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c index 80d8640..c94ffab 100644 --- a/drivers/dma/sh/shdmac.c +++ b/drivers/dma/sh/shdmac.c @@ -532,11 +532,8 @@ static int sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id, sh_chan = devm_kzalloc(sdev->dma_dev.dev, sizeof(struct sh_dmae_chan), GFP_KERNEL); - if (!sh_chan) { - dev_err(sdev->dma_dev.dev, - "No free memory for allocating dma channels!\n"); + if (!sh_chan) return -ENOMEM; - } schan = &sh_chan->shdma_chan; schan->max_xfer_len = SH_DMA_TCR_MAX + 1; @@ -732,10 +729,8 @@ static int sh_dmae_probe(struct platform_device *pdev) shdev = devm_kzalloc(&pdev->dev, sizeof(struct sh_dmae_device), GFP_KERNEL); - if (!shdev) { - dev_err(&pdev->dev, "Not enough memory\n"); + if (!shdev) return -ENOMEM; - } dma_dev = &shdev->shdma_dev.dma_dev; diff --git a/drivers/dma/sh/sudmac.c b/drivers/dma/sh/sudmac.c index 6da2eaa..69b9564 100644 --- a/drivers/dma/sh/sudmac.c +++ b/drivers/dma/sh/sudmac.c @@ -245,11 +245,8 @@ static int 
sudmac_chan_probe(struct sudmac_device *su_dev, int id, int irq, int err; sc = devm_kzalloc(&pdev->dev, sizeof(struct sudmac_chan), GFP_KERNEL); - if (!sc) { - dev_err(sdev->dma_dev.dev, - "No free memory for allocating dma channels!\n"); + if (!sc) return -ENOMEM; - } schan = &sc->shdma_chan; schan->max_xfer_len = 64 * 1024 * 1024 - 1; @@ -349,10 +346,8 @@ static int sudmac_probe(struct platform_device *pdev) err = -ENOMEM; su_dev = devm_kzalloc(&pdev->dev, sizeof(struct sudmac_device), GFP_KERNEL); - if (!su_dev) { - dev_err(&pdev->dev, "Not enough memory\n"); + if (!su_dev) return err; - } dma_dev = &su_dev->shdma_dev.dma_dev; diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c index e48350e..8ea51c7 100644 --- a/drivers/dma/sirf-dma.c +++ b/drivers/dma/sirf-dma.c @@ -854,10 +854,9 @@ static int sirfsoc_dma_probe(struct platform_device *op) int ret, i; sdma = devm_kzalloc(dev, sizeof(*sdma), GFP_KERNEL); - if (!sdma) { - dev_err(dev, "Memory exhausted!\n"); + if (!sdma) return -ENOMEM; - } + data = (struct sirfsoc_dmadata *) (of_match_device(op->dev.driver->of_match_table, &op->dev)->data); diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 378cc47..8b18e44 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -3237,10 +3237,8 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) (num_phy_chans + num_log_chans + num_memcpy_chans) * sizeof(struct d40_chan), GFP_KERNEL); - if (base == NULL) { - d40_err(&pdev->dev, "Out of memory\n"); + if (base == NULL) goto failure; - } base->rev = rev; base->clk = clk; diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c index 7f4af8c..032884f 100644 --- a/drivers/dma/tegra20-apb-dma.c +++ b/drivers/dma/tegra20-apb-dma.c @@ -300,10 +300,8 @@ static struct tegra_dma_desc *tegra_dma_desc_get( /* Allocate DMA desc */ dma_desc = kzalloc(sizeof(*dma_desc), GFP_NOWAIT); - if (!dma_desc) { - dev_err(tdc2dev(tdc), "dma_desc alloc failed\n"); + if (!dma_desc) return NULL; - } dma_async_tx_descriptor_init(&dma_desc->txd, &tdc->dma_chan); dma_desc->txd.tx_submit = tegra_dma_tx_submit; @@ -340,8 +338,7 @@ static struct tegra_dma_sg_req *tegra_dma_sg_req_get( spin_unlock_irqrestore(&tdc->lock, flags); sg_req = kzalloc(sizeof(struct tegra_dma_sg_req), GFP_NOWAIT); - if (!sg_req) - dev_err(tdc2dev(tdc), "sg_req alloc failed\n"); + return sg_req; } @@ -1319,10 +1316,8 @@ static int tegra_dma_probe(struct platform_device *pdev) tdma = devm_kzalloc(&pdev->dev, sizeof(*tdma) + cdata->nr_channels * sizeof(struct tegra_dma_channel), GFP_KERNEL); - if (!tdma) { - dev_err(&pdev->dev, "Error: memory allocation failed\n"); + if (!tdma) return -ENOMEM; - } tdma->dev = &pdev->dev; tdma->chip_data = cdata; diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c index 559cd40..e82745a 100644 --- a/drivers/dma/timb_dma.c +++ b/drivers/dma/timb_dma.c @@ -337,18 +337,14 @@ static struct timb_dma_desc *td_alloc_init_desc(struct timb_dma_chan *td_chan) int err; td_desc = kzalloc(sizeof(struct timb_dma_desc), GFP_KERNEL); - if (!td_desc) { - dev_err(chan2dev(chan), "Failed to alloc descriptor\n"); + if (!td_desc) goto out; - } td_desc->desc_list_len = td_chan->desc_elems * TIMB_DMA_DESC_SIZE; td_desc->desc_list = kzalloc(td_desc->desc_list_len, GFP_KERNEL); - if (!td_desc->desc_list) { - dev_err(chan2dev(chan), "Failed to alloc descriptor\n"); + if (!td_desc->desc_list) goto err; - } dma_async_tx_descriptor_init(&td_desc->txd, chan); td_desc->txd.tx_submit = td_tx_submit; -- cgit v0.10.2 
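The removals above all follow one idiom; in miniature (an illustrative before/after, with names borrowed from the fsldma hunk):

        /* Before: the message duplicates what the allocator already reports. */
        chan = kzalloc(sizeof(*chan), GFP_KERNEL);
        if (!chan) {
                dev_err(dev, "no free memory for DMA channels!\n");
                return -ENOMEM;
        }

        /* After: a failed kzalloc() already warns and dumps the stack. */
        chan = kzalloc(sizeof(*chan), GFP_KERNEL);
        if (!chan)
                return -ENOMEM;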
From d646162b8b74b10aeb2451762065904201859bd2 Mon Sep 17 00:00:00 2001 From: Ben Dooks Date: Tue, 21 Jun 2016 18:12:39 +0100 Subject: dmaengine: ti-dma-crossbar: make omap_dmaxbar_init static The omap_dmaxbar_init() function is not exported or declared outside the driver, so make it static to fix the following sparse warning: drivers/dma/ti-dma-crossbar.c:455:5: warning: symbol 'omap_dmaxbar_init' was not declared. Should it be static? Signed-off-by: Ben Dooks Signed-off-by: Vinod Koul diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c index e107779..5ae294b 100644 --- a/drivers/dma/ti-dma-crossbar.c +++ b/drivers/dma/ti-dma-crossbar.c @@ -452,7 +452,7 @@ static struct platform_driver ti_dma_xbar_driver = { .probe = ti_dma_xbar_probe, }; -int omap_dmaxbar_init(void) +static int omap_dmaxbar_init(void) { return platform_driver_register(&ti_dma_xbar_driver); } -- cgit v0.10.2 From 3935e08768ff777da6496521b1fc36f72823672c Mon Sep 17 00:00:00 2001 From: Alexandre Belloni Date: Wed, 29 Jun 2016 19:44:51 +0200 Subject: dmaengine: at_xdmac: fix debug string mbr_ds is an integer, don't use %pad to print it. Fixes: commit 268914f4e7a0 ("dmaengine: at_xdmac: use %pad format string for dma_addr_t") Reported-by: Dan Carpenter Signed-off-by: Alexandre Belloni Signed-off-by: Vinod Koul diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index 8e304b1..e4c5522 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -1195,8 +1195,8 @@ static struct at_xdmac_desc *at_xdmac_memset_create_desc(struct dma_chan *chan, desc->lld.mbr_cfg = chan_cc; dev_dbg(chan2dev(chan), - "%s: lld: mbr_da=%pad, mbr_ds=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n", - __func__, &desc->lld.mbr_da, &desc->lld.mbr_ds, desc->lld.mbr_ubc, + "%s: lld: mbr_da=%pad, mbr_ds=0x%08x, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n", + __func__, &desc->lld.mbr_da, desc->lld.mbr_ds, desc->lld.mbr_ubc, desc->lld.mbr_cfg); return desc; -- cgit v0.10.2 From 019bfcc65baa40a36ff0d1af611ecdc27199dad1 Mon Sep 17 00:00:00 2001 From: Jon Hunter Date: Wed, 29 Jun 2016 17:08:37 +0100 Subject: dmaengine: tegra-apb: Correct grammar in TX status debug message Correct the grammar in the debug message when no descriptor is found. Signed-off-by: Jon Hunter Signed-off-by: Vinod Koul diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c index 032884f..bc91fea 100644 --- a/drivers/dma/tegra20-apb-dma.c +++ b/drivers/dma/tegra20-apb-dma.c @@ -843,7 +843,7 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc, } } - dev_dbg(tdc2dev(tdc), "cookie %d does not found\n", cookie); + dev_dbg(tdc2dev(tdc), "cookie %d not found\n", cookie); spin_unlock_irqrestore(&tdc->lock, flags); return ret; } -- cgit v0.10.2 From 004f614eded1b5746ef610bbe17eba11e4fb5733 Mon Sep 17 00:00:00 2001 From: Jon Hunter Date: Wed, 29 Jun 2016 17:08:38 +0100 Subject: dmaengine: tegra-apb: Remove duplicated residue calculation The calculation of the DMA residue for the Tegra APB DMA is duplicated in two places in the tegra_dma_tx_status() function. Remove this duplicated code by moving calculation to the end of the function and only calculating if we found a valid descriptor. 
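The formula moved below computes the bytes outstanding in the current period; since a cyclic transfer wraps, bytes_transferred can exceed bytes_requested, hence the modulo. A worked example (illustrative numbers): with bytes_requested = 4096 and bytes_transferred = 9216, the residue is 4096 - (9216 % 4096) = 4096 - 1024 = 3072 bytes.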
Signed-off-by: Jon Hunter Signed-off-by: Vinod Koul diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c index bc91fea..a8de08b 100644 --- a/drivers/dma/tegra20-apb-dma.c +++ b/drivers/dma/tegra20-apb-dma.c @@ -819,13 +819,8 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc, /* Check on wait_ack desc status */ list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) { if (dma_desc->txd.cookie == cookie) { - residual = dma_desc->bytes_requested - - (dma_desc->bytes_transferred % - dma_desc->bytes_requested); - dma_set_residue(txstate, residual); ret = dma_desc->dma_status; - spin_unlock_irqrestore(&tdc->lock, flags); - return ret; + goto found; } } @@ -833,17 +828,22 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc, list_for_each_entry(sg_req, &tdc->pending_sg_req, node) { dma_desc = sg_req->dma_desc; if (dma_desc->txd.cookie == cookie) { - residual = dma_desc->bytes_requested - - (dma_desc->bytes_transferred % - dma_desc->bytes_requested); - dma_set_residue(txstate, residual); ret = dma_desc->dma_status; - spin_unlock_irqrestore(&tdc->lock, flags); - return ret; + goto found; } } dev_dbg(tdc2dev(tdc), "cookie %d not found\n", cookie); + dma_desc = NULL; + +found: + if (dma_desc) { + residual = dma_desc->bytes_requested - + (dma_desc->bytes_transferred % + dma_desc->bytes_requested); + dma_set_residue(txstate, residual); + } + spin_unlock_irqrestore(&tdc->lock, flags); return ret; } -- cgit v0.10.2 From d3183447ef561efe7e734cb4bc331519c276a786 Mon Sep 17 00:00:00 2001 From: Jon Hunter Date: Wed, 29 Jun 2016 17:08:39 +0100 Subject: dmaengine: tegra-apb: Return the actual descriptor status Commit 71f7e6cc5500 ('dmaengine: tegra20-apb-dma: Only calculate residue if txstate exists') changed the tegra_dma_tx_status() function to only calculate the residue if there is a valid 'txstate' pointer for storing the residue. Although this makes sense, this changed the behaviour of the function tegra_dma_tx_status() such that if the pointer 'txstate' is not valid, then we will return whatever state is returned by dma_cookie_status() and no longer return the state by looking up the DMA descriptor and returning its state. Please note that dma_cookie_status() will either return DMA_COMPLETE or DMA_IN_PROGRESS. However, if dma_cookie_status() returns DMA_IN_PROGRESS the actual status could be DMA_ERROR which will only be seen from checking the descriptor status. Therefore, even if 'txstate' is not valid, still check to see if there is a valid descriptor for the cookie in question and if so return the descriptor state. Finally, ensure the residue is still not calculated if the 'txstate' is not valid.
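The asymmetry described above is easy to miss: dma_cookie_status() can only ever answer DMA_COMPLETE or DMA_IN_PROGRESS, while DMA_ERROR is recorded solely in the descriptor. A condensed sketch of the resulting flow, where struct foo_desc and foo_find_desc() are invented stand-ins for the driver's descriptor lists and its two list walks:

	#include <linux/dmaengine.h>
	#include "../dmaengine.h"	/* dma_cookie_status(), dma_set_residue() */

	struct foo_desc {
		enum dma_status status;
		unsigned int residue;
	};

	/* Stand-in for the two list walks in the real driver. */
	static struct foo_desc *foo_find_desc(struct dma_chan *dc,
					      dma_cookie_t cookie);

	static enum dma_status foo_tx_status(struct dma_chan *dc,
					     dma_cookie_t cookie,
					     struct dma_tx_state *txstate)
	{
		struct foo_desc *desc;
		enum dma_status ret;

		ret = dma_cookie_status(dc, cookie, txstate);
		if (ret == DMA_COMPLETE)
			return ret;

		/* The core can only say COMPLETE or IN_PROGRESS; DMA_ERROR
		 * lives in the descriptor, so look the cookie up even when
		 * the caller passed txstate == NULL. */
		desc = foo_find_desc(dc, cookie);
		if (!desc)
			return ret;

		if (txstate)	/* residue only when the caller asked */
			dma_set_residue(txstate, desc->residue);

		return desc->status;
	}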
Signed-off-by: Jon Hunter Signed-off-by: Vinod Koul diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c index a8de08b..9d70b3a 100644 --- a/drivers/dma/tegra20-apb-dma.c +++ b/drivers/dma/tegra20-apb-dma.c @@ -811,7 +811,7 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc, unsigned int residual; ret = dma_cookie_status(dc, cookie, txstate); - if (ret == DMA_COMPLETE || !txstate) + if (ret == DMA_COMPLETE) return ret; spin_lock_irqsave(&tdc->lock, flags); @@ -837,7 +837,7 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc, dma_desc = NULL; found: - if (dma_desc) { + if (dma_desc && txstate) { residual = dma_desc->bytes_requested - (dma_desc->bytes_transferred % dma_desc->bytes_requested); -- cgit v0.10.2 From 7d2545599f5b09ccf6cdcab9ced58644a9cd038e Mon Sep 17 00:00:00 2001 From: Pramod Gurav Date: Fri, 17 Jun 2016 15:56:03 +0530 Subject: dmaengine: qcom-bam-dma: Add pm_runtime support Adds pm_runtime support for BAM DMA so that clock is enabled only when there is a transaction going on to help save power. Signed-off-by: Pramod Gurav Signed-off-by: Vinod Koul diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c index 969b481..4754891 100644 --- a/drivers/dma/qcom/bam_dma.c +++ b/drivers/dma/qcom/bam_dma.c @@ -48,6 +48,7 @@ #include #include #include +#include #include "../dmaengine.h" #include "../virt-dma.h" @@ -58,6 +59,8 @@ struct bam_desc_hw { __le16 flags; }; +#define BAM_DMA_AUTOSUSPEND_DELAY 100 + #define DESC_FLAG_INT BIT(15) #define DESC_FLAG_EOT BIT(14) #define DESC_FLAG_EOB BIT(13) @@ -527,12 +530,17 @@ static void bam_free_chan(struct dma_chan *chan) struct bam_device *bdev = bchan->bdev; u32 val; unsigned long flags; + int ret; + + ret = pm_runtime_get_sync(bdev->dev); + if (ret < 0) + return; vchan_free_chan_resources(to_virt_chan(chan)); if (bchan->curr_txd) { dev_err(bchan->bdev->dev, "Cannot free busy channel\n"); - return; + goto err; } spin_lock_irqsave(&bchan->vc.lock, flags); @@ -550,6 +558,10 @@ static void bam_free_chan(struct dma_chan *chan) /* disable irq */ writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_IRQ_EN)); + +err: + pm_runtime_mark_last_busy(bdev->dev); + pm_runtime_put_autosuspend(bdev->dev); } /** @@ -696,11 +708,18 @@ static int bam_pause(struct dma_chan *chan) struct bam_chan *bchan = to_bam_chan(chan); struct bam_device *bdev = bchan->bdev; unsigned long flag; + int ret; + + ret = pm_runtime_get_sync(bdev->dev); + if (ret < 0) + return ret; spin_lock_irqsave(&bchan->vc.lock, flag); writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_HALT)); bchan->paused = 1; spin_unlock_irqrestore(&bchan->vc.lock, flag); + pm_runtime_mark_last_busy(bdev->dev); + pm_runtime_put_autosuspend(bdev->dev); return 0; } @@ -715,11 +734,18 @@ static int bam_resume(struct dma_chan *chan) struct bam_chan *bchan = to_bam_chan(chan); struct bam_device *bdev = bchan->bdev; unsigned long flag; + int ret; + + ret = pm_runtime_get_sync(bdev->dev); + if (ret < 0) + return ret; spin_lock_irqsave(&bchan->vc.lock, flag); writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_HALT)); bchan->paused = 0; spin_unlock_irqrestore(&bchan->vc.lock, flag); + pm_runtime_mark_last_busy(bdev->dev); + pm_runtime_put_autosuspend(bdev->dev); return 0; } @@ -795,6 +821,7 @@ static irqreturn_t bam_dma_irq(int irq, void *data) { struct bam_device *bdev = data; u32 clr_mask = 0, srcs = 0; + int ret; srcs |= process_channel_irqs(bdev); @@ -802,6 +829,10 @@ static irqreturn_t bam_dma_irq(int irq, void *data) if (srcs & P_IRQ) 
tasklet_schedule(&bdev->task); + ret = pm_runtime_get_sync(bdev->dev); + if (ret < 0) + return ret; + if (srcs & BAM_IRQ) { clr_mask = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_STTS)); @@ -814,6 +845,9 @@ static irqreturn_t bam_dma_irq(int irq, void *data) writel_relaxed(clr_mask, bam_addr(bdev, 0, BAM_IRQ_CLR)); } + pm_runtime_mark_last_busy(bdev->dev); + pm_runtime_put_autosuspend(bdev->dev); + return IRQ_HANDLED; } @@ -893,6 +927,7 @@ static void bam_start_dma(struct bam_chan *bchan) struct bam_desc_hw *desc; struct bam_desc_hw *fifo = PTR_ALIGN(bchan->fifo_virt, sizeof(struct bam_desc_hw)); + int ret; lockdep_assert_held(&bchan->vc.lock); @@ -904,6 +939,10 @@ static void bam_start_dma(struct bam_chan *bchan) async_desc = container_of(vd, struct bam_async_desc, vd); bchan->curr_txd = async_desc; + ret = pm_runtime_get_sync(bdev->dev); + if (ret < 0) + return; + /* on first use, initialize the channel hardware */ if (!bchan->initialized) bam_chan_init_hw(bchan, async_desc->dir); @@ -946,6 +985,9 @@ static void bam_start_dma(struct bam_chan *bchan) wmb(); writel_relaxed(bchan->tail * sizeof(struct bam_desc_hw), bam_addr(bdev, bchan->id, BAM_P_EVNT_REG)); + + pm_runtime_mark_last_busy(bdev->dev); + pm_runtime_put_autosuspend(bdev->dev); } /** @@ -970,6 +1012,7 @@ static void dma_tasklet(unsigned long data) bam_start_dma(bchan); spin_unlock_irqrestore(&bchan->vc.lock, flags); } + } /** @@ -1213,6 +1256,13 @@ static int bam_dma_probe(struct platform_device *pdev) if (ret) goto err_unregister_dma; + pm_runtime_irq_safe(&pdev->dev); + pm_runtime_set_autosuspend_delay(&pdev->dev, BAM_DMA_AUTOSUSPEND_DELAY); + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_mark_last_busy(&pdev->dev); + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); + return 0; err_unregister_dma: @@ -1233,6 +1283,8 @@ static int bam_dma_remove(struct platform_device *pdev) struct bam_device *bdev = platform_get_drvdata(pdev); u32 i; + pm_runtime_force_suspend(&pdev->dev); + of_dma_controller_free(pdev->dev.of_node); dma_async_device_unregister(&bdev->common); @@ -1260,11 +1312,67 @@ static int bam_dma_remove(struct platform_device *pdev) return 0; } +static int bam_dma_runtime_suspend(struct device *dev) +{ + struct bam_device *bdev = dev_get_drvdata(dev); + + clk_disable(bdev->bamclk); + + return 0; +} + +static int bam_dma_runtime_resume(struct device *dev) +{ + struct bam_device *bdev = dev_get_drvdata(dev); + int ret; + + ret = clk_enable(bdev->bamclk); + if (ret < 0) { + dev_err(dev, "clk_enable failed: %d\n", ret); + return ret; + } + + return 0; +} +#ifdef CONFIG_PM_SLEEP +static int bam_dma_suspend(struct device *dev) +{ + struct bam_device *bdev = dev_get_drvdata(dev); + + pm_runtime_force_suspend(dev); + + clk_unprepare(bdev->bamclk); + + return 0; +} + +static int bam_dma_resume(struct device *dev) +{ + struct bam_device *bdev = dev_get_drvdata(dev); + int ret; + + ret = clk_prepare(bdev->bamclk); + if (ret) + return ret; + + pm_runtime_force_resume(dev); + + return 0; +} +#endif + +static const struct dev_pm_ops bam_dma_pm_ops = { + SET_LATE_SYSTEM_SLEEP_PM_OPS(bam_dma_suspend, bam_dma_resume) + SET_RUNTIME_PM_OPS(bam_dma_runtime_suspend, bam_dma_runtime_resume, + NULL) +}; + static struct platform_driver bam_dma_driver = { .probe = bam_dma_probe, .remove = bam_dma_remove, .driver = { .name = "bam-dma-engine", + .pm = &bam_dma_pm_ops, .of_match_table = bam_of_match, }, }; -- cgit v0.10.2 From 4aa819c79bb73f3d1bdf5244d0a0f7f15261504c Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Thu, 30 
Jun 2016 14:47:10 +0200 Subject: dmaengine: bcm2835: fix 64-bit warning When building this driver on arm64, we get a harmless type mismatch warning: drivers/dma/bcm2835-dma.c: In function 'bcm2835_dma_fill_cb_chain_with_sg': include/linux/kernel.h:743:17: warning: comparison of distinct pointer types lacks a cast (void) (&_min1 == &_min2); \ ^ drivers/dma/bcm2835-dma.c:409:21: note: in expansion of macro 'min' cb->cb->length = min(len, max_len); This changes the type of the 'len' variable to size_t, which avoids the problem. Signed-off-by: Arnd Bergmann Fixes: 388cc7a281c0 ("dmaengine: bcm2835: add slave_sg support to bcm2835-dma") Signed-off-by: Vinod Koul diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c index 9ecb942..e18dc59 100644 --- a/drivers/dma/bcm2835-dma.c +++ b/drivers/dma/bcm2835-dma.c @@ -393,11 +393,12 @@ static void bcm2835_dma_fill_cb_chain_with_sg( unsigned int sg_len) { struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); - size_t max_len = bcm2835_dma_max_frame_length(c); - unsigned int i, len; + size_t len, max_len; + unsigned int i; dma_addr_t addr; struct scatterlist *sgent; + max_len = bcm2835_dma_max_frame_length(c); for_each_sg(sgl, sgent, sg_len, i) { for (addr = sg_dma_address(sgent), len = sg_dma_len(sgent); len > 0; -- cgit v0.10.2 From 9bcfe38f58a442d512d3f3e5a7dfab9bc6797c3d Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Fri, 1 Jul 2016 17:45:56 +0200 Subject: dmaengine: axi-dmac: Add MODULE_DEVICE_TABLE() Add MODULE_DEVICE_TABLE() for the axi-dmac driver. This allows the driver to be loaded on demand when built as a module. Signed-off-by: Lars-Peter Clausen Signed-off-by: Vinod Koul diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c index c346809..8b0de8c 100644 --- a/drivers/dma/dma-axi-dmac.c +++ b/drivers/dma/dma-axi-dmac.c @@ -683,6 +683,7 @@ static const struct of_device_id axi_dmac_of_match_table[] = { { .compatible = "adi,axi-dmac-1.00.a" }, { }, }; +MODULE_DEVICE_TABLE(of, axi_dmac_of_match_table); static struct platform_driver axi_dmac_driver = { .driver = { -- cgit v0.10.2 From 50dc60a25597e10a731c8a0813cb3e88db345f54 Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Fri, 1 Jul 2016 17:45:57 +0200 Subject: dmaengine: axi-dmac: Propagate errors from platform_get_irq() Propagate errors returned by platform_get_irq() to the driver core. This will enable proper probe deferring for the driver in case the IRQ provider has not been registered yet. Signed-off-by: Lars-Peter Clausen Signed-off-by: Vinod Koul diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c index 8b0de8c..0e0dfc0 100644 --- a/drivers/dma/dma-axi-dmac.c +++ b/drivers/dma/dma-axi-dmac.c @@ -579,7 +579,9 @@ static int axi_dmac_probe(struct platform_device *pdev) return -ENOMEM; dmac->irq = platform_get_irq(pdev, 0); - if (dmac->irq <= 0) + if (dmac->irq < 0) + return dmac->irq; + if (dmac->irq == 0) return -EINVAL; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); -- cgit v0.10.2 From 71831f652968f05270ace83a0bfd607bfed20760 Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Fri, 1 Jul 2016 17:45:58 +0200 Subject: dmaengine: axi-dmac: Return IRQ_NONE if no IRQs are pending Return IRQ_NONE in the interrupt handler when it is called but no IRQs are pending. This allows the system to recover in case of an interrupt storm e.g. due to a wrong interrupt configuration setup. 
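The recovery mentioned above comes from the kernel's spurious-interrupt accounting: unhandled interrupts are only counted when a handler returns IRQ_NONE, and after enough of them in a row the core can disable the offending line instead of livelocking. A generic sketch of the pattern, with a made-up controller struct and register offset:

	#include <linux/interrupt.h>
	#include <linux/io.h>

	/* Hypothetical controller context; only what the handler needs. */
	struct foo_dmac {
		void __iomem *regs;
	};

	#define FOO_REG_IRQ_PENDING	0x84	/* made-up offset */

	static irqreturn_t foo_irq_handler(int irq, void *devid)
	{
		struct foo_dmac *dmac = devid;
		unsigned int pending;

		pending = readl(dmac->regs + FOO_REG_IRQ_PENDING);
		if (!pending)
			return IRQ_NONE; /* not ours: let the core count it */

		/* Ack exactly what was observed, then process completions. */
		writel(pending, dmac->regs + FOO_REG_IRQ_PENDING);

		return IRQ_HANDLED;
	}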
Signed-off-by: Lars-Peter Clausen Signed-off-by: Vinod Koul diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c index 0e0dfc0..7f0b9aa 100644 --- a/drivers/dma/dma-axi-dmac.c +++ b/drivers/dma/dma-axi-dmac.c @@ -270,6 +270,9 @@ static irqreturn_t axi_dmac_interrupt_handler(int irq, void *devid) unsigned int pending; pending = axi_dmac_read(dmac, AXI_DMAC_REG_IRQ_PENDING); + if (!pending) + return IRQ_NONE; + axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_PENDING, pending); spin_lock(&dmac->chan.vchan.lock); -- cgit v0.10.2 From 9a8d0efaff4b343e3cff8b2cfaba847a18e2c0d9 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Thu, 30 Jun 2016 14:47:10 +0200 Subject: dmaengine: bcm2835: fix 64-bit warning When building this driver on arm64, we get a harmless type mismatch warning: drivers/dma/bcm2835-dma.c: In function 'bcm2835_dma_fill_cb_chain_with_sg': include/linux/kernel.h:743:17: warning: comparison of distinct pointer types lacks a cast (void) (&_min1 == &_min2); \ ^ drivers/dma/bcm2835-dma.c:409:21: note: in expansion of macro 'min' cb->cb->length = min(len, max_len); This changes the type of the 'len' variable to size_t, which avoids the problem. Signed-off-by: Arnd Bergmann Fixes: 388cc7a281c0 ("dmaengine: bcm2835: add slave_sg support to bcm2835-dma") Signed-off-by: Vinod Koul diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c index 6149b27..c8dd5b0 100644 --- a/drivers/dma/bcm2835-dma.c +++ b/drivers/dma/bcm2835-dma.c @@ -393,11 +393,12 @@ static void bcm2835_dma_fill_cb_chain_with_sg( unsigned int sg_len) { struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); - size_t max_len = bcm2835_dma_max_frame_length(c); - unsigned int i, len; + size_t len, max_len; + unsigned int i; dma_addr_t addr; struct scatterlist *sgent; + max_len = bcm2835_dma_max_frame_length(c); for_each_sg(sgl, sgent, sg_len, i) { for (addr = sg_dma_address(sgent), len = sg_dma_len(sgent); len > 0; -- cgit v0.10.2 From 184f337e6d859c20e0d3c6954980cbb744e014fb Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 4 Jul 2016 15:14:08 +0200 Subject: dmaengine: qcom-bam-dma: add __maybe_unused annotations for PM The bam_dma driver gained runtime PM support, but that causes build warnings whenever CONFIG_PM is disabled: drivers/dma/qcom/bam_dma.c:1324:12: error: 'bam_dma_runtime_resume' defined but not used [-Werror=unused-function] static int bam_dma_runtime_resume(struct device *dev) ^~~~~~~~~~~~~~~~~~~~~~ drivers/dma/qcom/bam_dma.c:1315:12: error: 'bam_dma_runtime_suspend' defined but not used [-Werror=unused-function] static int bam_dma_runtime_suspend(struct device *dev) This removes the incomplete #ifdef guard and instead marks all four PM functions as __maybe_unused, which avoids this kind of warning. 
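__maybe_unused works here because SET_RUNTIME_PM_OPS() and SET_LATE_SYSTEM_SLEEP_PM_OPS() expand to empty initializers when CONFIG_PM (respectively CONFIG_PM_SLEEP) is disabled, so the callbacks stay defined but unreferenced; the attribute simply tells the compiler that this is deliberate. A minimal sketch of the idiom with hypothetical foo_* callbacks:

	#include <linux/pm.h>

	static int __maybe_unused foo_runtime_suspend(struct device *dev)
	{
		/* e.g. gate the functional clock */
		return 0;
	}

	static int __maybe_unused foo_runtime_resume(struct device *dev)
	{
		/* e.g. re-enable the functional clock */
		return 0;
	}

	/* With CONFIG_PM disabled this macro expands to nothing, the
	 * functions above become unreferenced, and __maybe_unused keeps
	 * the build warning-free without any #ifdef bookkeeping. */
	static const struct dev_pm_ops foo_pm_ops = {
		SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume,
				   NULL)
	};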
Signed-off-by: Arnd Bergmann Fixes: 7d2545599f5b ("dmaengine: qcom-bam-dma: Add pm_runtime support") Signed-off-by: Vinod Koul diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c index 4754891..03c4eb3 100644 --- a/drivers/dma/qcom/bam_dma.c +++ b/drivers/dma/qcom/bam_dma.c @@ -1312,7 +1312,7 @@ static int bam_dma_remove(struct platform_device *pdev) return 0; } -static int bam_dma_runtime_suspend(struct device *dev) +static int __maybe_unused bam_dma_runtime_suspend(struct device *dev) { struct bam_device *bdev = dev_get_drvdata(dev); @@ -1321,7 +1321,7 @@ static int bam_dma_runtime_suspend(struct device *dev) return 0; } -static int bam_dma_runtime_resume(struct device *dev) +static int __maybe_unused bam_dma_runtime_resume(struct device *dev) { struct bam_device *bdev = dev_get_drvdata(dev); int ret; @@ -1334,8 +1334,8 @@ static int bam_dma_runtime_resume(struct device *dev) return 0; } -#ifdef CONFIG_PM_SLEEP -static int bam_dma_suspend(struct device *dev) + +static int __maybe_unused bam_dma_suspend(struct device *dev) { struct bam_device *bdev = dev_get_drvdata(dev); @@ -1346,7 +1346,7 @@ static int bam_dma_suspend(struct device *dev) return 0; } -static int bam_dma_resume(struct device *dev) +static int __maybe_unused bam_dma_resume(struct device *dev) { struct bam_device *bdev = dev_get_drvdata(dev); int ret; @@ -1359,7 +1359,6 @@ static int bam_dma_resume(struct device *dev) return 0; } -#endif static const struct dev_pm_ops bam_dma_pm_ops = { SET_LATE_SYSTEM_SLEEP_PM_OPS(bam_dma_suspend, bam_dma_resume) -- cgit v0.10.2 From ba2c194e6c20b3b1d01cb1f1cffd4910a3b20cfc Mon Sep 17 00:00:00 2001 From: Kedareswara rao Appana Date: Fri, 24 Jun 2016 10:51:22 +0530 Subject: Documentation: DT: vdma: Update binding doc for multi-channel dma mode This patch updates the device-tree binding doc for AXI DMA multi-channel dma mode. Acked-by: Rob Herring Signed-off-by: Kedareswara rao Appana Signed-off-by: Vinod Koul diff --git a/Documentation/devicetree/bindings/dma/xilinx/xilinx_vdma.txt b/Documentation/devicetree/bindings/dma/xilinx/xilinx_vdma.txt index a1f2683..0faa189 100644 --- a/Documentation/devicetree/bindings/dma/xilinx/xilinx_vdma.txt +++ b/Documentation/devicetree/bindings/dma/xilinx/xilinx_vdma.txt @@ -40,6 +40,8 @@ Required properties for VDMA: Optional properties: - xlnx,include-sg: Tells configured for Scatter-mode in the hardware. +Optional properties for AXI DMA: +- xlnx,mcdma: Tells whether configured for multi-channel mode in the hardware. Optional properties for VDMA: - xlnx,flush-fsync: Tells which channel to Flush on Frame sync. It takes following values: @@ -60,6 +62,8 @@ Optional child node properties: Optional child node properties for VDMA: - xlnx,genlock-mode: Tells Genlock synchronization is enabled/disabled in hardware. +Optional child node properties for AXI DMA: +-dma-channels: Number of dma channels in child node. Example: ++++++++ -- cgit v0.10.2 From 1a9e7a03c761b57652ea532fa940264aa9dd699f Mon Sep 17 00:00:00 2001 From: Kedareswara rao Appana Date: Fri, 24 Jun 2016 10:51:23 +0530 Subject: dmaengine: vdma: Add support for multi-channel dma mode This patch adds support for AXI DMA multi-channel dma mode. Multichannel mode enables DMA to connect to multiple masters and slaves on the streaming side. In Multichannel mode AXI DMA supports 2D transfers.
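On the client side, the 2D transfers referred to above map onto the dmaengine interleaved API: numf frames of sgl[0].size bytes each, separated by sgl[0].icg bytes of gap, so the hardware stride is size + icg (exactly what the driver packs into its VSIZE/HSIZE/STRIDE fields below). A hedged consumer-side sketch, with made-up channel, buffer and geometry:

	#include <linux/dmaengine.h>
	#include <linux/slab.h>

	/* Request one 2D block: 64 rows of 1024 bytes with a 256-byte gap
	 * between rows (stride 1280). 'chan' and 'buf' come from the
	 * caller; all of the numbers are illustrative. */
	static struct dma_async_tx_descriptor *
	foo_prep_2d(struct dma_chan *chan, dma_addr_t buf)
	{
		struct dma_async_tx_descriptor *txd;
		struct dma_interleaved_template *xt;

		xt = kzalloc(sizeof(*xt) + sizeof(struct data_chunk),
			     GFP_KERNEL);
		if (!xt)
			return NULL;

		xt->src_start = buf;
		xt->dir = DMA_MEM_TO_DEV;
		xt->src_inc = true;
		xt->src_sgl = true;	/* gap applies on the memory side */
		xt->numf = 64;		/* vsize: number of rows */
		xt->frame_size = 1;	/* one chunk per frame */
		xt->sgl[0].size = 1024;	/* hsize: bytes per row */
		xt->sgl[0].icg = 256;	/* inter-row gap */

		txd = dmaengine_prep_interleaved_dma(chan, xt,
						     DMA_PREP_INTERRUPT);
		kfree(xt);	/* the provider consumed it during prep */
		return txd;
	}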
Signed-off-by: Kedareswara rao Appana Signed-off-by: Vinod Koul diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c index 40f754b..0768d9f 100644 --- a/drivers/dma/xilinx/xilinx_vdma.c +++ b/drivers/dma/xilinx/xilinx_vdma.c @@ -114,7 +114,7 @@ #define XILINX_VDMA_REG_START_ADDRESS_64(n) (0x000c + 8 * (n)) /* HW specific definitions */ -#define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x2 +#define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x20 #define XILINX_DMA_DMAXR_ALL_IRQ_MASK \ (XILINX_DMA_DMASR_FRM_CNT_IRQ | \ @@ -165,6 +165,18 @@ #define XILINX_DMA_COALESCE_MAX 255 #define XILINX_DMA_NUM_APP_WORDS 5 +/* Multi-Channel DMA Descriptor offsets*/ +#define XILINX_DMA_MCRX_CDESC(x) (0x40 + (x-1) * 0x20) +#define XILINX_DMA_MCRX_TDESC(x) (0x48 + (x-1) * 0x20) + +/* Multi-Channel DMA Masks/Shifts */ +#define XILINX_DMA_BD_HSIZE_MASK GENMASK(15, 0) +#define XILINX_DMA_BD_STRIDE_MASK GENMASK(15, 0) +#define XILINX_DMA_BD_VSIZE_MASK GENMASK(31, 19) +#define XILINX_DMA_BD_TDEST_MASK GENMASK(4, 0) +#define XILINX_DMA_BD_STRIDE_SHIFT 0 +#define XILINX_DMA_BD_VSIZE_SHIFT 19 + /* AXI CDMA Specific Registers/Offsets */ #define XILINX_CDMA_REG_SRCADDR 0x18 #define XILINX_CDMA_REG_DSTADDR 0x20 @@ -210,8 +222,8 @@ struct xilinx_axidma_desc_hw { u32 next_desc_msb; u32 buf_addr; u32 buf_addr_msb; - u32 pad1; - u32 pad2; + u32 mcdma_control; + u32 vsize_stride; u32 control; u32 status; u32 app[XILINX_DMA_NUM_APP_WORDS]; @@ -349,6 +361,7 @@ struct xilinx_dma_chan { struct xilinx_axidma_tx_segment *seg_v; struct xilinx_axidma_tx_segment *cyclic_seg_v; void (*start_transfer)(struct xilinx_dma_chan *chan); + u16 tdest; }; struct xilinx_dma_config { @@ -365,6 +378,7 @@ struct xilinx_dma_config { * @common: DMA device structure * @chan: Driver specific DMA channel * @has_sg: Specifies whether Scatter-Gather is present or not + * @mcdma: Specifies whether Multi-Channel is present or not * @flush_on_fsync: Flush on frame sync * @ext_addr: Indicates 64 bit addressing is supported by dma device * @pdev: Platform device structure pointer @@ -374,6 +388,8 @@ struct xilinx_dma_config { * @txs_clk: DMA mm2s stream clock * @rx_clk: DMA s2mm clock * @rxs_clk: DMA s2mm stream clock + * @nr_channels: Number of channels DMA device supports + * @chan_id: DMA channel identifier */ struct xilinx_dma_device { void __iomem *regs; @@ -381,6 +397,7 @@ struct xilinx_dma_device { struct dma_device common; struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE]; bool has_sg; + bool mcdma; u32 flush_on_fsync; bool ext_addr; struct platform_device *pdev; @@ -390,6 +407,8 @@ struct xilinx_dma_device { struct clk *txs_clk; struct clk *rx_clk; struct clk *rxs_clk; + u32 nr_channels; + u32 chan_id; }; /* Macros */ @@ -1196,18 +1215,20 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan) tail_segment = list_last_entry(&tail_desc->segments, struct xilinx_axidma_tx_segment, node); - old_head = list_first_entry(&head_desc->segments, - struct xilinx_axidma_tx_segment, node); - new_head = chan->seg_v; - /* Copy Buffer Descriptor fields. */ - new_head->hw = old_head->hw; + if (chan->has_sg && !chan->xdev->mcdma) { + old_head = list_first_entry(&head_desc->segments, + struct xilinx_axidma_tx_segment, node); + new_head = chan->seg_v; + /* Copy Buffer Descriptor fields. 
*/ + new_head->hw = old_head->hw; - /* Swap and save new reserve */ - list_replace_init(&old_head->node, &new_head->node); - chan->seg_v = old_head; + /* Swap and save new reserve */ + list_replace_init(&old_head->node, &new_head->node); + chan->seg_v = old_head; - tail_segment->hw.next_desc = chan->seg_v->phys; - head_desc->async_tx.phys = new_head->phys; + tail_segment->hw.next_desc = chan->seg_v->phys; + head_desc->async_tx.phys = new_head->phys; + } reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); @@ -1218,23 +1239,53 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan) dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg); } - if (chan->has_sg) + if (chan->has_sg && !chan->xdev->mcdma) xilinx_write(chan, XILINX_DMA_REG_CURDESC, head_desc->async_tx.phys); + if (chan->has_sg && chan->xdev->mcdma) { + if (chan->direction == DMA_MEM_TO_DEV) { + dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC, + head_desc->async_tx.phys); + } else { + if (!chan->tdest) { + dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC, + head_desc->async_tx.phys); + } else { + dma_ctrl_write(chan, + XILINX_DMA_MCRX_CDESC(chan->tdest), + head_desc->async_tx.phys); + } + } + } + xilinx_dma_start(chan); if (chan->err) return; /* Start the transfer */ - if (chan->has_sg) { + if (chan->has_sg && !chan->xdev->mcdma) { if (chan->cyclic) xilinx_write(chan, XILINX_DMA_REG_TAILDESC, chan->cyclic_seg_v->phys); else xilinx_write(chan, XILINX_DMA_REG_TAILDESC, tail_segment->phys); + } else if (chan->has_sg && chan->xdev->mcdma) { + if (chan->direction == DMA_MEM_TO_DEV) { + dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC, + tail_segment->phys); + } else { + if (!chan->tdest) { + dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC, + tail_segment->phys); + } else { + dma_ctrl_write(chan, + XILINX_DMA_MCRX_TDESC(chan->tdest), + tail_segment->phys); + } + } } else { struct xilinx_axidma_tx_segment *segment; struct xilinx_axidma_desc_hw *hw; @@ -1862,6 +1913,90 @@ error: } /** + * xilinx_dma_prep_interleaved - prepare a descriptor for a + * DMA_SLAVE transaction + * @dchan: DMA channel + * @xt: Interleaved template pointer + * @flags: transfer ack flags + * + * Return: Async transaction descriptor on success and NULL on failure + */ +static struct dma_async_tx_descriptor * +xilinx_dma_prep_interleaved(struct dma_chan *dchan, + struct dma_interleaved_template *xt, + unsigned long flags) +{ + struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); + struct xilinx_dma_tx_descriptor *desc; + struct xilinx_axidma_tx_segment *segment; + struct xilinx_axidma_desc_hw *hw; + + if (!is_slave_direction(xt->dir)) + return NULL; + + if (!xt->numf || !xt->sgl[0].size) + return NULL; + + if (xt->frame_size != 1) + return NULL; + + /* Allocate a transaction descriptor. 
*/ + desc = xilinx_dma_alloc_tx_descriptor(chan); + if (!desc) + return NULL; + + chan->direction = xt->dir; + dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); + desc->async_tx.tx_submit = xilinx_dma_tx_submit; + + /* Get a free segment */ + segment = xilinx_axidma_alloc_tx_segment(chan); + if (!segment) + goto error; + + hw = &segment->hw; + + /* Fill in the descriptor */ + if (xt->dir != DMA_MEM_TO_DEV) + hw->buf_addr = xt->dst_start; + else + hw->buf_addr = xt->src_start; + + hw->mcdma_control = chan->tdest & XILINX_DMA_BD_TDEST_MASK; + hw->vsize_stride = (xt->numf << XILINX_DMA_BD_VSIZE_SHIFT) & + XILINX_DMA_BD_VSIZE_MASK; + hw->vsize_stride |= (xt->sgl[0].icg + xt->sgl[0].size) & + XILINX_DMA_BD_STRIDE_MASK; + hw->control = xt->sgl[0].size & XILINX_DMA_BD_HSIZE_MASK; + + /* + * Insert the segment into the descriptor segments + * list. + */ + list_add_tail(&segment->node, &desc->segments); + + + segment = list_first_entry(&desc->segments, + struct xilinx_axidma_tx_segment, node); + desc->async_tx.phys = segment->phys; + + /* For the last DMA_MEM_TO_DEV transfer, set EOP */ + if (xt->dir == DMA_MEM_TO_DEV) { + segment->hw.control |= XILINX_DMA_BD_SOP; + segment = list_last_entry(&desc->segments, + struct xilinx_axidma_tx_segment, + node); + segment->hw.control |= XILINX_DMA_BD_EOP; + } + + return &desc->async_tx; + +error: + xilinx_dma_free_tx_descriptor(chan, desc); + return NULL; +} + +/** * xilinx_dma_terminate_all - Halt the channel and free descriptors * @chan: Driver specific DMA Channel pointer */ @@ -2176,7 +2311,7 @@ static void xdma_disable_allclks(struct xilinx_dma_device *xdev) * Return: '0' on success and failure value on error */ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, - struct device_node *node) + struct device_node *node, int chan_id) { struct xilinx_dma_chan *chan; bool has_dre = false; @@ -2220,7 +2355,8 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel")) { chan->direction = DMA_MEM_TO_DEV; - chan->id = 0; + chan->id = chan_id; + chan->tdest = chan_id; chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET; if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { @@ -2233,7 +2369,8 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, } else if (of_device_is_compatible(node, "xlnx,axi-vdma-s2mm-channel")) { chan->direction = DMA_DEV_TO_MEM; - chan->id = 1; + chan->id = chan_id; + chan->tdest = chan_id - xdev->nr_channels; chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET; if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { @@ -2288,6 +2425,32 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, } /** + * xilinx_dma_child_probe - Per child node probe + * It get number of dma-channels per child node from + * device-tree and initializes all the channels. + * + * @xdev: Driver specific device structure + * @node: Device node + * + * Return: 0 always. 
+ */ +static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev, + struct device_node *node) { + int ret, i, nr_channels = 1; + + ret = of_property_read_u32(node, "dma-channels", &nr_channels); + if ((ret < 0) && xdev->mcdma) + dev_warn(xdev->dev, "missing dma-channels property\n"); + + for (i = 0; i < nr_channels; i++) + xilinx_dma_chan_probe(xdev, node, xdev->chan_id++); + + xdev->nr_channels += nr_channels; + + return 0; +} + +/** * of_dma_xilinx_xlate - Translation function * @dma_spec: Pointer to DMA specifier as found in the device tree * @ofdma: Pointer to DMA controller data @@ -2300,7 +2463,7 @@ static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec, struct xilinx_dma_device *xdev = ofdma->of_dma_data; int chan_id = dma_spec->args[0]; - if (chan_id >= XILINX_DMA_MAX_CHANS_PER_DEVICE || !xdev->chan[chan_id]) + if (chan_id >= xdev->nr_channels || !xdev->chan[chan_id]) return NULL; return dma_get_slave_channel(&xdev->chan[chan_id]->common); @@ -2376,6 +2539,8 @@ static int xilinx_dma_probe(struct platform_device *pdev) /* Retrieve the DMA engine properties from the device tree */ xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg"); + if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) + xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma"); if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { err = of_property_read_u32(node, "xlnx,num-fstores", @@ -2426,6 +2591,8 @@ static int xilinx_dma_probe(struct platform_device *pdev) xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg; xdev->common.device_prep_dma_cyclic = xilinx_dma_prep_dma_cyclic; + xdev->common.device_prep_interleaved_dma = + xilinx_dma_prep_interleaved; /* Residue calculation is supported by only AXI DMA */ xdev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; @@ -2441,13 +2608,13 @@ static int xilinx_dma_probe(struct platform_device *pdev) /* Initialize the channels */ for_each_child_of_node(node, child) { - err = xilinx_dma_chan_probe(xdev, child); + err = xilinx_dma_child_probe(xdev, child); if (err < 0) goto disable_clks; } if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { - for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; i++) + for (i = 0; i < xdev->nr_channels; i++) if (xdev->chan[i]) xdev->chan[i]->num_frms = num_frames; } @@ -2470,7 +2637,7 @@ static int xilinx_dma_probe(struct platform_device *pdev) disable_clks: xdma_disable_allclks(xdev); error: - for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; i++) + for (i = 0; i < xdev->nr_channels; i++) if (xdev->chan[i]) xilinx_dma_chan_remove(xdev->chan[i]); @@ -2492,7 +2659,7 @@ static int xilinx_dma_remove(struct platform_device *pdev) dma_async_device_unregister(&xdev->common); - for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; i++) + for (i = 0; i < xdev->nr_channels; i++) if (xdev->chan[i]) xilinx_dma_chan_remove(xdev->chan[i]); -- cgit v0.10.2 From 5cd0749af203a2f321d07721b0ebd744fe4ac129 Mon Sep 17 00:00:00 2001 From: Kedareswara rao Appana Date: Fri, 24 Jun 2016 10:51:24 +0530 Subject: Documentation: DT: dma: Delete binding doc for AXI DMA The AXI DMA support is added to the existing AXI VDMA driver. Device tree binding information also updated in the VDMA binding doc. 
Acked-by: Rob Herring Signed-off-by: Kedareswara rao Appana Signed-off-by: Vinod Koul diff --git a/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt b/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt deleted file mode 100644 index 3cf0072..0000000 --- a/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt +++ /dev/null @@ -1,65 +0,0 @@ -Xilinx AXI DMA engine, it does transfers between memory and AXI4 stream -target devices. It can be configured to have one channel or two channels. -If configured as two channels, one is to transmit to the device and another -is to receive from the device. - -Required properties: -- compatible: Should be "xlnx,axi-dma-1.00.a" -- #dma-cells: Should be <1>, see "dmas" property below -- reg: Should contain DMA registers location and length. -- dma-channel child node: Should have at least one channel and can have up to - two channels per device. This node specifies the properties of each - DMA channel (see child node properties below). - -Optional properties: -- xlnx,include-sg: Tells whether configured for Scatter-mode in - the hardware. - -Required child node properties: -- compatible: It should be either "xlnx,axi-dma-mm2s-channel" or - "xlnx,axi-dma-s2mm-channel". -- interrupts: Should contain per channel DMA interrupts. -- xlnx,datawidth: Should contain the stream data width, take values - {32,64...1024}. - -Option child node properties: -- xlnx,include-dre: Tells whether hardware is configured for Data - Realignment Engine. - -Example: -++++++++ - -axi_dma_0: axidma@40400000 { - compatible = "xlnx,axi-dma-1.00.a"; - #dma_cells = <1>; - reg = < 0x40400000 0x10000 >; - dma-channel@40400000 { - compatible = "xlnx,axi-dma-mm2s-channel"; - interrupts = < 0 59 4 >; - xlnx,datawidth = <0x40>; - } ; - dma-channel@40400030 { - compatible = "xlnx,axi-dma-s2mm-channel"; - interrupts = < 0 58 4 >; - xlnx,datawidth = <0x40>; - } ; -} ; - - -* DMA client - -Required properties: -- dmas: a list of <[DMA device phandle] [Channel ID]> pairs, - where Channel ID is '0' for write/tx and '1' for read/rx - channel. -- dma-names: a list of DMA channel names, one per "dmas" entry - -Example: -++++++++ - -dmatest_0: dmatest@0 { - compatible ="xlnx,axi-dma-test-1.00.a"; - dmas = <&axi_dma_0 0 - &axi_dma_0 1>; - dma-names = "dma0", "dma1"; -} ; -- cgit v0.10.2 From fde57a7c4474b2d00b7b82b94aa2dd0160b1bbee Mon Sep 17 00:00:00 2001 From: Kedareswara rao Appana Date: Fri, 24 Jun 2016 10:51:25 +0530 Subject: dmaengine: xilinx: Rename driver and config In the existing vdma driver support for AXI DMA and CDMA got added so the driver is no longer VDMA specific. This patch renames the driver and DT binding doc to xilinx_dma and updates the Kconfig description for all the DMAS. Signed-off-by: Kedareswara rao Appana Signed-off-by: Vinod Koul diff --git a/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt b/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt new file mode 100644 index 0000000..0faa189 --- /dev/null +++ b/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt @@ -0,0 +1,111 @@ +Xilinx AXI VDMA engine, it does transfers between memory and video devices. +It can be configured to have one channel or two channels. If configured +as two channels, one is to transmit to the video device and another is +to receive from the video device. + +Xilinx AXI DMA engine, it does transfers between memory and AXI4 stream +target devices. It can be configured to have one channel or two channels. 
+If configured as two channels, one is to transmit to the device and another +is to receive from the device. + +Xilinx AXI CDMA engine, it does transfers between memory-mapped source +address and a memory-mapped destination address. + +Required properties: +- compatible: Should be "xlnx,axi-vdma-1.00.a" or "xlnx,axi-dma-1.00.a" or + "xlnx,axi-cdma-1.00.a"" +- #dma-cells: Should be <1>, see "dmas" property below +- reg: Should contain VDMA registers location and length. +- xlnx,addrwidth: Should be the vdma addressing size in bits(ex: 32 bits). +- dma-ranges: Should be as the following . +- dma-channel child node: Should have at least one channel and can have up to + two channels per device. This node specifies the properties of each + DMA channel (see child node properties below). +- clocks: Input clock specifier. Refer to common clock bindings. +- clock-names: List of input clocks + For VDMA: + Required elements: "s_axi_lite_aclk" + Optional elements: "m_axi_mm2s_aclk" "m_axi_s2mm_aclk", + "m_axis_mm2s_aclk", "s_axis_s2mm_aclk" + For CDMA: + Required elements: "s_axi_lite_aclk", "m_axi_aclk" + FOR AXIDMA: + Required elements: "s_axi_lite_aclk" + Optional elements: "m_axi_mm2s_aclk", "m_axi_s2mm_aclk", + "m_axi_sg_aclk" + +Required properties for VDMA: +- xlnx,num-fstores: Should be the number of framebuffers as configured in h/w. + +Optional properties: +- xlnx,include-sg: Tells configured for Scatter-mode in + the hardware. +Optional properties for AXI DMA: +- xlnx,mcdma: Tells whether configured for multi-channel mode in the hardware. +Optional properties for VDMA: +- xlnx,flush-fsync: Tells which channel to Flush on Frame sync. + It takes following values: + {1}, flush both channels + {2}, flush mm2s channel + {3}, flush s2mm channel + +Required child node properties: +- compatible: It should be either "xlnx,axi-vdma-mm2s-channel" or + "xlnx,axi-vdma-s2mm-channel". +- interrupts: Should contain per channel VDMA interrupts. +- xlnx,datawidth: Should contain the stream data width, take values + {32,64...1024}. + +Optional child node properties: +- xlnx,include-dre: Tells hardware is configured for Data + Realignment Engine. +Optional child node properties for VDMA: +- xlnx,genlock-mode: Tells Genlock synchronization is + enabled/disabled in hardware. +Optional child node properties for AXI DMA: +-dma-channels: Number of dma channels in child node. + +Example: +++++++++ + +axi_vdma_0: axivdma@40030000 { + compatible = "xlnx,axi-vdma-1.00.a"; + #dma_cells = <1>; + reg = < 0x40030000 0x10000 >; + dma-ranges = <0x00000000 0x00000000 0x40000000>; + xlnx,num-fstores = <0x8>; + xlnx,flush-fsync = <0x1>; + xlnx,addrwidth = <0x20>; + clocks = <&clk 0>, <&clk 1>, <&clk 2>, <&clk 3>, <&clk 4>; + clock-names = "s_axi_lite_aclk", "m_axi_mm2s_aclk", "m_axi_s2mm_aclk", + "m_axis_mm2s_aclk", "s_axis_s2mm_aclk"; + dma-channel@40030000 { + compatible = "xlnx,axi-vdma-mm2s-channel"; + interrupts = < 0 54 4 >; + xlnx,datawidth = <0x40>; + } ; + dma-channel@40030030 { + compatible = "xlnx,axi-vdma-s2mm-channel"; + interrupts = < 0 53 4 >; + xlnx,datawidth = <0x40>; + } ; +} ; + + +* DMA client + +Required properties: +- dmas: a list of <[Video DMA device phandle] [Channel ID]> pairs, + where Channel ID is '0' for write/tx and '1' for read/rx + channel. 
+- dma-names: a list of DMA channel names, one per "dmas" entry + +Example: +++++++++ + +vdmatest_0: vdmatest@0 { + compatible ="xlnx,axi-vdma-test-1.00.a"; + dmas = <&axi_vdma_0 0 + &axi_vdma_0 1>; + dma-names = "vdma0", "vdma1"; +} ; diff --git a/Documentation/devicetree/bindings/dma/xilinx/xilinx_vdma.txt b/Documentation/devicetree/bindings/dma/xilinx/xilinx_vdma.txt deleted file mode 100644 index 0faa189..0000000 --- a/Documentation/devicetree/bindings/dma/xilinx/xilinx_vdma.txt +++ /dev/null @@ -1,111 +0,0 @@ -Xilinx AXI VDMA engine, it does transfers between memory and video devices. -It can be configured to have one channel or two channels. If configured -as two channels, one is to transmit to the video device and another is -to receive from the video device. - -Xilinx AXI DMA engine, it does transfers between memory and AXI4 stream -target devices. It can be configured to have one channel or two channels. -If configured as two channels, one is to transmit to the device and another -is to receive from the device. - -Xilinx AXI CDMA engine, it does transfers between memory-mapped source -address and a memory-mapped destination address. - -Required properties: -- compatible: Should be "xlnx,axi-vdma-1.00.a" or "xlnx,axi-dma-1.00.a" or - "xlnx,axi-cdma-1.00.a"" -- #dma-cells: Should be <1>, see "dmas" property below -- reg: Should contain VDMA registers location and length. -- xlnx,addrwidth: Should be the vdma addressing size in bits(ex: 32 bits). -- dma-ranges: Should be as the following . -- dma-channel child node: Should have at least one channel and can have up to - two channels per device. This node specifies the properties of each - DMA channel (see child node properties below). -- clocks: Input clock specifier. Refer to common clock bindings. -- clock-names: List of input clocks - For VDMA: - Required elements: "s_axi_lite_aclk" - Optional elements: "m_axi_mm2s_aclk" "m_axi_s2mm_aclk", - "m_axis_mm2s_aclk", "s_axis_s2mm_aclk" - For CDMA: - Required elements: "s_axi_lite_aclk", "m_axi_aclk" - FOR AXIDMA: - Required elements: "s_axi_lite_aclk" - Optional elements: "m_axi_mm2s_aclk", "m_axi_s2mm_aclk", - "m_axi_sg_aclk" - -Required properties for VDMA: -- xlnx,num-fstores: Should be the number of framebuffers as configured in h/w. - -Optional properties: -- xlnx,include-sg: Tells configured for Scatter-mode in - the hardware. -Optional properties for AXI DMA: -- xlnx,mcdma: Tells whether configured for multi-channel mode in the hardware. -Optional properties for VDMA: -- xlnx,flush-fsync: Tells which channel to Flush on Frame sync. - It takes following values: - {1}, flush both channels - {2}, flush mm2s channel - {3}, flush s2mm channel - -Required child node properties: -- compatible: It should be either "xlnx,axi-vdma-mm2s-channel" or - "xlnx,axi-vdma-s2mm-channel". -- interrupts: Should contain per channel VDMA interrupts. -- xlnx,datawidth: Should contain the stream data width, take values - {32,64...1024}. - -Optional child node properties: -- xlnx,include-dre: Tells hardware is configured for Data - Realignment Engine. -Optional child node properties for VDMA: -- xlnx,genlock-mode: Tells Genlock synchronization is - enabled/disabled in hardware. -Optional child node properties for AXI DMA: --dma-channels: Number of dma channels in child node. 
- -Example: -++++++++ - -axi_vdma_0: axivdma@40030000 { - compatible = "xlnx,axi-vdma-1.00.a"; - #dma_cells = <1>; - reg = < 0x40030000 0x10000 >; - dma-ranges = <0x00000000 0x00000000 0x40000000>; - xlnx,num-fstores = <0x8>; - xlnx,flush-fsync = <0x1>; - xlnx,addrwidth = <0x20>; - clocks = <&clk 0>, <&clk 1>, <&clk 2>, <&clk 3>, <&clk 4>; - clock-names = "s_axi_lite_aclk", "m_axi_mm2s_aclk", "m_axi_s2mm_aclk", - "m_axis_mm2s_aclk", "s_axis_s2mm_aclk"; - dma-channel@40030000 { - compatible = "xlnx,axi-vdma-mm2s-channel"; - interrupts = < 0 54 4 >; - xlnx,datawidth = <0x40>; - } ; - dma-channel@40030030 { - compatible = "xlnx,axi-vdma-s2mm-channel"; - interrupts = < 0 53 4 >; - xlnx,datawidth = <0x40>; - } ; -} ; - - -* DMA client - -Required properties: -- dmas: a list of <[Video DMA device phandle] [Channel ID]> pairs, - where Channel ID is '0' for write/tx and '1' for read/rx - channel. -- dma-names: a list of DMA channel names, one per "dmas" entry - -Example: -++++++++ - -vdmatest_0: vdmatest@0 { - compatible ="xlnx,axi-vdma-test-1.00.a"; - dmas = <&axi_vdma_0 0 - &axi_vdma_0 1>; - dma-names = "vdma0", "vdma1"; -} ; diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 8c98779..1f39f3e 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -519,19 +519,24 @@ config XGENE_DMA help Enable support for the APM X-Gene SoC DMA engine. -config XILINX_VDMA - tristate "Xilinx AXI VDMA Engine" +config XILINX_DMA + tristate "Xilinx AXI DMAS Engine" depends on (ARCH_ZYNQ || MICROBLAZE || ARM64) select DMA_ENGINE help Enable support for Xilinx AXI VDMA Soft IP. - This engine provides high-bandwidth direct memory access + AXI VDMA engine provides high-bandwidth direct memory access between memory and AXI4-Stream video type target peripherals including peripherals which support AXI4- Stream Video Protocol. It has two stream interfaces/ channels, Memory Mapped to Stream (MM2S) and Stream to Memory Mapped (S2MM) for the data transfers. + AXI CDMA engine provides high-bandwidth direct memory access + between a memory-mapped source address and a memory-mapped + destination address. + AXI DMA engine provides high-bandwidth one dimensional direct + memory access between memory and AXI4-Stream target peripherals. config ZX_DMA tristate "ZTE ZX296702 DMA support" diff --git a/drivers/dma/xilinx/Makefile b/drivers/dma/xilinx/Makefile index 3c4e9f2..af9e69a4 100644 --- a/drivers/dma/xilinx/Makefile +++ b/drivers/dma/xilinx/Makefile @@ -1 +1 @@ -obj-$(CONFIG_XILINX_VDMA) += xilinx_vdma.o +obj-$(CONFIG_XILINX_DMA) += xilinx_dma.o diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c new file mode 100644 index 0000000..0768d9f --- /dev/null +++ b/drivers/dma/xilinx/xilinx_dma.c @@ -0,0 +1,2684 @@ +/* + * DMA driver for Xilinx Video DMA Engine + * + * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved. + * + * Based on the Freescale DMA driver. + * + * Description: + * The AXI Video Direct Memory Access (AXI VDMA) core is a soft Xilinx IP + * core that provides high-bandwidth direct memory access between memory + * and AXI4-Stream type video target peripherals. The core provides efficient + * two dimensional DMA operations with independent asynchronous read (S2MM) + * and write (MM2S) channel operation. It can be configured to have either + * one channel or two channels. If configured as two channels, one is to + * transmit to the video device (MM2S) and another is to receive from the + * video device (S2MM). 
Initialization, status, interrupt and management + * registers are accessed through an AXI4-Lite slave interface. + * + * The AXI Direct Memory Access (AXI DMA) core is a soft Xilinx IP core that + * provides high-bandwidth one dimensional direct memory access between memory + * and AXI4-Stream target peripherals. It supports one receive and one + * transmit channel, both of them optional at synthesis time. + * + * The AXI CDMA, is a soft IP, which provides high-bandwidth Direct Memory + * Access (DMA) between a memory-mapped source address and a memory-mapped + * destination address. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../dmaengine.h" + +/* Register/Descriptor Offsets */ +#define XILINX_DMA_MM2S_CTRL_OFFSET 0x0000 +#define XILINX_DMA_S2MM_CTRL_OFFSET 0x0030 +#define XILINX_VDMA_MM2S_DESC_OFFSET 0x0050 +#define XILINX_VDMA_S2MM_DESC_OFFSET 0x00a0 + +/* Control Registers */ +#define XILINX_DMA_REG_DMACR 0x0000 +#define XILINX_DMA_DMACR_DELAY_MAX 0xff +#define XILINX_DMA_DMACR_DELAY_SHIFT 24 +#define XILINX_DMA_DMACR_FRAME_COUNT_MAX 0xff +#define XILINX_DMA_DMACR_FRAME_COUNT_SHIFT 16 +#define XILINX_DMA_DMACR_ERR_IRQ BIT(14) +#define XILINX_DMA_DMACR_DLY_CNT_IRQ BIT(13) +#define XILINX_DMA_DMACR_FRM_CNT_IRQ BIT(12) +#define XILINX_DMA_DMACR_MASTER_SHIFT 8 +#define XILINX_DMA_DMACR_FSYNCSRC_SHIFT 5 +#define XILINX_DMA_DMACR_FRAMECNT_EN BIT(4) +#define XILINX_DMA_DMACR_GENLOCK_EN BIT(3) +#define XILINX_DMA_DMACR_RESET BIT(2) +#define XILINX_DMA_DMACR_CIRC_EN BIT(1) +#define XILINX_DMA_DMACR_RUNSTOP BIT(0) +#define XILINX_DMA_DMACR_FSYNCSRC_MASK GENMASK(6, 5) + +#define XILINX_DMA_REG_DMASR 0x0004 +#define XILINX_DMA_DMASR_EOL_LATE_ERR BIT(15) +#define XILINX_DMA_DMASR_ERR_IRQ BIT(14) +#define XILINX_DMA_DMASR_DLY_CNT_IRQ BIT(13) +#define XILINX_DMA_DMASR_FRM_CNT_IRQ BIT(12) +#define XILINX_DMA_DMASR_SOF_LATE_ERR BIT(11) +#define XILINX_DMA_DMASR_SG_DEC_ERR BIT(10) +#define XILINX_DMA_DMASR_SG_SLV_ERR BIT(9) +#define XILINX_DMA_DMASR_EOF_EARLY_ERR BIT(8) +#define XILINX_DMA_DMASR_SOF_EARLY_ERR BIT(7) +#define XILINX_DMA_DMASR_DMA_DEC_ERR BIT(6) +#define XILINX_DMA_DMASR_DMA_SLAVE_ERR BIT(5) +#define XILINX_DMA_DMASR_DMA_INT_ERR BIT(4) +#define XILINX_DMA_DMASR_IDLE BIT(1) +#define XILINX_DMA_DMASR_HALTED BIT(0) +#define XILINX_DMA_DMASR_DELAY_MASK GENMASK(31, 24) +#define XILINX_DMA_DMASR_FRAME_COUNT_MASK GENMASK(23, 16) + +#define XILINX_DMA_REG_CURDESC 0x0008 +#define XILINX_DMA_REG_TAILDESC 0x0010 +#define XILINX_DMA_REG_REG_INDEX 0x0014 +#define XILINX_DMA_REG_FRMSTORE 0x0018 +#define XILINX_DMA_REG_THRESHOLD 0x001c +#define XILINX_DMA_REG_FRMPTR_STS 0x0024 +#define XILINX_DMA_REG_PARK_PTR 0x0028 +#define XILINX_DMA_PARK_PTR_WR_REF_SHIFT 8 +#define XILINX_DMA_PARK_PTR_RD_REF_SHIFT 0 +#define XILINX_DMA_REG_VDMA_VERSION 0x002c + +/* Register Direct Mode Registers */ +#define XILINX_DMA_REG_VSIZE 0x0000 +#define XILINX_DMA_REG_HSIZE 0x0004 + +#define XILINX_DMA_REG_FRMDLY_STRIDE 0x0008 +#define XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT 24 +#define XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT 0 + +#define XILINX_VDMA_REG_START_ADDRESS(n) (0x000c + 4 * (n)) +#define XILINX_VDMA_REG_START_ADDRESS_64(n) (0x000c + 8 * 
(n)) + +/* HW specific definitions */ +#define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x20 + +#define XILINX_DMA_DMAXR_ALL_IRQ_MASK \ + (XILINX_DMA_DMASR_FRM_CNT_IRQ | \ + XILINX_DMA_DMASR_DLY_CNT_IRQ | \ + XILINX_DMA_DMASR_ERR_IRQ) + +#define XILINX_DMA_DMASR_ALL_ERR_MASK \ + (XILINX_DMA_DMASR_EOL_LATE_ERR | \ + XILINX_DMA_DMASR_SOF_LATE_ERR | \ + XILINX_DMA_DMASR_SG_DEC_ERR | \ + XILINX_DMA_DMASR_SG_SLV_ERR | \ + XILINX_DMA_DMASR_EOF_EARLY_ERR | \ + XILINX_DMA_DMASR_SOF_EARLY_ERR | \ + XILINX_DMA_DMASR_DMA_DEC_ERR | \ + XILINX_DMA_DMASR_DMA_SLAVE_ERR | \ + XILINX_DMA_DMASR_DMA_INT_ERR) + +/* + * Recoverable errors are DMA Internal error, SOF Early, EOF Early + * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC + * is enabled in the h/w system. + */ +#define XILINX_DMA_DMASR_ERR_RECOVER_MASK \ + (XILINX_DMA_DMASR_SOF_LATE_ERR | \ + XILINX_DMA_DMASR_EOF_EARLY_ERR | \ + XILINX_DMA_DMASR_SOF_EARLY_ERR | \ + XILINX_DMA_DMASR_DMA_INT_ERR) + +/* Axi VDMA Flush on Fsync bits */ +#define XILINX_DMA_FLUSH_S2MM 3 +#define XILINX_DMA_FLUSH_MM2S 2 +#define XILINX_DMA_FLUSH_BOTH 1 + +/* Delay loop counter to prevent hardware failure */ +#define XILINX_DMA_LOOP_COUNT 1000000 + +/* AXI DMA Specific Registers/Offsets */ +#define XILINX_DMA_REG_SRCDSTADDR 0x18 +#define XILINX_DMA_REG_BTT 0x28 + +/* AXI DMA Specific Masks/Bit fields */ +#define XILINX_DMA_MAX_TRANS_LEN GENMASK(22, 0) +#define XILINX_DMA_CR_COALESCE_MAX GENMASK(23, 16) +#define XILINX_DMA_CR_CYCLIC_BD_EN_MASK BIT(4) +#define XILINX_DMA_CR_COALESCE_SHIFT 16 +#define XILINX_DMA_BD_SOP BIT(27) +#define XILINX_DMA_BD_EOP BIT(26) +#define XILINX_DMA_COALESCE_MAX 255 +#define XILINX_DMA_NUM_APP_WORDS 5 + +/* Multi-Channel DMA Descriptor offsets*/ +#define XILINX_DMA_MCRX_CDESC(x) (0x40 + (x-1) * 0x20) +#define XILINX_DMA_MCRX_TDESC(x) (0x48 + (x-1) * 0x20) + +/* Multi-Channel DMA Masks/Shifts */ +#define XILINX_DMA_BD_HSIZE_MASK GENMASK(15, 0) +#define XILINX_DMA_BD_STRIDE_MASK GENMASK(15, 0) +#define XILINX_DMA_BD_VSIZE_MASK GENMASK(31, 19) +#define XILINX_DMA_BD_TDEST_MASK GENMASK(4, 0) +#define XILINX_DMA_BD_STRIDE_SHIFT 0 +#define XILINX_DMA_BD_VSIZE_SHIFT 19 + +/* AXI CDMA Specific Registers/Offsets */ +#define XILINX_CDMA_REG_SRCADDR 0x18 +#define XILINX_CDMA_REG_DSTADDR 0x20 + +/* AXI CDMA Specific Masks */ +#define XILINX_CDMA_CR_SGMODE BIT(3) + +/** + * struct xilinx_vdma_desc_hw - Hardware Descriptor + * @next_desc: Next Descriptor Pointer @0x00 + * @pad1: Reserved @0x04 + * @buf_addr: Buffer address @0x08 + * @buf_addr_msb: MSB of Buffer address @0x0C + * @vsize: Vertical Size @0x10 + * @hsize: Horizontal Size @0x14 + * @stride: Number of bytes between the first + * pixels of each horizontal line @0x18 + */ +struct xilinx_vdma_desc_hw { + u32 next_desc; + u32 pad1; + u32 buf_addr; + u32 buf_addr_msb; + u32 vsize; + u32 hsize; + u32 stride; +} __aligned(64); + +/** + * struct xilinx_axidma_desc_hw - Hardware Descriptor for AXI DMA + * @next_desc: Next Descriptor Pointer @0x00 + * @next_desc_msb: MSB of Next Descriptor Pointer @0x04 + * @buf_addr: Buffer address @0x08 + * @buf_addr_msb: MSB of Buffer address @0x0C + * @pad1: Reserved @0x10 + * @pad2: Reserved @0x14 + * @control: Control field @0x18 + * @status: Status field @0x1C + * @app: APP Fields @0x20 - 0x30 + */ +struct xilinx_axidma_desc_hw { + u32 next_desc; + u32 next_desc_msb; + u32 buf_addr; + u32 buf_addr_msb; + u32 mcdma_control; + u32 vsize_stride; + u32 control; + u32 status; + u32 app[XILINX_DMA_NUM_APP_WORDS]; +} __aligned(64); + +/** + * struct xilinx_cdma_desc_hw 
- Hardware Descriptor + * @next_desc: Next Descriptor Pointer @0x00 + * @next_descmsb: Next Descriptor Pointer MSB @0x04 + * @src_addr: Source address @0x08 + * @src_addrmsb: Source address MSB @0x0C + * @dest_addr: Destination address @0x10 + * @dest_addrmsb: Destination address MSB @0x14 + * @control: Control field @0x18 + * @status: Status field @0x1C + */ +struct xilinx_cdma_desc_hw { + u32 next_desc; + u32 next_desc_msb; + u32 src_addr; + u32 src_addr_msb; + u32 dest_addr; + u32 dest_addr_msb; + u32 control; + u32 status; +} __aligned(64); + +/** + * struct xilinx_vdma_tx_segment - Descriptor segment + * @hw: Hardware descriptor + * @node: Node in the descriptor segments list + * @phys: Physical address of segment + */ +struct xilinx_vdma_tx_segment { + struct xilinx_vdma_desc_hw hw; + struct list_head node; + dma_addr_t phys; +} __aligned(64); + +/** + * struct xilinx_axidma_tx_segment - Descriptor segment + * @hw: Hardware descriptor + * @node: Node in the descriptor segments list + * @phys: Physical address of segment + */ +struct xilinx_axidma_tx_segment { + struct xilinx_axidma_desc_hw hw; + struct list_head node; + dma_addr_t phys; +} __aligned(64); + +/** + * struct xilinx_cdma_tx_segment - Descriptor segment + * @hw: Hardware descriptor + * @node: Node in the descriptor segments list + * @phys: Physical address of segment + */ +struct xilinx_cdma_tx_segment { + struct xilinx_cdma_desc_hw hw; + struct list_head node; + dma_addr_t phys; +} __aligned(64); + +/** + * struct xilinx_dma_tx_descriptor - Per Transaction structure + * @async_tx: Async transaction descriptor + * @segments: TX segments list + * @node: Node in the channel descriptors list + * @cyclic: Check for cyclic transfers. + */ +struct xilinx_dma_tx_descriptor { + struct dma_async_tx_descriptor async_tx; + struct list_head segments; + struct list_head node; + bool cyclic; +}; + +/** + * struct xilinx_dma_chan - Driver specific DMA channel structure + * @xdev: Driver specific device structure + * @ctrl_offset: Control registers offset + * @desc_offset: TX descriptor registers offset + * @lock: Descriptor operation lock + * @pending_list: Descriptors waiting + * @active_list: Descriptors ready to submit + * @done_list: Complete descriptors + * @common: DMA common channel + * @desc_pool: Descriptors pool + * @dev: The dma device + * @irq: Channel IRQ + * @id: Channel ID + * @direction: Transfer direction + * @num_frms: Number of frames + * @has_sg: Support scatter transfers + * @cyclic: Check for cyclic transfers. 
+ * @genlock: Support genlock mode + * @err: Channel has errors + * @tasklet: Cleanup work after irq + * @config: Device configuration info + * @flush_on_fsync: Flush on Frame sync + * @desc_pendingcount: Descriptor pending count + * @ext_addr: Indicates 64 bit addressing is supported by dma channel + * @desc_submitcount: Descriptor h/w submitted count + * @residue: Residue for AXI DMA + * @seg_v: Statically allocated segments base + * @cyclic_seg_v: Statically allocated segment base for cyclic transfers + * @start_transfer: Differentiate b/w DMA IP's transfer + */ +struct xilinx_dma_chan { + struct xilinx_dma_device *xdev; + u32 ctrl_offset; + u32 desc_offset; + spinlock_t lock; + struct list_head pending_list; + struct list_head active_list; + struct list_head done_list; + struct dma_chan common; + struct dma_pool *desc_pool; + struct device *dev; + int irq; + int id; + enum dma_transfer_direction direction; + int num_frms; + bool has_sg; + bool cyclic; + bool genlock; + bool err; + struct tasklet_struct tasklet; + struct xilinx_vdma_config config; + bool flush_on_fsync; + u32 desc_pendingcount; + bool ext_addr; + u32 desc_submitcount; + u32 residue; + struct xilinx_axidma_tx_segment *seg_v; + struct xilinx_axidma_tx_segment *cyclic_seg_v; + void (*start_transfer)(struct xilinx_dma_chan *chan); + u16 tdest; +}; + +struct xilinx_dma_config { + enum xdma_ip_type dmatype; + int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk, + struct clk **tx_clk, struct clk **txs_clk, + struct clk **rx_clk, struct clk **rxs_clk); +}; + +/** + * struct xilinx_dma_device - DMA device structure + * @regs: I/O mapped base address + * @dev: Device Structure + * @common: DMA device structure + * @chan: Driver specific DMA channel + * @has_sg: Specifies whether Scatter-Gather is present or not + * @mcdma: Specifies whether Multi-Channel is present or not + * @flush_on_fsync: Flush on frame sync + * @ext_addr: Indicates 64 bit addressing is supported by dma device + * @pdev: Platform device structure pointer + * @dma_config: DMA config structure + * @axi_clk: DMA Axi4-lite interace clock + * @tx_clk: DMA mm2s clock + * @txs_clk: DMA mm2s stream clock + * @rx_clk: DMA s2mm clock + * @rxs_clk: DMA s2mm stream clock + * @nr_channels: Number of channels DMA device supports + * @chan_id: DMA channel identifier + */ +struct xilinx_dma_device { + void __iomem *regs; + struct device *dev; + struct dma_device common; + struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE]; + bool has_sg; + bool mcdma; + u32 flush_on_fsync; + bool ext_addr; + struct platform_device *pdev; + const struct xilinx_dma_config *dma_config; + struct clk *axi_clk; + struct clk *tx_clk; + struct clk *txs_clk; + struct clk *rx_clk; + struct clk *rxs_clk; + u32 nr_channels; + u32 chan_id; +}; + +/* Macros */ +#define to_xilinx_chan(chan) \ + container_of(chan, struct xilinx_dma_chan, common) +#define to_dma_tx_descriptor(tx) \ + container_of(tx, struct xilinx_dma_tx_descriptor, async_tx) +#define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \ + readl_poll_timeout(chan->xdev->regs + chan->ctrl_offset + reg, val, \ + cond, delay_us, timeout_us) + +/* IO accessors */ +static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg) +{ + return ioread32(chan->xdev->regs + reg); +} + +static inline void dma_write(struct xilinx_dma_chan *chan, u32 reg, u32 value) +{ + iowrite32(value, chan->xdev->regs + reg); +} + +static inline void vdma_desc_write(struct xilinx_dma_chan *chan, u32 reg, + u32 value) +{ + 
dma_write(chan, chan->desc_offset + reg, value); +} + +static inline u32 dma_ctrl_read(struct xilinx_dma_chan *chan, u32 reg) +{ + return dma_read(chan, chan->ctrl_offset + reg); +} + +static inline void dma_ctrl_write(struct xilinx_dma_chan *chan, u32 reg, + u32 value) +{ + dma_write(chan, chan->ctrl_offset + reg, value); +} + +static inline void dma_ctrl_clr(struct xilinx_dma_chan *chan, u32 reg, + u32 clr) +{ + dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) & ~clr); +} + +static inline void dma_ctrl_set(struct xilinx_dma_chan *chan, u32 reg, + u32 set) +{ + dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) | set); +} + +/** + * vdma_desc_write_64 - 64-bit descriptor write + * @chan: Driver specific VDMA channel + * @reg: Register to write + * @value_lsb: lower address of the descriptor. + * @value_msb: upper address of the descriptor. + * + * Since vdma driver is trying to write to a register offset which is not a + * multiple of 64 bits(ex : 0x5c), we are writing as two separate 32 bits + * instead of a single 64 bit register write. + */ +static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg, + u32 value_lsb, u32 value_msb) +{ + /* Write the lsb 32 bits*/ + writel(value_lsb, chan->xdev->regs + chan->desc_offset + reg); + + /* Write the msb 32 bits */ + writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4); +} + +static inline void dma_writeq(struct xilinx_dma_chan *chan, u32 reg, u64 value) +{ + lo_hi_writeq(value, chan->xdev->regs + chan->ctrl_offset + reg); +} + +static inline void xilinx_write(struct xilinx_dma_chan *chan, u32 reg, + dma_addr_t addr) +{ + if (chan->ext_addr) + dma_writeq(chan, reg, addr); + else + dma_ctrl_write(chan, reg, addr); +} + +static inline void xilinx_axidma_buf(struct xilinx_dma_chan *chan, + struct xilinx_axidma_desc_hw *hw, + dma_addr_t buf_addr, size_t sg_used, + size_t period_len) +{ + if (chan->ext_addr) { + hw->buf_addr = lower_32_bits(buf_addr + sg_used + period_len); + hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used + + period_len); + } else { + hw->buf_addr = buf_addr + sg_used + period_len; + } +} + +/* ----------------------------------------------------------------------------- + * Descriptors and segments alloc and free + */ + +/** + * xilinx_vdma_alloc_tx_segment - Allocate transaction segment + * @chan: Driver specific DMA channel + * + * Return: The allocated segment on success and NULL on failure. + */ +static struct xilinx_vdma_tx_segment * +xilinx_vdma_alloc_tx_segment(struct xilinx_dma_chan *chan) +{ + struct xilinx_vdma_tx_segment *segment; + dma_addr_t phys; + + segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys); + if (!segment) + return NULL; + + segment->phys = phys; + + return segment; +} + +/** + * xilinx_cdma_alloc_tx_segment - Allocate transaction segment + * @chan: Driver specific DMA channel + * + * Return: The allocated segment on success and NULL on failure. + */ +static struct xilinx_cdma_tx_segment * +xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan) +{ + struct xilinx_cdma_tx_segment *segment; + dma_addr_t phys; + + segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys); + if (!segment) + return NULL; + + segment->phys = phys; + + return segment; +} + +/** + * xilinx_axidma_alloc_tx_segment - Allocate transaction segment + * @chan: Driver specific DMA channel + * + * Return: The allocated segment on success and NULL on failure. 
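+ * The segment comes from the channel's dma_pool and is allocated with
+ * GFP_ATOMIC, so this is safe to call from the descriptor preparation path.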
+ */
+static struct xilinx_axidma_tx_segment *
+xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
+{
+	struct xilinx_axidma_tx_segment *segment;
+	dma_addr_t phys;
+
+	segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
+	if (!segment)
+		return NULL;
+
+	segment->phys = phys;
+
+	return segment;
+}
+
+/**
+ * xilinx_dma_free_tx_segment - Free transaction segment
+ * @chan: Driver specific DMA channel
+ * @segment: DMA transaction segment
+ */
+static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan,
+				struct xilinx_axidma_tx_segment *segment)
+{
+	dma_pool_free(chan->desc_pool, segment, segment->phys);
+}
+
+/**
+ * xilinx_cdma_free_tx_segment - Free transaction segment
+ * @chan: Driver specific DMA channel
+ * @segment: DMA transaction segment
+ */
+static void xilinx_cdma_free_tx_segment(struct xilinx_dma_chan *chan,
+				struct xilinx_cdma_tx_segment *segment)
+{
+	dma_pool_free(chan->desc_pool, segment, segment->phys);
+}
+
+/**
+ * xilinx_vdma_free_tx_segment - Free transaction segment
+ * @chan: Driver specific DMA channel
+ * @segment: DMA transaction segment
+ */
+static void xilinx_vdma_free_tx_segment(struct xilinx_dma_chan *chan,
+					struct xilinx_vdma_tx_segment *segment)
+{
+	dma_pool_free(chan->desc_pool, segment, segment->phys);
+}
+
+/**
+ * xilinx_dma_alloc_tx_descriptor - Allocate transaction descriptor
+ * @chan: Driver specific DMA channel
+ *
+ * Return: The allocated descriptor on success and NULL on failure.
+ */
+static struct xilinx_dma_tx_descriptor *
+xilinx_dma_alloc_tx_descriptor(struct xilinx_dma_chan *chan)
+{
+	struct xilinx_dma_tx_descriptor *desc;
+
+	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+	if (!desc)
+		return NULL;
+
+	INIT_LIST_HEAD(&desc->segments);
+
+	return desc;
+}
+
+/**
+ * xilinx_dma_free_tx_descriptor - Free transaction descriptor
+ * @chan: Driver specific DMA channel
+ * @desc: DMA transaction descriptor
+ */
+static void
+xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan,
+			      struct xilinx_dma_tx_descriptor *desc)
+{
+	struct xilinx_vdma_tx_segment *segment, *next;
+	struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next;
+	struct xilinx_axidma_tx_segment *axidma_segment, *axidma_next;
+
+	if (!desc)
+		return;
+
+	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
+		list_for_each_entry_safe(segment, next, &desc->segments, node) {
+			list_del(&segment->node);
+			xilinx_vdma_free_tx_segment(chan, segment);
+		}
+	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
+		list_for_each_entry_safe(cdma_segment, cdma_next,
+					 &desc->segments, node) {
+			list_del(&cdma_segment->node);
+			xilinx_cdma_free_tx_segment(chan, cdma_segment);
+		}
+	} else {
+		list_for_each_entry_safe(axidma_segment, axidma_next,
+					 &desc->segments, node) {
+			list_del(&axidma_segment->node);
+			xilinx_dma_free_tx_segment(chan, axidma_segment);
+		}
+	}
+
+	kfree(desc);
+}
+
+/* Required functions */
+
+/**
+ * xilinx_dma_free_desc_list - Free descriptors list
+ * @chan: Driver specific DMA channel
+ * @list: List to parse and delete the descriptor
+ */
+static void xilinx_dma_free_desc_list(struct xilinx_dma_chan *chan,
+					struct list_head *list)
+{
+	struct xilinx_dma_tx_descriptor *desc, *next;
+
+	list_for_each_entry_safe(desc, next, list, node) {
+		list_del(&desc->node);
+		xilinx_dma_free_tx_descriptor(chan, desc);
+	}
+}
+
+/**
+ * xilinx_dma_free_descriptors - Free channel descriptors
+ * @chan: Driver specific DMA channel
+ */
+static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan)
+{
+	unsigned long flags;
+
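+	/* Hold the channel lock so all three lists are emptied atomically */
+	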
spin_lock_irqsave(&chan->lock, flags); + + xilinx_dma_free_desc_list(chan, &chan->pending_list); + xilinx_dma_free_desc_list(chan, &chan->done_list); + xilinx_dma_free_desc_list(chan, &chan->active_list); + + spin_unlock_irqrestore(&chan->lock, flags); +} + +/** + * xilinx_dma_free_chan_resources - Free channel resources + * @dchan: DMA channel + */ +static void xilinx_dma_free_chan_resources(struct dma_chan *dchan) +{ + struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); + + dev_dbg(chan->dev, "Free all channel resources.\n"); + + xilinx_dma_free_descriptors(chan); + if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { + xilinx_dma_free_tx_segment(chan, chan->cyclic_seg_v); + xilinx_dma_free_tx_segment(chan, chan->seg_v); + } + dma_pool_destroy(chan->desc_pool); + chan->desc_pool = NULL; +} + +/** + * xilinx_dma_chan_handle_cyclic - Cyclic dma callback + * @chan: Driver specific dma channel + * @desc: dma transaction descriptor + * @flags: flags for spin lock + */ +static void xilinx_dma_chan_handle_cyclic(struct xilinx_dma_chan *chan, + struct xilinx_dma_tx_descriptor *desc, + unsigned long *flags) +{ + dma_async_tx_callback callback; + void *callback_param; + + callback = desc->async_tx.callback; + callback_param = desc->async_tx.callback_param; + if (callback) { + spin_unlock_irqrestore(&chan->lock, *flags); + callback(callback_param); + spin_lock_irqsave(&chan->lock, *flags); + } +} + +/** + * xilinx_dma_chan_desc_cleanup - Clean channel descriptors + * @chan: Driver specific DMA channel + */ +static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan) +{ + struct xilinx_dma_tx_descriptor *desc, *next; + unsigned long flags; + + spin_lock_irqsave(&chan->lock, flags); + + list_for_each_entry_safe(desc, next, &chan->done_list, node) { + dma_async_tx_callback callback; + void *callback_param; + + if (desc->cyclic) { + xilinx_dma_chan_handle_cyclic(chan, desc, &flags); + break; + } + + /* Remove from the list of running transactions */ + list_del(&desc->node); + + /* Run the link descriptor callback function */ + callback = desc->async_tx.callback; + callback_param = desc->async_tx.callback_param; + if (callback) { + spin_unlock_irqrestore(&chan->lock, flags); + callback(callback_param); + spin_lock_irqsave(&chan->lock, flags); + } + + /* Run any dependencies, then free the descriptor */ + dma_run_dependencies(&desc->async_tx); + xilinx_dma_free_tx_descriptor(chan, desc); + } + + spin_unlock_irqrestore(&chan->lock, flags); +} + +/** + * xilinx_dma_do_tasklet - Schedule completion tasklet + * @data: Pointer to the Xilinx DMA channel structure + */ +static void xilinx_dma_do_tasklet(unsigned long data) +{ + struct xilinx_dma_chan *chan = (struct xilinx_dma_chan *)data; + + xilinx_dma_chan_desc_cleanup(chan); +} + +/** + * xilinx_dma_alloc_chan_resources - Allocate channel resources + * @dchan: DMA channel + * + * Return: '0' on success and failure value on error + */ +static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan) +{ + struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); + + /* Has this channel already been allocated? */ + if (chan->desc_pool) + return 0; + + /* + * We need the descriptor to be aligned to 64bytes + * for meeting Xilinx VDMA specification requirement. 
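+	 * The hardware descriptor structures are declared __aligned(64), so
+	 * passing __alignof__ to dma_pool_create() below gives the pool that
+	 * alignment.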
+ */
+	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
+		chan->desc_pool = dma_pool_create("xilinx_dma_desc_pool",
+				   chan->dev,
+				   sizeof(struct xilinx_axidma_tx_segment),
+				   __alignof__(struct xilinx_axidma_tx_segment),
+				   0);
+	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
+		chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool",
+				   chan->dev,
+				   sizeof(struct xilinx_cdma_tx_segment),
+				   __alignof__(struct xilinx_cdma_tx_segment),
+				   0);
+	} else {
+		chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool",
+				     chan->dev,
+				     sizeof(struct xilinx_vdma_tx_segment),
+				     __alignof__(struct xilinx_vdma_tx_segment),
+				     0);
+	}
+
+	if (!chan->desc_pool) {
+		dev_err(chan->dev,
+			"unable to allocate channel %d descriptor pool\n",
+			chan->id);
+		return -ENOMEM;
+	}
+
+	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
+		/*
+		 * For AXI DMA case after submitting a pending_list, keep
+		 * an extra segment allocated so that the "next descriptor"
+		 * pointer on the tail descriptor always points to a
+		 * valid descriptor, even when paused after reaching taildesc.
+		 * This way, it is possible to issue additional
+		 * transfers without halting and restarting the channel.
+		 */
+		chan->seg_v = xilinx_axidma_alloc_tx_segment(chan);
+
+		/*
+		 * For cyclic DMA mode we need to program the tail Descriptor
+		 * register with a value which is not a part of the BD chain
+		 * so allocating a desc segment during channel allocation for
+		 * programming tail descriptor.
+		 */
+		chan->cyclic_seg_v = xilinx_axidma_alloc_tx_segment(chan);
+	}
+
+	dma_cookie_init(dchan);
+
+	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
+		/* For AXI DMA, resetting one channel resets the other
+		 * channel as well, so enable the interrupts here.
+		 */
+		dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
+			      XILINX_DMA_DMAXR_ALL_IRQ_MASK);
+	}
+
+	if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
+		dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
+			     XILINX_CDMA_CR_SGMODE);
+
+	return 0;
+}
+
+/**
+ * xilinx_dma_tx_status - Get DMA transaction status
+ * @dchan: DMA channel
+ * @cookie: Transaction identifier
+ * @txstate: Transaction state
+ *
+ * Return: DMA transaction status
+ */
+static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
+					dma_cookie_t cookie,
+					struct dma_tx_state *txstate)
+{
+	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+	struct xilinx_dma_tx_descriptor *desc;
+	struct xilinx_axidma_tx_segment *segment;
+	struct xilinx_axidma_desc_hw *hw;
+	enum dma_status ret;
+	unsigned long flags;
+	u32 residue = 0;
+
+	ret = dma_cookie_status(dchan, cookie, txstate);
+	if (ret == DMA_COMPLETE || !txstate)
+		return ret;
+
+	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
+		spin_lock_irqsave(&chan->lock, flags);
+
+		desc = list_last_entry(&chan->active_list,
+				       struct xilinx_dma_tx_descriptor, node);
+		if (chan->has_sg) {
+			list_for_each_entry(segment, &desc->segments, node) {
+				hw = &segment->hw;
+				residue += (hw->control - hw->status) &
+					   XILINX_DMA_MAX_TRANS_LEN;
+			}
+		}
+		spin_unlock_irqrestore(&chan->lock, flags);
+
+		chan->residue = residue;
+		dma_set_residue(txstate, chan->residue);
+	}
+
+	return ret;
+}
+
+/**
+ * xilinx_dma_is_running - Check if DMA channel is running
+ * @chan: Driver specific DMA channel
+ *
+ * Return: '1' if running, '0' if not.
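+ * A channel counts as running when the DMASR halted bit is clear and the
+ * DMACR run/stop bit is set.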
+ */ +static bool xilinx_dma_is_running(struct xilinx_dma_chan *chan) +{ + return !(dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) & + XILINX_DMA_DMASR_HALTED) && + (dma_ctrl_read(chan, XILINX_DMA_REG_DMACR) & + XILINX_DMA_DMACR_RUNSTOP); +} + +/** + * xilinx_dma_is_idle - Check if DMA channel is idle + * @chan: Driver specific DMA channel + * + * Return: '1' if idle, '0' if not. + */ +static bool xilinx_dma_is_idle(struct xilinx_dma_chan *chan) +{ + return dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) & + XILINX_DMA_DMASR_IDLE; +} + +/** + * xilinx_dma_halt - Halt DMA channel + * @chan: Driver specific DMA channel + */ +static void xilinx_dma_halt(struct xilinx_dma_chan *chan) +{ + int err; + u32 val; + + dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP); + + /* Wait for the hardware to halt */ + err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val, + (val & XILINX_DMA_DMASR_HALTED), 0, + XILINX_DMA_LOOP_COUNT); + + if (err) { + dev_err(chan->dev, "Cannot stop channel %p: %x\n", + chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR)); + chan->err = true; + } +} + +/** + * xilinx_dma_start - Start DMA channel + * @chan: Driver specific DMA channel + */ +static void xilinx_dma_start(struct xilinx_dma_chan *chan) +{ + int err; + u32 val; + + dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP); + + /* Wait for the hardware to start */ + err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val, + !(val & XILINX_DMA_DMASR_HALTED), 0, + XILINX_DMA_LOOP_COUNT); + + if (err) { + dev_err(chan->dev, "Cannot start channel %p: %x\n", + chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR)); + + chan->err = true; + } +} + +/** + * xilinx_vdma_start_transfer - Starts VDMA transfer + * @chan: Driver specific channel struct pointer + */ +static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan) +{ + struct xilinx_vdma_config *config = &chan->config; + struct xilinx_dma_tx_descriptor *desc, *tail_desc; + u32 reg; + struct xilinx_vdma_tx_segment *tail_segment; + + /* This function was invoked with lock held */ + if (chan->err) + return; + + if (list_empty(&chan->pending_list)) + return; + + desc = list_first_entry(&chan->pending_list, + struct xilinx_dma_tx_descriptor, node); + tail_desc = list_last_entry(&chan->pending_list, + struct xilinx_dma_tx_descriptor, node); + + tail_segment = list_last_entry(&tail_desc->segments, + struct xilinx_vdma_tx_segment, node); + + /* If it is SG mode and hardware is busy, cannot submit */ + if (chan->has_sg && xilinx_dma_is_running(chan) && + !xilinx_dma_is_idle(chan)) { + dev_dbg(chan->dev, "DMA controller still busy\n"); + return; + } + + /* + * If hardware is idle, then all descriptors on the running lists are + * done, start new transfers + */ + if (chan->has_sg) + dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC, + desc->async_tx.phys); + + /* Configure the hardware using info in the config structure */ + reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); + + if (config->frm_cnt_en) + reg |= XILINX_DMA_DMACR_FRAMECNT_EN; + else + reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN; + + /* Configure channel to allow number frame buffers */ + dma_ctrl_write(chan, XILINX_DMA_REG_FRMSTORE, + chan->desc_pendingcount); + + /* + * With SG, start with circular mode, so that BDs can be fetched. 
+ * In direct register mode, if not parking, enable circular mode + */ + if (chan->has_sg || !config->park) + reg |= XILINX_DMA_DMACR_CIRC_EN; + + if (config->park) + reg &= ~XILINX_DMA_DMACR_CIRC_EN; + + dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg); + + if (config->park && (config->park_frm >= 0) && + (config->park_frm < chan->num_frms)) { + if (chan->direction == DMA_MEM_TO_DEV) + dma_write(chan, XILINX_DMA_REG_PARK_PTR, + config->park_frm << + XILINX_DMA_PARK_PTR_RD_REF_SHIFT); + else + dma_write(chan, XILINX_DMA_REG_PARK_PTR, + config->park_frm << + XILINX_DMA_PARK_PTR_WR_REF_SHIFT); + } + + /* Start the hardware */ + xilinx_dma_start(chan); + + if (chan->err) + return; + + /* Start the transfer */ + if (chan->has_sg) { + dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC, + tail_segment->phys); + } else { + struct xilinx_vdma_tx_segment *segment, *last = NULL; + int i = 0; + + if (chan->desc_submitcount < chan->num_frms) + i = chan->desc_submitcount; + + list_for_each_entry(segment, &desc->segments, node) { + if (chan->ext_addr) + vdma_desc_write_64(chan, + XILINX_VDMA_REG_START_ADDRESS_64(i++), + segment->hw.buf_addr, + segment->hw.buf_addr_msb); + else + vdma_desc_write(chan, + XILINX_VDMA_REG_START_ADDRESS(i++), + segment->hw.buf_addr); + + last = segment; + } + + if (!last) + return; + + /* HW expects these parameters to be same for one transaction */ + vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize); + vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE, + last->hw.stride); + vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize); + } + + if (!chan->has_sg) { + list_del(&desc->node); + list_add_tail(&desc->node, &chan->active_list); + chan->desc_submitcount++; + chan->desc_pendingcount--; + if (chan->desc_submitcount == chan->num_frms) + chan->desc_submitcount = 0; + } else { + list_splice_tail_init(&chan->pending_list, &chan->active_list); + chan->desc_pendingcount = 0; + } +} + +/** + * xilinx_cdma_start_transfer - Starts cdma transfer + * @chan: Driver specific channel struct pointer + */ +static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan) +{ + struct xilinx_dma_tx_descriptor *head_desc, *tail_desc; + struct xilinx_cdma_tx_segment *tail_segment; + u32 ctrl_reg = dma_read(chan, XILINX_DMA_REG_DMACR); + + if (chan->err) + return; + + if (list_empty(&chan->pending_list)) + return; + + head_desc = list_first_entry(&chan->pending_list, + struct xilinx_dma_tx_descriptor, node); + tail_desc = list_last_entry(&chan->pending_list, + struct xilinx_dma_tx_descriptor, node); + tail_segment = list_last_entry(&tail_desc->segments, + struct xilinx_cdma_tx_segment, node); + + if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) { + ctrl_reg &= ~XILINX_DMA_CR_COALESCE_MAX; + ctrl_reg |= chan->desc_pendingcount << + XILINX_DMA_CR_COALESCE_SHIFT; + dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, ctrl_reg); + } + + if (chan->has_sg) { + xilinx_write(chan, XILINX_DMA_REG_CURDESC, + head_desc->async_tx.phys); + + /* Update tail ptr register which will start the transfer */ + xilinx_write(chan, XILINX_DMA_REG_TAILDESC, + tail_segment->phys); + } else { + /* In simple mode */ + struct xilinx_cdma_tx_segment *segment; + struct xilinx_cdma_desc_hw *hw; + + segment = list_first_entry(&head_desc->segments, + struct xilinx_cdma_tx_segment, + node); + + hw = &segment->hw; + + xilinx_write(chan, XILINX_CDMA_REG_SRCADDR, hw->src_addr); + xilinx_write(chan, XILINX_CDMA_REG_DSTADDR, hw->dest_addr); + + /* Start the transfer */ + dma_ctrl_write(chan, XILINX_DMA_REG_BTT, + 
hw->control & XILINX_DMA_MAX_TRANS_LEN); + } + + list_splice_tail_init(&chan->pending_list, &chan->active_list); + chan->desc_pendingcount = 0; +} + +/** + * xilinx_dma_start_transfer - Starts DMA transfer + * @chan: Driver specific channel struct pointer + */ +static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan) +{ + struct xilinx_dma_tx_descriptor *head_desc, *tail_desc; + struct xilinx_axidma_tx_segment *tail_segment, *old_head, *new_head; + u32 reg; + + if (chan->err) + return; + + if (list_empty(&chan->pending_list)) + return; + + /* If it is SG mode and hardware is busy, cannot submit */ + if (chan->has_sg && xilinx_dma_is_running(chan) && + !xilinx_dma_is_idle(chan)) { + dev_dbg(chan->dev, "DMA controller still busy\n"); + return; + } + + head_desc = list_first_entry(&chan->pending_list, + struct xilinx_dma_tx_descriptor, node); + tail_desc = list_last_entry(&chan->pending_list, + struct xilinx_dma_tx_descriptor, node); + tail_segment = list_last_entry(&tail_desc->segments, + struct xilinx_axidma_tx_segment, node); + + if (chan->has_sg && !chan->xdev->mcdma) { + old_head = list_first_entry(&head_desc->segments, + struct xilinx_axidma_tx_segment, node); + new_head = chan->seg_v; + /* Copy Buffer Descriptor fields. */ + new_head->hw = old_head->hw; + + /* Swap and save new reserve */ + list_replace_init(&old_head->node, &new_head->node); + chan->seg_v = old_head; + + tail_segment->hw.next_desc = chan->seg_v->phys; + head_desc->async_tx.phys = new_head->phys; + } + + reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); + + if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) { + reg &= ~XILINX_DMA_CR_COALESCE_MAX; + reg |= chan->desc_pendingcount << + XILINX_DMA_CR_COALESCE_SHIFT; + dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg); + } + + if (chan->has_sg && !chan->xdev->mcdma) + xilinx_write(chan, XILINX_DMA_REG_CURDESC, + head_desc->async_tx.phys); + + if (chan->has_sg && chan->xdev->mcdma) { + if (chan->direction == DMA_MEM_TO_DEV) { + dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC, + head_desc->async_tx.phys); + } else { + if (!chan->tdest) { + dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC, + head_desc->async_tx.phys); + } else { + dma_ctrl_write(chan, + XILINX_DMA_MCRX_CDESC(chan->tdest), + head_desc->async_tx.phys); + } + } + } + + xilinx_dma_start(chan); + + if (chan->err) + return; + + /* Start the transfer */ + if (chan->has_sg && !chan->xdev->mcdma) { + if (chan->cyclic) + xilinx_write(chan, XILINX_DMA_REG_TAILDESC, + chan->cyclic_seg_v->phys); + else + xilinx_write(chan, XILINX_DMA_REG_TAILDESC, + tail_segment->phys); + } else if (chan->has_sg && chan->xdev->mcdma) { + if (chan->direction == DMA_MEM_TO_DEV) { + dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC, + tail_segment->phys); + } else { + if (!chan->tdest) { + dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC, + tail_segment->phys); + } else { + dma_ctrl_write(chan, + XILINX_DMA_MCRX_TDESC(chan->tdest), + tail_segment->phys); + } + } + } else { + struct xilinx_axidma_tx_segment *segment; + struct xilinx_axidma_desc_hw *hw; + + segment = list_first_entry(&head_desc->segments, + struct xilinx_axidma_tx_segment, + node); + hw = &segment->hw; + + xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR, hw->buf_addr); + + /* Start the transfer */ + dma_ctrl_write(chan, XILINX_DMA_REG_BTT, + hw->control & XILINX_DMA_MAX_TRANS_LEN); + } + + list_splice_tail_init(&chan->pending_list, &chan->active_list); + chan->desc_pendingcount = 0; +} + +/** + * xilinx_dma_issue_pending - Issue pending transactions + * @dchan: DMA channel + */ 
+static void xilinx_dma_issue_pending(struct dma_chan *dchan)
+{
+	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&chan->lock, flags);
+	chan->start_transfer(chan);
+	spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+/**
+ * xilinx_dma_complete_descriptor - Mark the active descriptor as complete
+ * @chan: Xilinx DMA channel
+ *
+ * CONTEXT: hardirq
+ */
+static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan)
+{
+	struct xilinx_dma_tx_descriptor *desc, *next;
+
+	/* This function was invoked with lock held */
+	if (list_empty(&chan->active_list))
+		return;
+
+	list_for_each_entry_safe(desc, next, &chan->active_list, node) {
+		list_del(&desc->node);
+		if (!desc->cyclic)
+			dma_cookie_complete(&desc->async_tx);
+		list_add_tail(&desc->node, &chan->done_list);
+	}
+}
+
+/**
+ * xilinx_dma_reset - Reset DMA channel
+ * @chan: Driver specific DMA channel
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int xilinx_dma_reset(struct xilinx_dma_chan *chan)
+{
+	int err;
+	u32 tmp;
+
+	dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RESET);
+
+	/* Wait for the hardware to finish reset */
+	err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMACR, tmp,
+				      !(tmp & XILINX_DMA_DMACR_RESET), 0,
+				      XILINX_DMA_LOOP_COUNT);
+
+	if (err) {
+		dev_err(chan->dev, "reset timeout, cr %x, sr %x\n",
+			dma_ctrl_read(chan, XILINX_DMA_REG_DMACR),
+			dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
+		return -ETIMEDOUT;
+	}
+
+	chan->err = false;
+
+	return err;
+}
+
+/**
+ * xilinx_dma_chan_reset - Reset DMA channel and enable interrupts
+ * @chan: Driver specific DMA channel
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int xilinx_dma_chan_reset(struct xilinx_dma_chan *chan)
+{
+	int err;
+
+	/* Reset VDMA */
+	err = xilinx_dma_reset(chan);
+	if (err)
+		return err;
+
+	/* Enable interrupts */
+	dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
+		     XILINX_DMA_DMAXR_ALL_IRQ_MASK);
+
+	return 0;
+}
+
+/**
+ * xilinx_dma_irq_handler - DMA Interrupt handler
+ * @irq: IRQ number
+ * @data: Pointer to the Xilinx DMA channel structure
+ *
+ * Return: IRQ_HANDLED/IRQ_NONE
+ */
+static irqreturn_t xilinx_dma_irq_handler(int irq, void *data)
+{
+	struct xilinx_dma_chan *chan = data;
+	u32 status;
+
+	/* Read the status and ack the interrupts. */
+	status = dma_ctrl_read(chan, XILINX_DMA_REG_DMASR);
+	if (!(status & XILINX_DMA_DMAXR_ALL_IRQ_MASK))
+		return IRQ_NONE;
+
+	dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
+		       status & XILINX_DMA_DMAXR_ALL_IRQ_MASK);
+
+	if (status & XILINX_DMA_DMASR_ERR_IRQ) {
+		/*
+		 * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the
+		 * error is recoverable, ignore it. Otherwise flag the error.
+		 *
+		 * Only recoverable errors can be cleared in the DMASR register,
+		 * so take care not to write 1 to any other error bits.
+		 */
+		u32 errors = status & XILINX_DMA_DMASR_ALL_ERR_MASK;
+
+		dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
+			       errors & XILINX_DMA_DMASR_ERR_RECOVER_MASK);
+
+		if (!chan->flush_on_fsync ||
+		    (errors & ~XILINX_DMA_DMASR_ERR_RECOVER_MASK)) {
+			dev_err(chan->dev,
+				"Channel %p has errors %x, cdr %x tdr %x\n",
+				chan, errors,
+				dma_ctrl_read(chan, XILINX_DMA_REG_CURDESC),
+				dma_ctrl_read(chan, XILINX_DMA_REG_TAILDESC));
+			chan->err = true;
+		}
+	}
+
+	if (status & XILINX_DMA_DMASR_DLY_CNT_IRQ) {
+		/*
+		 * The device takes too long to do the transfer when the user
+		 * requires responsiveness.
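+		 * Only log it here; completion is still signalled through
+		 * the frame count interrupt handled below.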
+ */ + dev_dbg(chan->dev, "Inter-packet latency too long\n"); + } + + if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) { + spin_lock(&chan->lock); + xilinx_dma_complete_descriptor(chan); + chan->start_transfer(chan); + spin_unlock(&chan->lock); + } + + tasklet_schedule(&chan->tasklet); + return IRQ_HANDLED; +} + +/** + * append_desc_queue - Queuing descriptor + * @chan: Driver specific dma channel + * @desc: dma transaction descriptor + */ +static void append_desc_queue(struct xilinx_dma_chan *chan, + struct xilinx_dma_tx_descriptor *desc) +{ + struct xilinx_vdma_tx_segment *tail_segment; + struct xilinx_dma_tx_descriptor *tail_desc; + struct xilinx_axidma_tx_segment *axidma_tail_segment; + struct xilinx_cdma_tx_segment *cdma_tail_segment; + + if (list_empty(&chan->pending_list)) + goto append; + + /* + * Add the hardware descriptor to the chain of hardware descriptors + * that already exists in memory. + */ + tail_desc = list_last_entry(&chan->pending_list, + struct xilinx_dma_tx_descriptor, node); + if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { + tail_segment = list_last_entry(&tail_desc->segments, + struct xilinx_vdma_tx_segment, + node); + tail_segment->hw.next_desc = (u32)desc->async_tx.phys; + } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { + cdma_tail_segment = list_last_entry(&tail_desc->segments, + struct xilinx_cdma_tx_segment, + node); + cdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys; + } else { + axidma_tail_segment = list_last_entry(&tail_desc->segments, + struct xilinx_axidma_tx_segment, + node); + axidma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys; + } + + /* + * Add the software descriptor and all children to the list + * of pending transactions + */ +append: + list_add_tail(&desc->node, &chan->pending_list); + chan->desc_pendingcount++; + + if (chan->has_sg && (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) + && unlikely(chan->desc_pendingcount > chan->num_frms)) { + dev_dbg(chan->dev, "desc pendingcount is too high\n"); + chan->desc_pendingcount = chan->num_frms; + } +} + +/** + * xilinx_dma_tx_submit - Submit DMA transaction + * @tx: Async transaction descriptor + * + * Return: cookie value on success and failure value on error + */ +static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx) +{ + struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx); + struct xilinx_dma_chan *chan = to_xilinx_chan(tx->chan); + dma_cookie_t cookie; + unsigned long flags; + int err; + + if (chan->cyclic) { + xilinx_dma_free_tx_descriptor(chan, desc); + return -EBUSY; + } + + if (chan->err) { + /* + * If reset fails, need to hard reset the system. 
+ * Channel is no longer functional + */ + err = xilinx_dma_chan_reset(chan); + if (err < 0) + return err; + } + + spin_lock_irqsave(&chan->lock, flags); + + cookie = dma_cookie_assign(tx); + + /* Put this transaction onto the tail of the pending queue */ + append_desc_queue(chan, desc); + + if (desc->cyclic) + chan->cyclic = true; + + spin_unlock_irqrestore(&chan->lock, flags); + + return cookie; +} + +/** + * xilinx_vdma_dma_prep_interleaved - prepare a descriptor for a + * DMA_SLAVE transaction + * @dchan: DMA channel + * @xt: Interleaved template pointer + * @flags: transfer ack flags + * + * Return: Async transaction descriptor on success and NULL on failure + */ +static struct dma_async_tx_descriptor * +xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan, + struct dma_interleaved_template *xt, + unsigned long flags) +{ + struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); + struct xilinx_dma_tx_descriptor *desc; + struct xilinx_vdma_tx_segment *segment, *prev = NULL; + struct xilinx_vdma_desc_hw *hw; + + if (!is_slave_direction(xt->dir)) + return NULL; + + if (!xt->numf || !xt->sgl[0].size) + return NULL; + + if (xt->frame_size != 1) + return NULL; + + /* Allocate a transaction descriptor. */ + desc = xilinx_dma_alloc_tx_descriptor(chan); + if (!desc) + return NULL; + + dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); + desc->async_tx.tx_submit = xilinx_dma_tx_submit; + async_tx_ack(&desc->async_tx); + + /* Allocate the link descriptor from DMA pool */ + segment = xilinx_vdma_alloc_tx_segment(chan); + if (!segment) + goto error; + + /* Fill in the hardware descriptor */ + hw = &segment->hw; + hw->vsize = xt->numf; + hw->hsize = xt->sgl[0].size; + hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) << + XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT; + hw->stride |= chan->config.frm_dly << + XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT; + + if (xt->dir != DMA_MEM_TO_DEV) { + if (chan->ext_addr) { + hw->buf_addr = lower_32_bits(xt->dst_start); + hw->buf_addr_msb = upper_32_bits(xt->dst_start); + } else { + hw->buf_addr = xt->dst_start; + } + } else { + if (chan->ext_addr) { + hw->buf_addr = lower_32_bits(xt->src_start); + hw->buf_addr_msb = upper_32_bits(xt->src_start); + } else { + hw->buf_addr = xt->src_start; + } + } + + /* Insert the segment into the descriptor segments list. */ + list_add_tail(&segment->node, &desc->segments); + + prev = segment; + + /* Link the last hardware descriptor with the first. 
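(the hardware next_desc link itself is written in append_desc_queue when the descriptor is queued)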
*/ + segment = list_first_entry(&desc->segments, + struct xilinx_vdma_tx_segment, node); + desc->async_tx.phys = segment->phys; + + return &desc->async_tx; + +error: + xilinx_dma_free_tx_descriptor(chan, desc); + return NULL; +} + +/** + * xilinx_cdma_prep_memcpy - prepare descriptors for a memcpy transaction + * @dchan: DMA channel + * @dma_dst: destination address + * @dma_src: source address + * @len: transfer length + * @flags: transfer ack flags + * + * Return: Async transaction descriptor on success and NULL on failure + */ +static struct dma_async_tx_descriptor * +xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst, + dma_addr_t dma_src, size_t len, unsigned long flags) +{ + struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); + struct xilinx_dma_tx_descriptor *desc; + struct xilinx_cdma_tx_segment *segment, *prev; + struct xilinx_cdma_desc_hw *hw; + + if (!len || len > XILINX_DMA_MAX_TRANS_LEN) + return NULL; + + desc = xilinx_dma_alloc_tx_descriptor(chan); + if (!desc) + return NULL; + + dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); + desc->async_tx.tx_submit = xilinx_dma_tx_submit; + + /* Allocate the link descriptor from DMA pool */ + segment = xilinx_cdma_alloc_tx_segment(chan); + if (!segment) + goto error; + + hw = &segment->hw; + hw->control = len; + hw->src_addr = dma_src; + hw->dest_addr = dma_dst; + if (chan->ext_addr) { + hw->src_addr_msb = upper_32_bits(dma_src); + hw->dest_addr_msb = upper_32_bits(dma_dst); + } + + /* Fill the previous next descriptor with current */ + prev = list_last_entry(&desc->segments, + struct xilinx_cdma_tx_segment, node); + prev->hw.next_desc = segment->phys; + + /* Insert the segment into the descriptor segments list. */ + list_add_tail(&segment->node, &desc->segments); + + prev = segment; + + /* Link the last hardware descriptor with the first. */ + segment = list_first_entry(&desc->segments, + struct xilinx_cdma_tx_segment, node); + desc->async_tx.phys = segment->phys; + prev->hw.next_desc = segment->phys; + + return &desc->async_tx; + +error: + xilinx_dma_free_tx_descriptor(chan, desc); + return NULL; +} + +/** + * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction + * @dchan: DMA channel + * @sgl: scatterlist to transfer to/from + * @sg_len: number of entries in @scatterlist + * @direction: DMA direction + * @flags: transfer ack flags + * @context: APP words of the descriptor + * + * Return: Async transaction descriptor on success and NULL on failure + */ +static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg( + struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len, + enum dma_transfer_direction direction, unsigned long flags, + void *context) +{ + struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); + struct xilinx_dma_tx_descriptor *desc; + struct xilinx_axidma_tx_segment *segment = NULL, *prev = NULL; + u32 *app_w = (u32 *)context; + struct scatterlist *sg; + size_t copy; + size_t sg_used; + unsigned int i; + + if (!is_slave_direction(direction)) + return NULL; + + /* Allocate a transaction descriptor. 
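(hardware segments are then allocated per scatterlist entry below)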
*/
+	desc = xilinx_dma_alloc_tx_descriptor(chan);
+	if (!desc)
+		return NULL;
+
+	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
+	desc->async_tx.tx_submit = xilinx_dma_tx_submit;
+
+	/* Build transactions using information in the scatter gather list */
+	for_each_sg(sgl, sg, sg_len, i) {
+		sg_used = 0;
+
+		/* Loop until the entire scatterlist entry is used */
+		while (sg_used < sg_dma_len(sg)) {
+			struct xilinx_axidma_desc_hw *hw;
+
+			/* Get a free segment */
+			segment = xilinx_axidma_alloc_tx_segment(chan);
+			if (!segment)
+				goto error;
+
+			/*
+			 * Calculate the maximum number of bytes to transfer,
+			 * making sure it is less than the hw limit
+			 */
+			copy = min_t(size_t, sg_dma_len(sg) - sg_used,
+				     XILINX_DMA_MAX_TRANS_LEN);
+			hw = &segment->hw;
+
+			/* Fill in the descriptor */
+			xilinx_axidma_buf(chan, hw, sg_dma_address(sg),
+					  sg_used, 0);
+
+			hw->control = copy;
+
+			if (chan->direction == DMA_MEM_TO_DEV) {
+				if (app_w)
+					memcpy(hw->app, app_w, sizeof(u32) *
+					       XILINX_DMA_NUM_APP_WORDS);
+			}
+
+			if (prev)
+				prev->hw.next_desc = segment->phys;
+
+			prev = segment;
+			sg_used += copy;
+
+			/*
+			 * Insert the segment into the descriptor segments
+			 * list.
+			 */
+			list_add_tail(&segment->node, &desc->segments);
+		}
+	}
+
+	segment = list_first_entry(&desc->segments,
+				   struct xilinx_axidma_tx_segment, node);
+	desc->async_tx.phys = segment->phys;
+	prev->hw.next_desc = segment->phys;
+
+	/* For the last DMA_MEM_TO_DEV transfer, set EOP */
+	if (chan->direction == DMA_MEM_TO_DEV) {
+		segment->hw.control |= XILINX_DMA_BD_SOP;
+		segment = list_last_entry(&desc->segments,
+					  struct xilinx_axidma_tx_segment,
+					  node);
+		segment->hw.control |= XILINX_DMA_BD_EOP;
+	}
+
+	return &desc->async_tx;
+
+error:
+	xilinx_dma_free_tx_descriptor(chan, desc);
+	return NULL;
+}
+
+/**
+ * xilinx_dma_prep_dma_cyclic - prepare descriptors for a cyclic transaction
+ * @dchan: DMA channel
+ * @buf_addr: Physical address of the buffer
+ * @buf_len: Total length of the cyclic buffers
+ * @period_len: length of individual cyclic buffer
+ * @direction: DMA direction
+ * @flags: transfer ack flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
+	struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len,
+	size_t period_len, enum dma_transfer_direction direction,
+	unsigned long flags)
+{
+	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+	struct xilinx_dma_tx_descriptor *desc;
+	struct xilinx_axidma_tx_segment *segment, *head_segment, *prev = NULL;
+	size_t copy, sg_used;
+	unsigned int num_periods;
+	int i;
+	u32 reg;
+
+	if (!period_len)
+		return NULL;
+
+	num_periods = buf_len / period_len;
+
+	if (!num_periods)
+		return NULL;
+
+	if (!is_slave_direction(direction))
+		return NULL;
+
+	/* Allocate a transaction descriptor.
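(one hardware segment is built per period chunk below, and for MEM_TO_DEV the tail is linked back to the head to close the BD ring)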
*/ + desc = xilinx_dma_alloc_tx_descriptor(chan); + if (!desc) + return NULL; + + chan->direction = direction; + dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); + desc->async_tx.tx_submit = xilinx_dma_tx_submit; + + for (i = 0; i < num_periods; ++i) { + sg_used = 0; + + while (sg_used < period_len) { + struct xilinx_axidma_desc_hw *hw; + + /* Get a free segment */ + segment = xilinx_axidma_alloc_tx_segment(chan); + if (!segment) + goto error; + + /* + * Calculate the maximum number of bytes to transfer, + * making sure it is less than the hw limit + */ + copy = min_t(size_t, period_len - sg_used, + XILINX_DMA_MAX_TRANS_LEN); + hw = &segment->hw; + xilinx_axidma_buf(chan, hw, buf_addr, sg_used, + period_len * i); + hw->control = copy; + + if (prev) + prev->hw.next_desc = segment->phys; + + prev = segment; + sg_used += copy; + + /* + * Insert the segment into the descriptor segments + * list. + */ + list_add_tail(&segment->node, &desc->segments); + } + } + + head_segment = list_first_entry(&desc->segments, + struct xilinx_axidma_tx_segment, node); + desc->async_tx.phys = head_segment->phys; + + desc->cyclic = true; + reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); + reg |= XILINX_DMA_CR_CYCLIC_BD_EN_MASK; + dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg); + + /* For the last DMA_MEM_TO_DEV transfer, set EOP */ + if (direction == DMA_MEM_TO_DEV) { + head_segment->hw.control |= XILINX_DMA_BD_SOP; + segment = list_last_entry(&desc->segments, + struct xilinx_axidma_tx_segment, + node); + segment->hw.control |= XILINX_DMA_BD_EOP; + segment->hw.next_desc = (u32) head_segment->phys; + } + + return &desc->async_tx; + +error: + xilinx_dma_free_tx_descriptor(chan, desc); + return NULL; +} + +/** + * xilinx_dma_prep_interleaved - prepare a descriptor for a + * DMA_SLAVE transaction + * @dchan: DMA channel + * @xt: Interleaved template pointer + * @flags: transfer ack flags + * + * Return: Async transaction descriptor on success and NULL on failure + */ +static struct dma_async_tx_descriptor * +xilinx_dma_prep_interleaved(struct dma_chan *dchan, + struct dma_interleaved_template *xt, + unsigned long flags) +{ + struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); + struct xilinx_dma_tx_descriptor *desc; + struct xilinx_axidma_tx_segment *segment; + struct xilinx_axidma_desc_hw *hw; + + if (!is_slave_direction(xt->dir)) + return NULL; + + if (!xt->numf || !xt->sgl[0].size) + return NULL; + + if (xt->frame_size != 1) + return NULL; + + /* Allocate a transaction descriptor. */ + desc = xilinx_dma_alloc_tx_descriptor(chan); + if (!desc) + return NULL; + + chan->direction = xt->dir; + dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); + desc->async_tx.tx_submit = xilinx_dma_tx_submit; + + /* Get a free segment */ + segment = xilinx_axidma_alloc_tx_segment(chan); + if (!segment) + goto error; + + hw = &segment->hw; + + /* Fill in the descriptor */ + if (xt->dir != DMA_MEM_TO_DEV) + hw->buf_addr = xt->dst_start; + else + hw->buf_addr = xt->src_start; + + hw->mcdma_control = chan->tdest & XILINX_DMA_BD_TDEST_MASK; + hw->vsize_stride = (xt->numf << XILINX_DMA_BD_VSIZE_SHIFT) & + XILINX_DMA_BD_VSIZE_MASK; + hw->vsize_stride |= (xt->sgl[0].icg + xt->sgl[0].size) & + XILINX_DMA_BD_STRIDE_MASK; + hw->control = xt->sgl[0].size & XILINX_DMA_BD_HSIZE_MASK; + + /* + * Insert the segment into the descriptor segments + * list. 
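+	 * Only a single segment is needed here, since frame_size is fixed
+	 * at one.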
+ */
+	list_add_tail(&segment->node, &desc->segments);
+
+	segment = list_first_entry(&desc->segments,
+				   struct xilinx_axidma_tx_segment, node);
+	desc->async_tx.phys = segment->phys;
+
+	/* For the last DMA_MEM_TO_DEV transfer, set EOP */
+	if (xt->dir == DMA_MEM_TO_DEV) {
+		segment->hw.control |= XILINX_DMA_BD_SOP;
+		segment = list_last_entry(&desc->segments,
+					  struct xilinx_axidma_tx_segment,
+					  node);
+		segment->hw.control |= XILINX_DMA_BD_EOP;
+	}
+
+	return &desc->async_tx;
+
+error:
+	xilinx_dma_free_tx_descriptor(chan, desc);
+	return NULL;
+}
+
+/**
+ * xilinx_dma_terminate_all - Halt the channel and free descriptors
+ * @dchan: Driver specific DMA Channel pointer
+ */
+static int xilinx_dma_terminate_all(struct dma_chan *dchan)
+{
+	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+	u32 reg;
+
+	if (chan->cyclic)
+		xilinx_dma_chan_reset(chan);
+
+	/* Halt the DMA engine */
+	xilinx_dma_halt(chan);
+
+	/* Remove and free all of the descriptors in the lists */
+	xilinx_dma_free_descriptors(chan);
+
+	if (chan->cyclic) {
+		reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
+		reg &= ~XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
+		dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
+		chan->cyclic = false;
+	}
+
+	return 0;
+}
+
+/**
+ * xilinx_vdma_channel_set_config - Configure VDMA channel
+ * Run-time configuration for Axi VDMA, supports:
+ * . halt the channel
+ * . configure interrupt coalescing and inter-packet delay threshold
+ * . start/stop parking
+ * . enable genlock
+ *
+ * @dchan: DMA channel
+ * @cfg: VDMA device configuration pointer
+ *
+ * Return: '0' on success and failure value on error
+ */
+int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
+					struct xilinx_vdma_config *cfg)
+{
+	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+	u32 dmacr;
+
+	if (cfg->reset)
+		return xilinx_dma_chan_reset(chan);
+
+	dmacr = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
+
+	chan->config.frm_dly = cfg->frm_dly;
+	chan->config.park = cfg->park;
+
+	/* genlock settings */
+	chan->config.gen_lock = cfg->gen_lock;
+	chan->config.master = cfg->master;
+
+	if (cfg->gen_lock && chan->genlock) {
+		dmacr |= XILINX_DMA_DMACR_GENLOCK_EN;
+		dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT;
+	}
+
+	chan->config.frm_cnt_en = cfg->frm_cnt_en;
+	if (cfg->park)
+		chan->config.park_frm = cfg->park_frm;
+	else
+		chan->config.park_frm = -1;
+
+	chan->config.coalesc = cfg->coalesc;
+	chan->config.delay = cfg->delay;
+
+	if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) {
+		dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;
+		chan->config.coalesc = cfg->coalesc;
+	}
+
+	if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) {
+		dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT;
+		chan->config.delay = cfg->delay;
+	}
+
+	/* FSync Source selection */
+	dmacr &= ~XILINX_DMA_DMACR_FSYNCSRC_MASK;
+	dmacr |= cfg->ext_fsync << XILINX_DMA_DMACR_FSYNCSRC_SHIFT;
+
+	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, dmacr);
+
+	return 0;
+}
+EXPORT_SYMBOL(xilinx_vdma_channel_set_config);
+
+/* -----------------------------------------------------------------------------
+ * Probe and remove
+ */
+
+/**
+ * xilinx_dma_chan_remove - Per Channel remove function
+ * @chan: Driver specific DMA channel
+ */
+static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan)
+{
+	/* Disable all interrupts */
+	dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
+		     XILINX_DMA_DMAXR_ALL_IRQ_MASK);
+
+	if (chan->irq > 0)
+		free_irq(chan->irq, chan);
+
+	tasklet_kill(&chan->tasklet);
+
+	list_del(&chan->common.device_node);
+}
+
+static
int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk, + struct clk **tx_clk, struct clk **rx_clk, + struct clk **sg_clk, struct clk **tmp_clk) +{ + int err; + + *tmp_clk = NULL; + + *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk"); + if (IS_ERR(*axi_clk)) { + err = PTR_ERR(*axi_clk); + dev_err(&pdev->dev, "failed to get axi_aclk (%u)\n", err); + return err; + } + + *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk"); + if (IS_ERR(*tx_clk)) + *tx_clk = NULL; + + *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk"); + if (IS_ERR(*rx_clk)) + *rx_clk = NULL; + + *sg_clk = devm_clk_get(&pdev->dev, "m_axi_sg_aclk"); + if (IS_ERR(*sg_clk)) + *sg_clk = NULL; + + err = clk_prepare_enable(*axi_clk); + if (err) { + dev_err(&pdev->dev, "failed to enable axi_clk (%u)\n", err); + return err; + } + + err = clk_prepare_enable(*tx_clk); + if (err) { + dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err); + goto err_disable_axiclk; + } + + err = clk_prepare_enable(*rx_clk); + if (err) { + dev_err(&pdev->dev, "failed to enable rx_clk (%u)\n", err); + goto err_disable_txclk; + } + + err = clk_prepare_enable(*sg_clk); + if (err) { + dev_err(&pdev->dev, "failed to enable sg_clk (%u)\n", err); + goto err_disable_rxclk; + } + + return 0; + +err_disable_rxclk: + clk_disable_unprepare(*rx_clk); +err_disable_txclk: + clk_disable_unprepare(*tx_clk); +err_disable_axiclk: + clk_disable_unprepare(*axi_clk); + + return err; +} + +static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk, + struct clk **dev_clk, struct clk **tmp_clk, + struct clk **tmp1_clk, struct clk **tmp2_clk) +{ + int err; + + *tmp_clk = NULL; + *tmp1_clk = NULL; + *tmp2_clk = NULL; + + *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk"); + if (IS_ERR(*axi_clk)) { + err = PTR_ERR(*axi_clk); + dev_err(&pdev->dev, "failed to get axi_clk (%u)\n", err); + return err; + } + + *dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk"); + if (IS_ERR(*dev_clk)) { + err = PTR_ERR(*dev_clk); + dev_err(&pdev->dev, "failed to get dev_clk (%u)\n", err); + return err; + } + + err = clk_prepare_enable(*axi_clk); + if (err) { + dev_err(&pdev->dev, "failed to enable axi_clk (%u)\n", err); + return err; + } + + err = clk_prepare_enable(*dev_clk); + if (err) { + dev_err(&pdev->dev, "failed to enable dev_clk (%u)\n", err); + goto err_disable_axiclk; + } + + return 0; + +err_disable_axiclk: + clk_disable_unprepare(*axi_clk); + + return err; +} + +static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk, + struct clk **tx_clk, struct clk **txs_clk, + struct clk **rx_clk, struct clk **rxs_clk) +{ + int err; + + *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk"); + if (IS_ERR(*axi_clk)) { + err = PTR_ERR(*axi_clk); + dev_err(&pdev->dev, "failed to get axi_aclk (%u)\n", err); + return err; + } + + *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk"); + if (IS_ERR(*tx_clk)) + *tx_clk = NULL; + + *txs_clk = devm_clk_get(&pdev->dev, "m_axis_mm2s_aclk"); + if (IS_ERR(*txs_clk)) + *txs_clk = NULL; + + *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk"); + if (IS_ERR(*rx_clk)) + *rx_clk = NULL; + + *rxs_clk = devm_clk_get(&pdev->dev, "s_axis_s2mm_aclk"); + if (IS_ERR(*rxs_clk)) + *rxs_clk = NULL; + + err = clk_prepare_enable(*axi_clk); + if (err) { + dev_err(&pdev->dev, "failed to enable axi_clk (%u)\n", err); + return err; + } + + err = clk_prepare_enable(*tx_clk); + if (err) { + dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err); + goto err_disable_axiclk; + } + + err = 
clk_prepare_enable(*txs_clk);
+	if (err) {
+		dev_err(&pdev->dev, "failed to enable txs_clk (%u)\n", err);
+		goto err_disable_txclk;
+	}
+
+	err = clk_prepare_enable(*rx_clk);
+	if (err) {
+		dev_err(&pdev->dev, "failed to enable rx_clk (%u)\n", err);
+		goto err_disable_txsclk;
+	}
+
+	err = clk_prepare_enable(*rxs_clk);
+	if (err) {
+		dev_err(&pdev->dev, "failed to enable rxs_clk (%u)\n", err);
+		goto err_disable_rxclk;
+	}
+
+	return 0;
+
+err_disable_rxclk:
+	clk_disable_unprepare(*rx_clk);
+err_disable_txsclk:
+	clk_disable_unprepare(*txs_clk);
+err_disable_txclk:
+	clk_disable_unprepare(*tx_clk);
+err_disable_axiclk:
+	clk_disable_unprepare(*axi_clk);
+
+	return err;
+}
+
+static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
+{
+	clk_disable_unprepare(xdev->rxs_clk);
+	clk_disable_unprepare(xdev->rx_clk);
+	clk_disable_unprepare(xdev->txs_clk);
+	clk_disable_unprepare(xdev->tx_clk);
+	clk_disable_unprepare(xdev->axi_clk);
+}
+
+/**
+ * xilinx_dma_chan_probe - Per Channel Probing
+ * It gets channel features from the device tree entry and
+ * initializes special channel handling routines
+ *
+ * @xdev: Driver specific device structure
+ * @node: Device node
+ * @chan_id: DMA Channel id
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
+				  struct device_node *node, int chan_id)
+{
+	struct xilinx_dma_chan *chan;
+	bool has_dre = false;
+	u32 value, width;
+	int err;
+
+	/* Allocate and initialize the channel structure */
+	chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
+	if (!chan)
+		return -ENOMEM;
+
+	chan->dev = xdev->dev;
+	chan->xdev = xdev;
+	chan->has_sg = xdev->has_sg;
+	chan->desc_pendingcount = 0x0;
+	chan->ext_addr = xdev->ext_addr;
+
+	spin_lock_init(&chan->lock);
+	INIT_LIST_HEAD(&chan->pending_list);
+	INIT_LIST_HEAD(&chan->done_list);
+	INIT_LIST_HEAD(&chan->active_list);
+
+	/* Retrieve the channel properties from the device tree */
+	has_dre = of_property_read_bool(node, "xlnx,include-dre");
+
+	chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode");
+
+	err = of_property_read_u32(node, "xlnx,datawidth", &value);
+	if (err) {
+		dev_err(xdev->dev, "missing xlnx,datawidth property\n");
+		return err;
+	}
+	width = value >> 3; /* Convert bits to bytes */
+
+	/* If data width is greater than 8 bytes, DRE is not in hw */
+	if (width > 8)
+		has_dre = false;
+
+	if (!has_dre)
+		xdev->common.copy_align = fls(width - 1);
+
+	if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel")) {
+		chan->direction = DMA_MEM_TO_DEV;
+		chan->id = chan_id;
+		chan->tdest = chan_id;
+
+		chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
+		if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
+			chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;
+
+			if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
+			    xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S)
+				chan->flush_on_fsync = true;
+		}
+	} else if (of_device_is_compatible(node,
+					   "xlnx,axi-vdma-s2mm-channel")) {
+		chan->direction = DMA_DEV_TO_MEM;
+		chan->id = chan_id;
+		chan->tdest = chan_id - xdev->nr_channels;
+
+		chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
+		if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
+			chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;
+
+			if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
+			    xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM)
+				chan->flush_on_fsync = true;
+		}
+	} else {
+		dev_err(xdev->dev, "Invalid channel compatible node\n");
+		return -EINVAL;
+	}
+
+	/* Request the interrupt */
+	chan->irq = irq_of_parse_and_map(node, 0);
+	
err = request_irq(chan->irq, xilinx_dma_irq_handler, IRQF_SHARED,
+			  "xilinx-dma-controller", chan);
+	if (err) {
+		dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq);
+		return err;
+	}
+
+	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
+		chan->start_transfer = xilinx_dma_start_transfer;
+	else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA)
+		chan->start_transfer = xilinx_cdma_start_transfer;
+	else
+		chan->start_transfer = xilinx_vdma_start_transfer;
+
+	/* Initialize the tasklet */
+	tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet,
+		     (unsigned long)chan);
+
+	/*
+	 * Initialize the DMA channel and add it to the DMA engine channels
+	 * list.
+	 */
+	chan->common.device = &xdev->common;
+
+	list_add_tail(&chan->common.device_node, &xdev->common.channels);
+	xdev->chan[chan->id] = chan;
+
+	/* Reset the channel */
+	err = xilinx_dma_chan_reset(chan);
+	if (err < 0) {
+		dev_err(xdev->dev, "Reset channel failed\n");
+		return err;
+	}
+
+	return 0;
+}
+
+/**
+ * xilinx_dma_child_probe - Per child node probe
+ * It gets the number of dma-channels per child node from
+ * the device tree and initializes all the channels.
+ *
+ * @xdev: Driver specific device structure
+ * @node: Device node
+ *
+ * Return: 0 always.
+ */
+static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
+				  struct device_node *node) {
+	int ret, i, nr_channels = 1;
+
+	ret = of_property_read_u32(node, "dma-channels", &nr_channels);
+	if ((ret < 0) && xdev->mcdma)
+		dev_warn(xdev->dev, "missing dma-channels property\n");
+
+	for (i = 0; i < nr_channels; i++)
+		xilinx_dma_chan_probe(xdev, node, xdev->chan_id++);
+
+	xdev->nr_channels += nr_channels;
+
+	return 0;
+}
+
+/**
+ * of_dma_xilinx_xlate - Translation function
+ * @dma_spec: Pointer to DMA specifier as found in the device tree
+ * @ofdma: Pointer to DMA controller data
+ *
+ * Return: DMA channel pointer on success and NULL on error
+ */
+static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
+						struct of_dma *ofdma)
+{
+	struct xilinx_dma_device *xdev = ofdma->of_dma_data;
+	int chan_id = dma_spec->args[0];
+
+	if (chan_id >= xdev->nr_channels || !xdev->chan[chan_id])
+		return NULL;
+
+	return dma_get_slave_channel(&xdev->chan[chan_id]->common);
+}
+
+static const struct xilinx_dma_config axidma_config = {
+	.dmatype = XDMA_TYPE_AXIDMA,
+	.clk_init = axidma_clk_init,
+};
+
+static const struct xilinx_dma_config axicdma_config = {
+	.dmatype = XDMA_TYPE_CDMA,
+	.clk_init = axicdma_clk_init,
+};
+
+static const struct xilinx_dma_config axivdma_config = {
+	.dmatype = XDMA_TYPE_VDMA,
+	.clk_init = axivdma_clk_init,
+};
+
+static const struct of_device_id xilinx_dma_of_ids[] = {
+	{ .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config },
+	{ .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config },
+	{ .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config },
+	{}
+};
+MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids);
+
+/**
+ * xilinx_dma_probe - Driver probe function
+ * @pdev: Pointer to the platform_device structure
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int xilinx_dma_probe(struct platform_device *pdev)
+{
+	int (*clk_init)(struct platform_device *, struct clk **, struct clk **,
+			struct clk **, struct clk **, struct clk **)
+					= axivdma_clk_init;
+	struct device_node *node = pdev->dev.of_node;
+	struct xilinx_dma_device *xdev;
+	struct device_node *child, *np = pdev->dev.of_node;
+	struct resource *io;
+	u32 num_frames, addr_width;
+	int i, err;
+
+	/* Allocate and initialize the
DMA engine structure */ + xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL); + if (!xdev) + return -ENOMEM; + + xdev->dev = &pdev->dev; + if (np) { + const struct of_device_id *match; + + match = of_match_node(xilinx_dma_of_ids, np); + if (match && match->data) { + xdev->dma_config = match->data; + clk_init = xdev->dma_config->clk_init; + } + } + + err = clk_init(pdev, &xdev->axi_clk, &xdev->tx_clk, &xdev->txs_clk, + &xdev->rx_clk, &xdev->rxs_clk); + if (err) + return err; + + /* Request and map I/O memory */ + io = platform_get_resource(pdev, IORESOURCE_MEM, 0); + xdev->regs = devm_ioremap_resource(&pdev->dev, io); + if (IS_ERR(xdev->regs)) + return PTR_ERR(xdev->regs); + + /* Retrieve the DMA engine properties from the device tree */ + xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg"); + if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) + xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma"); + + if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { + err = of_property_read_u32(node, "xlnx,num-fstores", + &num_frames); + if (err < 0) { + dev_err(xdev->dev, + "missing xlnx,num-fstores property\n"); + return err; + } + + err = of_property_read_u32(node, "xlnx,flush-fsync", + &xdev->flush_on_fsync); + if (err < 0) + dev_warn(xdev->dev, + "missing xlnx,flush-fsync property\n"); + } + + err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width); + if (err < 0) + dev_warn(xdev->dev, "missing xlnx,addrwidth property\n"); + + if (addr_width > 32) + xdev->ext_addr = true; + else + xdev->ext_addr = false; + + /* Set the dma mask bits */ + dma_set_mask(xdev->dev, DMA_BIT_MASK(addr_width)); + + /* Initialize the DMA engine */ + xdev->common.dev = &pdev->dev; + + INIT_LIST_HEAD(&xdev->common.channels); + if (!(xdev->dma_config->dmatype == XDMA_TYPE_CDMA)) { + dma_cap_set(DMA_SLAVE, xdev->common.cap_mask); + dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask); + } + + xdev->common.device_alloc_chan_resources = + xilinx_dma_alloc_chan_resources; + xdev->common.device_free_chan_resources = + xilinx_dma_free_chan_resources; + xdev->common.device_terminate_all = xilinx_dma_terminate_all; + xdev->common.device_tx_status = xilinx_dma_tx_status; + xdev->common.device_issue_pending = xilinx_dma_issue_pending; + if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { + dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask); + xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg; + xdev->common.device_prep_dma_cyclic = + xilinx_dma_prep_dma_cyclic; + xdev->common.device_prep_interleaved_dma = + xilinx_dma_prep_interleaved; + /* Residue calculation is supported by only AXI DMA */ + xdev->common.residue_granularity = + DMA_RESIDUE_GRANULARITY_SEGMENT; + } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { + dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask); + xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy; + } else { + xdev->common.device_prep_interleaved_dma = + xilinx_vdma_dma_prep_interleaved; + } + + platform_set_drvdata(pdev, xdev); + + /* Initialize the channels */ + for_each_child_of_node(node, child) { + err = xilinx_dma_child_probe(xdev, child); + if (err < 0) + goto disable_clks; + } + + if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { + for (i = 0; i < xdev->nr_channels; i++) + if (xdev->chan[i]) + xdev->chan[i]->num_frms = num_frames; + } + + /* Register the DMA engine with the core */ + dma_async_device_register(&xdev->common); + + err = of_dma_controller_register(node, of_dma_xilinx_xlate, + xdev); + if (err < 0) { + dev_err(&pdev->dev, "Unable to register DMA 
to DT\n"); + dma_async_device_unregister(&xdev->common); + goto error; + } + + dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n"); + + return 0; + +disable_clks: + xdma_disable_allclks(xdev); +error: + for (i = 0; i < xdev->nr_channels; i++) + if (xdev->chan[i]) + xilinx_dma_chan_remove(xdev->chan[i]); + + return err; +} + +/** + * xilinx_dma_remove - Driver remove function + * @pdev: Pointer to the platform_device structure + * + * Return: Always '0' + */ +static int xilinx_dma_remove(struct platform_device *pdev) +{ + struct xilinx_dma_device *xdev = platform_get_drvdata(pdev); + int i; + + of_dma_controller_free(pdev->dev.of_node); + + dma_async_device_unregister(&xdev->common); + + for (i = 0; i < xdev->nr_channels; i++) + if (xdev->chan[i]) + xilinx_dma_chan_remove(xdev->chan[i]); + + xdma_disable_allclks(xdev); + + return 0; +} + +static struct platform_driver xilinx_vdma_driver = { + .driver = { + .name = "xilinx-vdma", + .of_match_table = xilinx_dma_of_ids, + }, + .probe = xilinx_dma_probe, + .remove = xilinx_dma_remove, +}; + +module_platform_driver(xilinx_vdma_driver); + +MODULE_AUTHOR("Xilinx, Inc."); +MODULE_DESCRIPTION("Xilinx VDMA driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c deleted file mode 100644 index 0768d9f..0000000 --- a/drivers/dma/xilinx/xilinx_vdma.c +++ /dev/null @@ -1,2684 +0,0 @@ -/* - * DMA driver for Xilinx Video DMA Engine - * - * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved. - * - * Based on the Freescale DMA driver. - * - * Description: - * The AXI Video Direct Memory Access (AXI VDMA) core is a soft Xilinx IP - * core that provides high-bandwidth direct memory access between memory - * and AXI4-Stream type video target peripherals. The core provides efficient - * two dimensional DMA operations with independent asynchronous read (S2MM) - * and write (MM2S) channel operation. It can be configured to have either - * one channel or two channels. If configured as two channels, one is to - * transmit to the video device (MM2S) and another is to receive from the - * video device (S2MM). Initialization, status, interrupt and management - * registers are accessed through an AXI4-Lite slave interface. - * - * The AXI Direct Memory Access (AXI DMA) core is a soft Xilinx IP core that - * provides high-bandwidth one dimensional direct memory access between memory - * and AXI4-Stream target peripherals. It supports one receive and one - * transmit channel, both of them optional at synthesis time. - * - * The AXI CDMA, is a soft IP, which provides high-bandwidth Direct Memory - * Access (DMA) between a memory-mapped source address and a memory-mapped - * destination address. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 2 of the License, or - * (at your option) any later version. 
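
For context, the of_dma_xilinx_xlate() hook registered in the probe function above is what a DMA client hits when it resolves its "dmas"/"dma-names" device tree properties. A minimal client-side sketch, where the "axidma0" request name is purely illustrative:

#include <linux/dmaengine.h>

/* Illustrative client lookup: "axidma0" must match an entry in the
 * client node's dma-names property, and the dmas phandle argument
 * selects chan_id in of_dma_xilinx_xlate() above.
 */
static struct dma_chan *client_get_chan(struct device *dev)
{
	return dma_request_slave_channel(dev, "axidma0");
}

dma_request_slave_channel() returns NULL on failure; the channel is eventually released with dma_release_channel().
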
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "../dmaengine.h" - -/* Register/Descriptor Offsets */ -#define XILINX_DMA_MM2S_CTRL_OFFSET 0x0000 -#define XILINX_DMA_S2MM_CTRL_OFFSET 0x0030 -#define XILINX_VDMA_MM2S_DESC_OFFSET 0x0050 -#define XILINX_VDMA_S2MM_DESC_OFFSET 0x00a0 - -/* Control Registers */ -#define XILINX_DMA_REG_DMACR 0x0000 -#define XILINX_DMA_DMACR_DELAY_MAX 0xff -#define XILINX_DMA_DMACR_DELAY_SHIFT 24 -#define XILINX_DMA_DMACR_FRAME_COUNT_MAX 0xff -#define XILINX_DMA_DMACR_FRAME_COUNT_SHIFT 16 -#define XILINX_DMA_DMACR_ERR_IRQ BIT(14) -#define XILINX_DMA_DMACR_DLY_CNT_IRQ BIT(13) -#define XILINX_DMA_DMACR_FRM_CNT_IRQ BIT(12) -#define XILINX_DMA_DMACR_MASTER_SHIFT 8 -#define XILINX_DMA_DMACR_FSYNCSRC_SHIFT 5 -#define XILINX_DMA_DMACR_FRAMECNT_EN BIT(4) -#define XILINX_DMA_DMACR_GENLOCK_EN BIT(3) -#define XILINX_DMA_DMACR_RESET BIT(2) -#define XILINX_DMA_DMACR_CIRC_EN BIT(1) -#define XILINX_DMA_DMACR_RUNSTOP BIT(0) -#define XILINX_DMA_DMACR_FSYNCSRC_MASK GENMASK(6, 5) - -#define XILINX_DMA_REG_DMASR 0x0004 -#define XILINX_DMA_DMASR_EOL_LATE_ERR BIT(15) -#define XILINX_DMA_DMASR_ERR_IRQ BIT(14) -#define XILINX_DMA_DMASR_DLY_CNT_IRQ BIT(13) -#define XILINX_DMA_DMASR_FRM_CNT_IRQ BIT(12) -#define XILINX_DMA_DMASR_SOF_LATE_ERR BIT(11) -#define XILINX_DMA_DMASR_SG_DEC_ERR BIT(10) -#define XILINX_DMA_DMASR_SG_SLV_ERR BIT(9) -#define XILINX_DMA_DMASR_EOF_EARLY_ERR BIT(8) -#define XILINX_DMA_DMASR_SOF_EARLY_ERR BIT(7) -#define XILINX_DMA_DMASR_DMA_DEC_ERR BIT(6) -#define XILINX_DMA_DMASR_DMA_SLAVE_ERR BIT(5) -#define XILINX_DMA_DMASR_DMA_INT_ERR BIT(4) -#define XILINX_DMA_DMASR_IDLE BIT(1) -#define XILINX_DMA_DMASR_HALTED BIT(0) -#define XILINX_DMA_DMASR_DELAY_MASK GENMASK(31, 24) -#define XILINX_DMA_DMASR_FRAME_COUNT_MASK GENMASK(23, 16) - -#define XILINX_DMA_REG_CURDESC 0x0008 -#define XILINX_DMA_REG_TAILDESC 0x0010 -#define XILINX_DMA_REG_REG_INDEX 0x0014 -#define XILINX_DMA_REG_FRMSTORE 0x0018 -#define XILINX_DMA_REG_THRESHOLD 0x001c -#define XILINX_DMA_REG_FRMPTR_STS 0x0024 -#define XILINX_DMA_REG_PARK_PTR 0x0028 -#define XILINX_DMA_PARK_PTR_WR_REF_SHIFT 8 -#define XILINX_DMA_PARK_PTR_RD_REF_SHIFT 0 -#define XILINX_DMA_REG_VDMA_VERSION 0x002c - -/* Register Direct Mode Registers */ -#define XILINX_DMA_REG_VSIZE 0x0000 -#define XILINX_DMA_REG_HSIZE 0x0004 - -#define XILINX_DMA_REG_FRMDLY_STRIDE 0x0008 -#define XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT 24 -#define XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT 0 - -#define XILINX_VDMA_REG_START_ADDRESS(n) (0x000c + 4 * (n)) -#define XILINX_VDMA_REG_START_ADDRESS_64(n) (0x000c + 8 * (n)) - -/* HW specific definitions */ -#define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x20 - -#define XILINX_DMA_DMAXR_ALL_IRQ_MASK \ - (XILINX_DMA_DMASR_FRM_CNT_IRQ | \ - XILINX_DMA_DMASR_DLY_CNT_IRQ | \ - XILINX_DMA_DMASR_ERR_IRQ) - -#define XILINX_DMA_DMASR_ALL_ERR_MASK \ - (XILINX_DMA_DMASR_EOL_LATE_ERR | \ - XILINX_DMA_DMASR_SOF_LATE_ERR | \ - XILINX_DMA_DMASR_SG_DEC_ERR | \ - XILINX_DMA_DMASR_SG_SLV_ERR | \ - XILINX_DMA_DMASR_EOF_EARLY_ERR | \ - XILINX_DMA_DMASR_SOF_EARLY_ERR | \ - XILINX_DMA_DMASR_DMA_DEC_ERR | \ - XILINX_DMA_DMASR_DMA_SLAVE_ERR | \ - XILINX_DMA_DMASR_DMA_INT_ERR) - -/* - * Recoverable errors are DMA Internal error, SOF Early, EOF Early - * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC - * is enabled in the h/w system. 
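
The recoverable/unrecoverable split described in the comment above is applied later in the interrupt handler; as a sketch, the decision it implements is equivalent to the following helper (the function name is illustrative, not part of the driver):

/* Illustrative: classify a DMASR snapshot using the masks above. */
static bool xilinx_dma_errors_are_fatal(u32 status, bool flush_on_fsync)
{
	u32 errors = status & XILINX_DMA_DMASR_ALL_ERR_MASK;

	if (!errors)
		return false;

	/* Recoverable bits may be ignored only with flush-on-fsync */
	return !flush_on_fsync ||
	       (errors & ~XILINX_DMA_DMASR_ERR_RECOVER_MASK);
}
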
- */ -#define XILINX_DMA_DMASR_ERR_RECOVER_MASK \ - (XILINX_DMA_DMASR_SOF_LATE_ERR | \ - XILINX_DMA_DMASR_EOF_EARLY_ERR | \ - XILINX_DMA_DMASR_SOF_EARLY_ERR | \ - XILINX_DMA_DMASR_DMA_INT_ERR) - -/* Axi VDMA Flush on Fsync bits */ -#define XILINX_DMA_FLUSH_S2MM 3 -#define XILINX_DMA_FLUSH_MM2S 2 -#define XILINX_DMA_FLUSH_BOTH 1 - -/* Delay loop counter to prevent hardware failure */ -#define XILINX_DMA_LOOP_COUNT 1000000 - -/* AXI DMA Specific Registers/Offsets */ -#define XILINX_DMA_REG_SRCDSTADDR 0x18 -#define XILINX_DMA_REG_BTT 0x28 - -/* AXI DMA Specific Masks/Bit fields */ -#define XILINX_DMA_MAX_TRANS_LEN GENMASK(22, 0) -#define XILINX_DMA_CR_COALESCE_MAX GENMASK(23, 16) -#define XILINX_DMA_CR_CYCLIC_BD_EN_MASK BIT(4) -#define XILINX_DMA_CR_COALESCE_SHIFT 16 -#define XILINX_DMA_BD_SOP BIT(27) -#define XILINX_DMA_BD_EOP BIT(26) -#define XILINX_DMA_COALESCE_MAX 255 -#define XILINX_DMA_NUM_APP_WORDS 5 - -/* Multi-Channel DMA Descriptor offsets*/ -#define XILINX_DMA_MCRX_CDESC(x) (0x40 + (x-1) * 0x20) -#define XILINX_DMA_MCRX_TDESC(x) (0x48 + (x-1) * 0x20) - -/* Multi-Channel DMA Masks/Shifts */ -#define XILINX_DMA_BD_HSIZE_MASK GENMASK(15, 0) -#define XILINX_DMA_BD_STRIDE_MASK GENMASK(15, 0) -#define XILINX_DMA_BD_VSIZE_MASK GENMASK(31, 19) -#define XILINX_DMA_BD_TDEST_MASK GENMASK(4, 0) -#define XILINX_DMA_BD_STRIDE_SHIFT 0 -#define XILINX_DMA_BD_VSIZE_SHIFT 19 - -/* AXI CDMA Specific Registers/Offsets */ -#define XILINX_CDMA_REG_SRCADDR 0x18 -#define XILINX_CDMA_REG_DSTADDR 0x20 - -/* AXI CDMA Specific Masks */ -#define XILINX_CDMA_CR_SGMODE BIT(3) - -/** - * struct xilinx_vdma_desc_hw - Hardware Descriptor - * @next_desc: Next Descriptor Pointer @0x00 - * @pad1: Reserved @0x04 - * @buf_addr: Buffer address @0x08 - * @buf_addr_msb: MSB of Buffer address @0x0C - * @vsize: Vertical Size @0x10 - * @hsize: Horizontal Size @0x14 - * @stride: Number of bytes between the first - * pixels of each horizontal line @0x18 - */ -struct xilinx_vdma_desc_hw { - u32 next_desc; - u32 pad1; - u32 buf_addr; - u32 buf_addr_msb; - u32 vsize; - u32 hsize; - u32 stride; -} __aligned(64); - -/** - * struct xilinx_axidma_desc_hw - Hardware Descriptor for AXI DMA - * @next_desc: Next Descriptor Pointer @0x00 - * @next_desc_msb: MSB of Next Descriptor Pointer @0x04 - * @buf_addr: Buffer address @0x08 - * @buf_addr_msb: MSB of Buffer address @0x0C - * @pad1: Reserved @0x10 - * @pad2: Reserved @0x14 - * @control: Control field @0x18 - * @status: Status field @0x1C - * @app: APP Fields @0x20 - 0x30 - */ -struct xilinx_axidma_desc_hw { - u32 next_desc; - u32 next_desc_msb; - u32 buf_addr; - u32 buf_addr_msb; - u32 mcdma_control; - u32 vsize_stride; - u32 control; - u32 status; - u32 app[XILINX_DMA_NUM_APP_WORDS]; -} __aligned(64); - -/** - * struct xilinx_cdma_desc_hw - Hardware Descriptor - * @next_desc: Next Descriptor Pointer @0x00 - * @next_descmsb: Next Descriptor Pointer MSB @0x04 - * @src_addr: Source address @0x08 - * @src_addrmsb: Source address MSB @0x0C - * @dest_addr: Destination address @0x10 - * @dest_addrmsb: Destination address MSB @0x14 - * @control: Control field @0x18 - * @status: Status field @0x1C - */ -struct xilinx_cdma_desc_hw { - u32 next_desc; - u32 next_desc_msb; - u32 src_addr; - u32 src_addr_msb; - u32 dest_addr; - u32 dest_addr_msb; - u32 control; - u32 status; -} __aligned(64); - -/** - * struct xilinx_vdma_tx_segment - Descriptor segment - * @hw: Hardware descriptor - * @node: Node in the descriptor segments list - * @phys: Physical address of segment - */ -struct 
xilinx_vdma_tx_segment { - struct xilinx_vdma_desc_hw hw; - struct list_head node; - dma_addr_t phys; -} __aligned(64); - -/** - * struct xilinx_axidma_tx_segment - Descriptor segment - * @hw: Hardware descriptor - * @node: Node in the descriptor segments list - * @phys: Physical address of segment - */ -struct xilinx_axidma_tx_segment { - struct xilinx_axidma_desc_hw hw; - struct list_head node; - dma_addr_t phys; -} __aligned(64); - -/** - * struct xilinx_cdma_tx_segment - Descriptor segment - * @hw: Hardware descriptor - * @node: Node in the descriptor segments list - * @phys: Physical address of segment - */ -struct xilinx_cdma_tx_segment { - struct xilinx_cdma_desc_hw hw; - struct list_head node; - dma_addr_t phys; -} __aligned(64); - -/** - * struct xilinx_dma_tx_descriptor - Per Transaction structure - * @async_tx: Async transaction descriptor - * @segments: TX segments list - * @node: Node in the channel descriptors list - * @cyclic: Check for cyclic transfers. - */ -struct xilinx_dma_tx_descriptor { - struct dma_async_tx_descriptor async_tx; - struct list_head segments; - struct list_head node; - bool cyclic; -}; - -/** - * struct xilinx_dma_chan - Driver specific DMA channel structure - * @xdev: Driver specific device structure - * @ctrl_offset: Control registers offset - * @desc_offset: TX descriptor registers offset - * @lock: Descriptor operation lock - * @pending_list: Descriptors waiting - * @active_list: Descriptors ready to submit - * @done_list: Complete descriptors - * @common: DMA common channel - * @desc_pool: Descriptors pool - * @dev: The dma device - * @irq: Channel IRQ - * @id: Channel ID - * @direction: Transfer direction - * @num_frms: Number of frames - * @has_sg: Support scatter transfers - * @cyclic: Check for cyclic transfers. 
- * @genlock: Support genlock mode - * @err: Channel has errors - * @tasklet: Cleanup work after irq - * @config: Device configuration info - * @flush_on_fsync: Flush on Frame sync - * @desc_pendingcount: Descriptor pending count - * @ext_addr: Indicates 64 bit addressing is supported by dma channel - * @desc_submitcount: Descriptor h/w submitted count - * @residue: Residue for AXI DMA - * @seg_v: Statically allocated segments base - * @cyclic_seg_v: Statically allocated segment base for cyclic transfers - * @start_transfer: Differentiate b/w DMA IP's transfer - */ -struct xilinx_dma_chan { - struct xilinx_dma_device *xdev; - u32 ctrl_offset; - u32 desc_offset; - spinlock_t lock; - struct list_head pending_list; - struct list_head active_list; - struct list_head done_list; - struct dma_chan common; - struct dma_pool *desc_pool; - struct device *dev; - int irq; - int id; - enum dma_transfer_direction direction; - int num_frms; - bool has_sg; - bool cyclic; - bool genlock; - bool err; - struct tasklet_struct tasklet; - struct xilinx_vdma_config config; - bool flush_on_fsync; - u32 desc_pendingcount; - bool ext_addr; - u32 desc_submitcount; - u32 residue; - struct xilinx_axidma_tx_segment *seg_v; - struct xilinx_axidma_tx_segment *cyclic_seg_v; - void (*start_transfer)(struct xilinx_dma_chan *chan); - u16 tdest; -}; - -struct xilinx_dma_config { - enum xdma_ip_type dmatype; - int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk, - struct clk **tx_clk, struct clk **txs_clk, - struct clk **rx_clk, struct clk **rxs_clk); -}; - -/** - * struct xilinx_dma_device - DMA device structure - * @regs: I/O mapped base address - * @dev: Device Structure - * @common: DMA device structure - * @chan: Driver specific DMA channel - * @has_sg: Specifies whether Scatter-Gather is present or not - * @mcdma: Specifies whether Multi-Channel is present or not - * @flush_on_fsync: Flush on frame sync - * @ext_addr: Indicates 64 bit addressing is supported by dma device - * @pdev: Platform device structure pointer - * @dma_config: DMA config structure - * @axi_clk: DMA Axi4-lite interace clock - * @tx_clk: DMA mm2s clock - * @txs_clk: DMA mm2s stream clock - * @rx_clk: DMA s2mm clock - * @rxs_clk: DMA s2mm stream clock - * @nr_channels: Number of channels DMA device supports - * @chan_id: DMA channel identifier - */ -struct xilinx_dma_device { - void __iomem *regs; - struct device *dev; - struct dma_device common; - struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE]; - bool has_sg; - bool mcdma; - u32 flush_on_fsync; - bool ext_addr; - struct platform_device *pdev; - const struct xilinx_dma_config *dma_config; - struct clk *axi_clk; - struct clk *tx_clk; - struct clk *txs_clk; - struct clk *rx_clk; - struct clk *rxs_clk; - u32 nr_channels; - u32 chan_id; -}; - -/* Macros */ -#define to_xilinx_chan(chan) \ - container_of(chan, struct xilinx_dma_chan, common) -#define to_dma_tx_descriptor(tx) \ - container_of(tx, struct xilinx_dma_tx_descriptor, async_tx) -#define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \ - readl_poll_timeout(chan->xdev->regs + chan->ctrl_offset + reg, val, \ - cond, delay_us, timeout_us) - -/* IO accessors */ -static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg) -{ - return ioread32(chan->xdev->regs + reg); -} - -static inline void dma_write(struct xilinx_dma_chan *chan, u32 reg, u32 value) -{ - iowrite32(value, chan->xdev->regs + reg); -} - -static inline void vdma_desc_write(struct xilinx_dma_chan *chan, u32 reg, - u32 value) -{ - 
dma_write(chan, chan->desc_offset + reg, value); -} - -static inline u32 dma_ctrl_read(struct xilinx_dma_chan *chan, u32 reg) -{ - return dma_read(chan, chan->ctrl_offset + reg); -} - -static inline void dma_ctrl_write(struct xilinx_dma_chan *chan, u32 reg, - u32 value) -{ - dma_write(chan, chan->ctrl_offset + reg, value); -} - -static inline void dma_ctrl_clr(struct xilinx_dma_chan *chan, u32 reg, - u32 clr) -{ - dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) & ~clr); -} - -static inline void dma_ctrl_set(struct xilinx_dma_chan *chan, u32 reg, - u32 set) -{ - dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) | set); -} - -/** - * vdma_desc_write_64 - 64-bit descriptor write - * @chan: Driver specific VDMA channel - * @reg: Register to write - * @value_lsb: lower address of the descriptor. - * @value_msb: upper address of the descriptor. - * - * Since vdma driver is trying to write to a register offset which is not a - * multiple of 64 bits(ex : 0x5c), we are writing as two separate 32 bits - * instead of a single 64 bit register write. - */ -static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg, - u32 value_lsb, u32 value_msb) -{ - /* Write the lsb 32 bits*/ - writel(value_lsb, chan->xdev->regs + chan->desc_offset + reg); - - /* Write the msb 32 bits */ - writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4); -} - -static inline void dma_writeq(struct xilinx_dma_chan *chan, u32 reg, u64 value) -{ - lo_hi_writeq(value, chan->xdev->regs + chan->ctrl_offset + reg); -} - -static inline void xilinx_write(struct xilinx_dma_chan *chan, u32 reg, - dma_addr_t addr) -{ - if (chan->ext_addr) - dma_writeq(chan, reg, addr); - else - dma_ctrl_write(chan, reg, addr); -} - -static inline void xilinx_axidma_buf(struct xilinx_dma_chan *chan, - struct xilinx_axidma_desc_hw *hw, - dma_addr_t buf_addr, size_t sg_used, - size_t period_len) -{ - if (chan->ext_addr) { - hw->buf_addr = lower_32_bits(buf_addr + sg_used + period_len); - hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used + - period_len); - } else { - hw->buf_addr = buf_addr + sg_used + period_len; - } -} - -/* ----------------------------------------------------------------------------- - * Descriptors and segments alloc and free - */ - -/** - * xilinx_vdma_alloc_tx_segment - Allocate transaction segment - * @chan: Driver specific DMA channel - * - * Return: The allocated segment on success and NULL on failure. - */ -static struct xilinx_vdma_tx_segment * -xilinx_vdma_alloc_tx_segment(struct xilinx_dma_chan *chan) -{ - struct xilinx_vdma_tx_segment *segment; - dma_addr_t phys; - - segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys); - if (!segment) - return NULL; - - segment->phys = phys; - - return segment; -} - -/** - * xilinx_cdma_alloc_tx_segment - Allocate transaction segment - * @chan: Driver specific DMA channel - * - * Return: The allocated segment on success and NULL on failure. - */ -static struct xilinx_cdma_tx_segment * -xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan) -{ - struct xilinx_cdma_tx_segment *segment; - dma_addr_t phys; - - segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys); - if (!segment) - return NULL; - - segment->phys = phys; - - return segment; -} - -/** - * xilinx_axidma_alloc_tx_segment - Allocate transaction segment - * @chan: Driver specific DMA channel - * - * Return: The allocated segment on success and NULL on failure. 
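
A worked example of the ext_addr address split used by xilinx_write() and xilinx_axidma_buf() above, since a 64-bit address is always programmed as two 32-bit halves:

/* For dma_addr_t addr = 0x123456780ULL:
 *   lower_32_bits(addr) == 0x23456780
 *   upper_32_bits(addr) == 0x00000001
 * vdma_desc_write_64() writes the low word first, matching the
 * lo_hi_writeq() ordering that dma_writeq() uses for aligned registers.
 */
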
- */ -static struct xilinx_axidma_tx_segment * -xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan) -{ - struct xilinx_axidma_tx_segment *segment; - dma_addr_t phys; - - segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys); - if (!segment) - return NULL; - - segment->phys = phys; - - return segment; -} - -/** - * xilinx_dma_free_tx_segment - Free transaction segment - * @chan: Driver specific DMA channel - * @segment: DMA transaction segment - */ -static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan, - struct xilinx_axidma_tx_segment *segment) -{ - dma_pool_free(chan->desc_pool, segment, segment->phys); -} - -/** - * xilinx_cdma_free_tx_segment - Free transaction segment - * @chan: Driver specific DMA channel - * @segment: DMA transaction segment - */ -static void xilinx_cdma_free_tx_segment(struct xilinx_dma_chan *chan, - struct xilinx_cdma_tx_segment *segment) -{ - dma_pool_free(chan->desc_pool, segment, segment->phys); -} - -/** - * xilinx_vdma_free_tx_segment - Free transaction segment - * @chan: Driver specific DMA channel - * @segment: DMA transaction segment - */ -static void xilinx_vdma_free_tx_segment(struct xilinx_dma_chan *chan, - struct xilinx_vdma_tx_segment *segment) -{ - dma_pool_free(chan->desc_pool, segment, segment->phys); -} - -/** - * xilinx_dma_tx_descriptor - Allocate transaction descriptor - * @chan: Driver specific DMA channel - * - * Return: The allocated descriptor on success and NULL on failure. - */ -static struct xilinx_dma_tx_descriptor * -xilinx_dma_alloc_tx_descriptor(struct xilinx_dma_chan *chan) -{ - struct xilinx_dma_tx_descriptor *desc; - - desc = kzalloc(sizeof(*desc), GFP_KERNEL); - if (!desc) - return NULL; - - INIT_LIST_HEAD(&desc->segments); - - return desc; -} - -/** - * xilinx_dma_free_tx_descriptor - Free transaction descriptor - * @chan: Driver specific DMA channel - * @desc: DMA transaction descriptor - */ -static void -xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan, - struct xilinx_dma_tx_descriptor *desc) -{ - struct xilinx_vdma_tx_segment *segment, *next; - struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next; - struct xilinx_axidma_tx_segment *axidma_segment, *axidma_next; - - if (!desc) - return; - - if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { - list_for_each_entry_safe(segment, next, &desc->segments, node) { - list_del(&segment->node); - xilinx_vdma_free_tx_segment(chan, segment); - } - } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { - list_for_each_entry_safe(cdma_segment, cdma_next, - &desc->segments, node) { - list_del(&cdma_segment->node); - xilinx_cdma_free_tx_segment(chan, cdma_segment); - } - } else { - list_for_each_entry_safe(axidma_segment, axidma_next, - &desc->segments, node) { - list_del(&axidma_segment->node); - xilinx_dma_free_tx_segment(chan, axidma_segment); - } - } - - kfree(desc); -} - -/* Required functions */ - -/** - * xilinx_dma_free_desc_list - Free descriptors list - * @chan: Driver specific DMA channel - * @list: List to parse and delete the descriptor - */ -static void xilinx_dma_free_desc_list(struct xilinx_dma_chan *chan, - struct list_head *list) -{ - struct xilinx_dma_tx_descriptor *desc, *next; - - list_for_each_entry_safe(desc, next, list, node) { - list_del(&desc->node); - xilinx_dma_free_tx_descriptor(chan, desc); - } -} - -/** - * xilinx_dma_free_descriptors - Free channel descriptors - * @chan: Driver specific DMA channel - */ -static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan) -{ - unsigned long flags; - - 
spin_lock_irqsave(&chan->lock, flags); - - xilinx_dma_free_desc_list(chan, &chan->pending_list); - xilinx_dma_free_desc_list(chan, &chan->done_list); - xilinx_dma_free_desc_list(chan, &chan->active_list); - - spin_unlock_irqrestore(&chan->lock, flags); -} - -/** - * xilinx_dma_free_chan_resources - Free channel resources - * @dchan: DMA channel - */ -static void xilinx_dma_free_chan_resources(struct dma_chan *dchan) -{ - struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); - - dev_dbg(chan->dev, "Free all channel resources.\n"); - - xilinx_dma_free_descriptors(chan); - if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { - xilinx_dma_free_tx_segment(chan, chan->cyclic_seg_v); - xilinx_dma_free_tx_segment(chan, chan->seg_v); - } - dma_pool_destroy(chan->desc_pool); - chan->desc_pool = NULL; -} - -/** - * xilinx_dma_chan_handle_cyclic - Cyclic dma callback - * @chan: Driver specific dma channel - * @desc: dma transaction descriptor - * @flags: flags for spin lock - */ -static void xilinx_dma_chan_handle_cyclic(struct xilinx_dma_chan *chan, - struct xilinx_dma_tx_descriptor *desc, - unsigned long *flags) -{ - dma_async_tx_callback callback; - void *callback_param; - - callback = desc->async_tx.callback; - callback_param = desc->async_tx.callback_param; - if (callback) { - spin_unlock_irqrestore(&chan->lock, *flags); - callback(callback_param); - spin_lock_irqsave(&chan->lock, *flags); - } -} - -/** - * xilinx_dma_chan_desc_cleanup - Clean channel descriptors - * @chan: Driver specific DMA channel - */ -static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan) -{ - struct xilinx_dma_tx_descriptor *desc, *next; - unsigned long flags; - - spin_lock_irqsave(&chan->lock, flags); - - list_for_each_entry_safe(desc, next, &chan->done_list, node) { - dma_async_tx_callback callback; - void *callback_param; - - if (desc->cyclic) { - xilinx_dma_chan_handle_cyclic(chan, desc, &flags); - break; - } - - /* Remove from the list of running transactions */ - list_del(&desc->node); - - /* Run the link descriptor callback function */ - callback = desc->async_tx.callback; - callback_param = desc->async_tx.callback_param; - if (callback) { - spin_unlock_irqrestore(&chan->lock, flags); - callback(callback_param); - spin_lock_irqsave(&chan->lock, flags); - } - - /* Run any dependencies, then free the descriptor */ - dma_run_dependencies(&desc->async_tx); - xilinx_dma_free_tx_descriptor(chan, desc); - } - - spin_unlock_irqrestore(&chan->lock, flags); -} - -/** - * xilinx_dma_do_tasklet - Schedule completion tasklet - * @data: Pointer to the Xilinx DMA channel structure - */ -static void xilinx_dma_do_tasklet(unsigned long data) -{ - struct xilinx_dma_chan *chan = (struct xilinx_dma_chan *)data; - - xilinx_dma_chan_desc_cleanup(chan); -} - -/** - * xilinx_dma_alloc_chan_resources - Allocate channel resources - * @dchan: DMA channel - * - * Return: '0' on success and failure value on error - */ -static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan) -{ - struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); - - /* Has this channel already been allocated? */ - if (chan->desc_pool) - return 0; - - /* - * We need the descriptor to be aligned to 64bytes - * for meeting Xilinx VDMA specification requirement. 
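
Note that both completion paths above (the cyclic handler and the regular done_list walk) drop chan->lock around the client callback, so callbacks run in tasklet context without the channel lock held and may safely re-enter the dmaengine API. A minimal client callback sketch, where struct client_ctx is hypothetical:

#include <linux/completion.h>

struct client_ctx {
	struct completion done;	/* hypothetical client state */
};

/* Illustrative: runs from the driver's tasklet with chan->lock
 * released, so calling back into dmaengine here is safe.
 */
static void client_dma_complete(void *param)
{
	struct client_ctx *ctx = param;

	complete(&ctx->done);
}
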
- */ - if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { - chan->desc_pool = dma_pool_create("xilinx_dma_desc_pool", - chan->dev, - sizeof(struct xilinx_axidma_tx_segment), - __alignof__(struct xilinx_axidma_tx_segment), - 0); - } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { - chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool", - chan->dev, - sizeof(struct xilinx_cdma_tx_segment), - __alignof__(struct xilinx_cdma_tx_segment), - 0); - } else { - chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool", - chan->dev, - sizeof(struct xilinx_vdma_tx_segment), - __alignof__(struct xilinx_vdma_tx_segment), - 0); - } - - if (!chan->desc_pool) { - dev_err(chan->dev, - "unable to allocate channel %d descriptor pool\n", - chan->id); - return -ENOMEM; - } - - if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { - /* - * For AXI DMA case after submitting a pending_list, keep - * an extra segment allocated so that the "next descriptor" - * pointer on the tail descriptor always points to a - * valid descriptor, even when paused after reaching taildesc. - * This way, it is possible to issue additional - * transfers without halting and restarting the channel. - */ - chan->seg_v = xilinx_axidma_alloc_tx_segment(chan); - - /* - * For cyclic DMA mode we need to program the tail Descriptor - * register with a value which is not a part of the BD chain - * so allocating a desc segment during channel allocation for - * programming tail descriptor. - */ - chan->cyclic_seg_v = xilinx_axidma_alloc_tx_segment(chan); - } - - dma_cookie_init(dchan); - - if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { - /* For AXI DMA resetting once channel will reset the - * other channel as well so enable the interrupts here. - */ - dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, - XILINX_DMA_DMAXR_ALL_IRQ_MASK); - } - - if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg) - dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, - XILINX_CDMA_CR_SGMODE); - - return 0; -} - -/** - * xilinx_dma_tx_status - Get DMA transaction status - * @dchan: DMA channel - * @cookie: Transaction identifier - * @txstate: Transaction state - * - * Return: DMA transaction status - */ -static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan, - dma_cookie_t cookie, - struct dma_tx_state *txstate) -{ - struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); - struct xilinx_dma_tx_descriptor *desc; - struct xilinx_axidma_tx_segment *segment; - struct xilinx_axidma_desc_hw *hw; - enum dma_status ret; - unsigned long flags; - u32 residue = 0; - - ret = dma_cookie_status(dchan, cookie, txstate); - if (ret == DMA_COMPLETE || !txstate) - return ret; - - if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { - spin_lock_irqsave(&chan->lock, flags); - - desc = list_last_entry(&chan->active_list, - struct xilinx_dma_tx_descriptor, node); - if (chan->has_sg) { - list_for_each_entry(segment, &desc->segments, node) { - hw = &segment->hw; - residue += (hw->control - hw->status) & - XILINX_DMA_MAX_TRANS_LEN; - } - } - spin_unlock_irqrestore(&chan->lock, flags); - - chan->residue = residue; - dma_set_residue(txstate, chan->residue); - } - - return ret; -} - -/** - * xilinx_dma_is_running - Check if DMA channel is running - * @chan: Driver specific DMA channel - * - * Return: '1' if running, '0' if not. 
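
Since residue is reported at segment granularity and only for AXI DMA, a client reads it through the standard status query; a minimal sketch:

#include <linux/dmaengine.h>

/* Illustrative: bytes still outstanding for a submitted cookie. */
static u32 client_residue(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct dma_tx_state state;

	if (dmaengine_tx_status(chan, cookie, &state) == DMA_IN_PROGRESS)
		return state.residue;

	return 0;
}
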
- */ -static bool xilinx_dma_is_running(struct xilinx_dma_chan *chan) -{ - return !(dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) & - XILINX_DMA_DMASR_HALTED) && - (dma_ctrl_read(chan, XILINX_DMA_REG_DMACR) & - XILINX_DMA_DMACR_RUNSTOP); -} - -/** - * xilinx_dma_is_idle - Check if DMA channel is idle - * @chan: Driver specific DMA channel - * - * Return: '1' if idle, '0' if not. - */ -static bool xilinx_dma_is_idle(struct xilinx_dma_chan *chan) -{ - return dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) & - XILINX_DMA_DMASR_IDLE; -} - -/** - * xilinx_dma_halt - Halt DMA channel - * @chan: Driver specific DMA channel - */ -static void xilinx_dma_halt(struct xilinx_dma_chan *chan) -{ - int err; - u32 val; - - dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP); - - /* Wait for the hardware to halt */ - err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val, - (val & XILINX_DMA_DMASR_HALTED), 0, - XILINX_DMA_LOOP_COUNT); - - if (err) { - dev_err(chan->dev, "Cannot stop channel %p: %x\n", - chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR)); - chan->err = true; - } -} - -/** - * xilinx_dma_start - Start DMA channel - * @chan: Driver specific DMA channel - */ -static void xilinx_dma_start(struct xilinx_dma_chan *chan) -{ - int err; - u32 val; - - dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP); - - /* Wait for the hardware to start */ - err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val, - !(val & XILINX_DMA_DMASR_HALTED), 0, - XILINX_DMA_LOOP_COUNT); - - if (err) { - dev_err(chan->dev, "Cannot start channel %p: %x\n", - chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR)); - - chan->err = true; - } -} - -/** - * xilinx_vdma_start_transfer - Starts VDMA transfer - * @chan: Driver specific channel struct pointer - */ -static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan) -{ - struct xilinx_vdma_config *config = &chan->config; - struct xilinx_dma_tx_descriptor *desc, *tail_desc; - u32 reg; - struct xilinx_vdma_tx_segment *tail_segment; - - /* This function was invoked with lock held */ - if (chan->err) - return; - - if (list_empty(&chan->pending_list)) - return; - - desc = list_first_entry(&chan->pending_list, - struct xilinx_dma_tx_descriptor, node); - tail_desc = list_last_entry(&chan->pending_list, - struct xilinx_dma_tx_descriptor, node); - - tail_segment = list_last_entry(&tail_desc->segments, - struct xilinx_vdma_tx_segment, node); - - /* If it is SG mode and hardware is busy, cannot submit */ - if (chan->has_sg && xilinx_dma_is_running(chan) && - !xilinx_dma_is_idle(chan)) { - dev_dbg(chan->dev, "DMA controller still busy\n"); - return; - } - - /* - * If hardware is idle, then all descriptors on the running lists are - * done, start new transfers - */ - if (chan->has_sg) - dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC, - desc->async_tx.phys); - - /* Configure the hardware using info in the config structure */ - reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); - - if (config->frm_cnt_en) - reg |= XILINX_DMA_DMACR_FRAMECNT_EN; - else - reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN; - - /* Configure channel to allow number frame buffers */ - dma_ctrl_write(chan, XILINX_DMA_REG_FRMSTORE, - chan->desc_pendingcount); - - /* - * With SG, start with circular mode, so that BDs can be fetched. 
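
The park/circular selection above is driven entirely by the per-channel config that a video client programs through the exported xilinx_vdma_channel_set_config() (defined further down in this file). A minimal sketch, assuming the include/linux/dma/xilinx_dma.h header from this series:

#include <linux/dma/xilinx_dma.h>

/* Illustrative: park the channel on frame 0, one interrupt per frame. */
static int client_park_frame0(struct dma_chan *chan)
{
	struct xilinx_vdma_config cfg = {
		.park		= 1,
		.park_frm	= 0,
		.frm_cnt_en	= 1,
		.coalesc	= 1,
	};

	return xilinx_vdma_channel_set_config(chan, &cfg);
}
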
- * In direct register mode, if not parking, enable circular mode - */ - if (chan->has_sg || !config->park) - reg |= XILINX_DMA_DMACR_CIRC_EN; - - if (config->park) - reg &= ~XILINX_DMA_DMACR_CIRC_EN; - - dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg); - - if (config->park && (config->park_frm >= 0) && - (config->park_frm < chan->num_frms)) { - if (chan->direction == DMA_MEM_TO_DEV) - dma_write(chan, XILINX_DMA_REG_PARK_PTR, - config->park_frm << - XILINX_DMA_PARK_PTR_RD_REF_SHIFT); - else - dma_write(chan, XILINX_DMA_REG_PARK_PTR, - config->park_frm << - XILINX_DMA_PARK_PTR_WR_REF_SHIFT); - } - - /* Start the hardware */ - xilinx_dma_start(chan); - - if (chan->err) - return; - - /* Start the transfer */ - if (chan->has_sg) { - dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC, - tail_segment->phys); - } else { - struct xilinx_vdma_tx_segment *segment, *last = NULL; - int i = 0; - - if (chan->desc_submitcount < chan->num_frms) - i = chan->desc_submitcount; - - list_for_each_entry(segment, &desc->segments, node) { - if (chan->ext_addr) - vdma_desc_write_64(chan, - XILINX_VDMA_REG_START_ADDRESS_64(i++), - segment->hw.buf_addr, - segment->hw.buf_addr_msb); - else - vdma_desc_write(chan, - XILINX_VDMA_REG_START_ADDRESS(i++), - segment->hw.buf_addr); - - last = segment; - } - - if (!last) - return; - - /* HW expects these parameters to be same for one transaction */ - vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize); - vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE, - last->hw.stride); - vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize); - } - - if (!chan->has_sg) { - list_del(&desc->node); - list_add_tail(&desc->node, &chan->active_list); - chan->desc_submitcount++; - chan->desc_pendingcount--; - if (chan->desc_submitcount == chan->num_frms) - chan->desc_submitcount = 0; - } else { - list_splice_tail_init(&chan->pending_list, &chan->active_list); - chan->desc_pendingcount = 0; - } -} - -/** - * xilinx_cdma_start_transfer - Starts cdma transfer - * @chan: Driver specific channel struct pointer - */ -static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan) -{ - struct xilinx_dma_tx_descriptor *head_desc, *tail_desc; - struct xilinx_cdma_tx_segment *tail_segment; - u32 ctrl_reg = dma_read(chan, XILINX_DMA_REG_DMACR); - - if (chan->err) - return; - - if (list_empty(&chan->pending_list)) - return; - - head_desc = list_first_entry(&chan->pending_list, - struct xilinx_dma_tx_descriptor, node); - tail_desc = list_last_entry(&chan->pending_list, - struct xilinx_dma_tx_descriptor, node); - tail_segment = list_last_entry(&tail_desc->segments, - struct xilinx_cdma_tx_segment, node); - - if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) { - ctrl_reg &= ~XILINX_DMA_CR_COALESCE_MAX; - ctrl_reg |= chan->desc_pendingcount << - XILINX_DMA_CR_COALESCE_SHIFT; - dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, ctrl_reg); - } - - if (chan->has_sg) { - xilinx_write(chan, XILINX_DMA_REG_CURDESC, - head_desc->async_tx.phys); - - /* Update tail ptr register which will start the transfer */ - xilinx_write(chan, XILINX_DMA_REG_TAILDESC, - tail_segment->phys); - } else { - /* In simple mode */ - struct xilinx_cdma_tx_segment *segment; - struct xilinx_cdma_desc_hw *hw; - - segment = list_first_entry(&head_desc->segments, - struct xilinx_cdma_tx_segment, - node); - - hw = &segment->hw; - - xilinx_write(chan, XILINX_CDMA_REG_SRCADDR, hw->src_addr); - xilinx_write(chan, XILINX_CDMA_REG_DSTADDR, hw->dest_addr); - - /* Start the transfer */ - dma_ctrl_write(chan, XILINX_DMA_REG_BTT, - 
hw->control & XILINX_DMA_MAX_TRANS_LEN); - } - - list_splice_tail_init(&chan->pending_list, &chan->active_list); - chan->desc_pendingcount = 0; -} - -/** - * xilinx_dma_start_transfer - Starts DMA transfer - * @chan: Driver specific channel struct pointer - */ -static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan) -{ - struct xilinx_dma_tx_descriptor *head_desc, *tail_desc; - struct xilinx_axidma_tx_segment *tail_segment, *old_head, *new_head; - u32 reg; - - if (chan->err) - return; - - if (list_empty(&chan->pending_list)) - return; - - /* If it is SG mode and hardware is busy, cannot submit */ - if (chan->has_sg && xilinx_dma_is_running(chan) && - !xilinx_dma_is_idle(chan)) { - dev_dbg(chan->dev, "DMA controller still busy\n"); - return; - } - - head_desc = list_first_entry(&chan->pending_list, - struct xilinx_dma_tx_descriptor, node); - tail_desc = list_last_entry(&chan->pending_list, - struct xilinx_dma_tx_descriptor, node); - tail_segment = list_last_entry(&tail_desc->segments, - struct xilinx_axidma_tx_segment, node); - - if (chan->has_sg && !chan->xdev->mcdma) { - old_head = list_first_entry(&head_desc->segments, - struct xilinx_axidma_tx_segment, node); - new_head = chan->seg_v; - /* Copy Buffer Descriptor fields. */ - new_head->hw = old_head->hw; - - /* Swap and save new reserve */ - list_replace_init(&old_head->node, &new_head->node); - chan->seg_v = old_head; - - tail_segment->hw.next_desc = chan->seg_v->phys; - head_desc->async_tx.phys = new_head->phys; - } - - reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); - - if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) { - reg &= ~XILINX_DMA_CR_COALESCE_MAX; - reg |= chan->desc_pendingcount << - XILINX_DMA_CR_COALESCE_SHIFT; - dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg); - } - - if (chan->has_sg && !chan->xdev->mcdma) - xilinx_write(chan, XILINX_DMA_REG_CURDESC, - head_desc->async_tx.phys); - - if (chan->has_sg && chan->xdev->mcdma) { - if (chan->direction == DMA_MEM_TO_DEV) { - dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC, - head_desc->async_tx.phys); - } else { - if (!chan->tdest) { - dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC, - head_desc->async_tx.phys); - } else { - dma_ctrl_write(chan, - XILINX_DMA_MCRX_CDESC(chan->tdest), - head_desc->async_tx.phys); - } - } - } - - xilinx_dma_start(chan); - - if (chan->err) - return; - - /* Start the transfer */ - if (chan->has_sg && !chan->xdev->mcdma) { - if (chan->cyclic) - xilinx_write(chan, XILINX_DMA_REG_TAILDESC, - chan->cyclic_seg_v->phys); - else - xilinx_write(chan, XILINX_DMA_REG_TAILDESC, - tail_segment->phys); - } else if (chan->has_sg && chan->xdev->mcdma) { - if (chan->direction == DMA_MEM_TO_DEV) { - dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC, - tail_segment->phys); - } else { - if (!chan->tdest) { - dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC, - tail_segment->phys); - } else { - dma_ctrl_write(chan, - XILINX_DMA_MCRX_TDESC(chan->tdest), - tail_segment->phys); - } - } - } else { - struct xilinx_axidma_tx_segment *segment; - struct xilinx_axidma_desc_hw *hw; - - segment = list_first_entry(&head_desc->segments, - struct xilinx_axidma_tx_segment, - node); - hw = &segment->hw; - - xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR, hw->buf_addr); - - /* Start the transfer */ - dma_ctrl_write(chan, XILINX_DMA_REG_BTT, - hw->control & XILINX_DMA_MAX_TRANS_LEN); - } - - list_splice_tail_init(&chan->pending_list, &chan->active_list); - chan->desc_pendingcount = 0; -} - -/** - * xilinx_dma_issue_pending - Issue pending transactions - * @dchan: DMA channel - */ 
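
xilinx_dma_issue_pending(), declared above, is the second half of the usual dmaengine contract: prep and submit only append to pending_list, and nothing touches the hardware until the client kicks the channel. A minimal client-side sketch of the full flow:

#include <linux/dmaengine.h>

/* Illustrative: the canonical prep -> submit -> issue sequence. */
static int client_start_sg(struct dma_chan *chan, struct scatterlist *sgl,
			   unsigned int sg_len)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
				     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx)
		return -ENOMEM;

	cookie = dmaengine_submit(tx);	/* calls xilinx_dma_tx_submit() */
	if (dma_submit_error(cookie))
		return cookie;

	dma_async_issue_pending(chan);	/* invokes chan->start_transfer() */
	return 0;
}
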
-static void xilinx_dma_issue_pending(struct dma_chan *dchan) -{ - struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); - unsigned long flags; - - spin_lock_irqsave(&chan->lock, flags); - chan->start_transfer(chan); - spin_unlock_irqrestore(&chan->lock, flags); -} - -/** - * xilinx_dma_complete_descriptor - Mark the active descriptor as complete - * @chan : xilinx DMA channel - * - * CONTEXT: hardirq - */ -static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan) -{ - struct xilinx_dma_tx_descriptor *desc, *next; - - /* This function was invoked with lock held */ - if (list_empty(&chan->active_list)) - return; - - list_for_each_entry_safe(desc, next, &chan->active_list, node) { - list_del(&desc->node); - if (!desc->cyclic) - dma_cookie_complete(&desc->async_tx); - list_add_tail(&desc->node, &chan->done_list); - } -} - -/** - * xilinx_dma_reset - Reset DMA channel - * @chan: Driver specific DMA channel - * - * Return: '0' on success and failure value on error - */ -static int xilinx_dma_reset(struct xilinx_dma_chan *chan) -{ - int err; - u32 tmp; - - dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RESET); - - /* Wait for the hardware to finish reset */ - err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMACR, tmp, - !(tmp & XILINX_DMA_DMACR_RESET), 0, - XILINX_DMA_LOOP_COUNT); - - if (err) { - dev_err(chan->dev, "reset timeout, cr %x, sr %x\n", - dma_ctrl_read(chan, XILINX_DMA_REG_DMACR), - dma_ctrl_read(chan, XILINX_DMA_REG_DMASR)); - return -ETIMEDOUT; - } - - chan->err = false; - - return err; -} - -/** - * xilinx_dma_chan_reset - Reset DMA channel and enable interrupts - * @chan: Driver specific DMA channel - * - * Return: '0' on success and failure value on error - */ -static int xilinx_dma_chan_reset(struct xilinx_dma_chan *chan) -{ - int err; - - /* Reset VDMA */ - err = xilinx_dma_reset(chan); - if (err) - return err; - - /* Enable interrupts */ - dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, - XILINX_DMA_DMAXR_ALL_IRQ_MASK); - - return 0; -} - -/** - * xilinx_dma_irq_handler - DMA Interrupt handler - * @irq: IRQ number - * @data: Pointer to the Xilinx DMA channel structure - * - * Return: IRQ_HANDLED/IRQ_NONE - */ -static irqreturn_t xilinx_dma_irq_handler(int irq, void *data) -{ - struct xilinx_dma_chan *chan = data; - u32 status; - - /* Read the status and ack the interrupts. */ - status = dma_ctrl_read(chan, XILINX_DMA_REG_DMASR); - if (!(status & XILINX_DMA_DMAXR_ALL_IRQ_MASK)) - return IRQ_NONE; - - dma_ctrl_write(chan, XILINX_DMA_REG_DMASR, - status & XILINX_DMA_DMAXR_ALL_IRQ_MASK); - - if (status & XILINX_DMA_DMASR_ERR_IRQ) { - /* - * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the - * error is recoverable, ignore it. Otherwise flag the error. - * - * Only recoverable errors can be cleared in the DMASR register, - * make sure not to write to other error bits to 1. - */ - u32 errors = status & XILINX_DMA_DMASR_ALL_ERR_MASK; - - dma_ctrl_write(chan, XILINX_DMA_REG_DMASR, - errors & XILINX_DMA_DMASR_ERR_RECOVER_MASK); - - if (!chan->flush_on_fsync || - (errors & ~XILINX_DMA_DMASR_ERR_RECOVER_MASK)) { - dev_err(chan->dev, - "Channel %p has errors %x, cdr %x tdr %x\n", - chan, errors, - dma_ctrl_read(chan, XILINX_DMA_REG_CURDESC), - dma_ctrl_read(chan, XILINX_DMA_REG_TAILDESC)); - chan->err = true; - } - } - - if (status & XILINX_DMA_DMASR_DLY_CNT_IRQ) { - /* - * Device takes too long to do the transfer when user requires - * responsiveness. 
- */ - dev_dbg(chan->dev, "Inter-packet latency too long\n"); - } - - if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) { - spin_lock(&chan->lock); - xilinx_dma_complete_descriptor(chan); - chan->start_transfer(chan); - spin_unlock(&chan->lock); - } - - tasklet_schedule(&chan->tasklet); - return IRQ_HANDLED; -} - -/** - * append_desc_queue - Queuing descriptor - * @chan: Driver specific dma channel - * @desc: dma transaction descriptor - */ -static void append_desc_queue(struct xilinx_dma_chan *chan, - struct xilinx_dma_tx_descriptor *desc) -{ - struct xilinx_vdma_tx_segment *tail_segment; - struct xilinx_dma_tx_descriptor *tail_desc; - struct xilinx_axidma_tx_segment *axidma_tail_segment; - struct xilinx_cdma_tx_segment *cdma_tail_segment; - - if (list_empty(&chan->pending_list)) - goto append; - - /* - * Add the hardware descriptor to the chain of hardware descriptors - * that already exists in memory. - */ - tail_desc = list_last_entry(&chan->pending_list, - struct xilinx_dma_tx_descriptor, node); - if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { - tail_segment = list_last_entry(&tail_desc->segments, - struct xilinx_vdma_tx_segment, - node); - tail_segment->hw.next_desc = (u32)desc->async_tx.phys; - } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { - cdma_tail_segment = list_last_entry(&tail_desc->segments, - struct xilinx_cdma_tx_segment, - node); - cdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys; - } else { - axidma_tail_segment = list_last_entry(&tail_desc->segments, - struct xilinx_axidma_tx_segment, - node); - axidma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys; - } - - /* - * Add the software descriptor and all children to the list - * of pending transactions - */ -append: - list_add_tail(&desc->node, &chan->pending_list); - chan->desc_pendingcount++; - - if (chan->has_sg && (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) - && unlikely(chan->desc_pendingcount > chan->num_frms)) { - dev_dbg(chan->dev, "desc pendingcount is too high\n"); - chan->desc_pendingcount = chan->num_frms; - } -} - -/** - * xilinx_dma_tx_submit - Submit DMA transaction - * @tx: Async transaction descriptor - * - * Return: cookie value on success and failure value on error - */ -static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx) -{ - struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx); - struct xilinx_dma_chan *chan = to_xilinx_chan(tx->chan); - dma_cookie_t cookie; - unsigned long flags; - int err; - - if (chan->cyclic) { - xilinx_dma_free_tx_descriptor(chan, desc); - return -EBUSY; - } - - if (chan->err) { - /* - * If reset fails, need to hard reset the system. 
- * Channel is no longer functional - */ - err = xilinx_dma_chan_reset(chan); - if (err < 0) - return err; - } - - spin_lock_irqsave(&chan->lock, flags); - - cookie = dma_cookie_assign(tx); - - /* Put this transaction onto the tail of the pending queue */ - append_desc_queue(chan, desc); - - if (desc->cyclic) - chan->cyclic = true; - - spin_unlock_irqrestore(&chan->lock, flags); - - return cookie; -} - -/** - * xilinx_vdma_dma_prep_interleaved - prepare a descriptor for a - * DMA_SLAVE transaction - * @dchan: DMA channel - * @xt: Interleaved template pointer - * @flags: transfer ack flags - * - * Return: Async transaction descriptor on success and NULL on failure - */ -static struct dma_async_tx_descriptor * -xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan, - struct dma_interleaved_template *xt, - unsigned long flags) -{ - struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); - struct xilinx_dma_tx_descriptor *desc; - struct xilinx_vdma_tx_segment *segment, *prev = NULL; - struct xilinx_vdma_desc_hw *hw; - - if (!is_slave_direction(xt->dir)) - return NULL; - - if (!xt->numf || !xt->sgl[0].size) - return NULL; - - if (xt->frame_size != 1) - return NULL; - - /* Allocate a transaction descriptor. */ - desc = xilinx_dma_alloc_tx_descriptor(chan); - if (!desc) - return NULL; - - dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); - desc->async_tx.tx_submit = xilinx_dma_tx_submit; - async_tx_ack(&desc->async_tx); - - /* Allocate the link descriptor from DMA pool */ - segment = xilinx_vdma_alloc_tx_segment(chan); - if (!segment) - goto error; - - /* Fill in the hardware descriptor */ - hw = &segment->hw; - hw->vsize = xt->numf; - hw->hsize = xt->sgl[0].size; - hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) << - XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT; - hw->stride |= chan->config.frm_dly << - XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT; - - if (xt->dir != DMA_MEM_TO_DEV) { - if (chan->ext_addr) { - hw->buf_addr = lower_32_bits(xt->dst_start); - hw->buf_addr_msb = upper_32_bits(xt->dst_start); - } else { - hw->buf_addr = xt->dst_start; - } - } else { - if (chan->ext_addr) { - hw->buf_addr = lower_32_bits(xt->src_start); - hw->buf_addr_msb = upper_32_bits(xt->src_start); - } else { - hw->buf_addr = xt->src_start; - } - } - - /* Insert the segment into the descriptor segments list. */ - list_add_tail(&segment->node, &desc->segments); - - prev = segment; - - /* Link the last hardware descriptor with the first. 
*/ - segment = list_first_entry(&desc->segments, - struct xilinx_vdma_tx_segment, node); - desc->async_tx.phys = segment->phys; - - return &desc->async_tx; - -error: - xilinx_dma_free_tx_descriptor(chan, desc); - return NULL; -} - -/** - * xilinx_cdma_prep_memcpy - prepare descriptors for a memcpy transaction - * @dchan: DMA channel - * @dma_dst: destination address - * @dma_src: source address - * @len: transfer length - * @flags: transfer ack flags - * - * Return: Async transaction descriptor on success and NULL on failure - */ -static struct dma_async_tx_descriptor * -xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst, - dma_addr_t dma_src, size_t len, unsigned long flags) -{ - struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); - struct xilinx_dma_tx_descriptor *desc; - struct xilinx_cdma_tx_segment *segment, *prev; - struct xilinx_cdma_desc_hw *hw; - - if (!len || len > XILINX_DMA_MAX_TRANS_LEN) - return NULL; - - desc = xilinx_dma_alloc_tx_descriptor(chan); - if (!desc) - return NULL; - - dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); - desc->async_tx.tx_submit = xilinx_dma_tx_submit; - - /* Allocate the link descriptor from DMA pool */ - segment = xilinx_cdma_alloc_tx_segment(chan); - if (!segment) - goto error; - - hw = &segment->hw; - hw->control = len; - hw->src_addr = dma_src; - hw->dest_addr = dma_dst; - if (chan->ext_addr) { - hw->src_addr_msb = upper_32_bits(dma_src); - hw->dest_addr_msb = upper_32_bits(dma_dst); - } - - /* Fill the previous next descriptor with current */ - prev = list_last_entry(&desc->segments, - struct xilinx_cdma_tx_segment, node); - prev->hw.next_desc = segment->phys; - - /* Insert the segment into the descriptor segments list. */ - list_add_tail(&segment->node, &desc->segments); - - prev = segment; - - /* Link the last hardware descriptor with the first. */ - segment = list_first_entry(&desc->segments, - struct xilinx_cdma_tx_segment, node); - desc->async_tx.phys = segment->phys; - prev->hw.next_desc = segment->phys; - - return &desc->async_tx; - -error: - xilinx_dma_free_tx_descriptor(chan, desc); - return NULL; -} - -/** - * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction - * @dchan: DMA channel - * @sgl: scatterlist to transfer to/from - * @sg_len: number of entries in @scatterlist - * @direction: DMA direction - * @flags: transfer ack flags - * @context: APP words of the descriptor - * - * Return: Async transaction descriptor on success and NULL on failure - */ -static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg( - struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len, - enum dma_transfer_direction direction, unsigned long flags, - void *context) -{ - struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); - struct xilinx_dma_tx_descriptor *desc; - struct xilinx_axidma_tx_segment *segment = NULL, *prev = NULL; - u32 *app_w = (u32 *)context; - struct scatterlist *sg; - size_t copy; - size_t sg_used; - unsigned int i; - - if (!is_slave_direction(direction)) - return NULL; - - /* Allocate a transaction descriptor. 
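
xilinx_cdma_prep_memcpy() above backs the generic memcpy capability; a client drives it through the dmaengine_prep_dma_memcpy() wrapper (or chan->device->device_prep_dma_memcpy() directly on kernels without the wrapper). A minimal sketch with hypothetical, already DMA-mapped addresses:

#include <linux/dmaengine.h>

/* Illustrative: offload a copy between two DMA-mapped buffers. */
static int client_memcpy(struct dma_chan *chan, dma_addr_t dst,
			 dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;

	/* len must be <= XILINX_DMA_MAX_TRANS_LEN for a single segment */
	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx)
		return -EINVAL;

	dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	return 0;
}
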
*/ - desc = xilinx_dma_alloc_tx_descriptor(chan); - if (!desc) - return NULL; - - dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); - desc->async_tx.tx_submit = xilinx_dma_tx_submit; - - /* Build transactions using information in the scatter gather list */ - for_each_sg(sgl, sg, sg_len, i) { - sg_used = 0; - - /* Loop until the entire scatterlist entry is used */ - while (sg_used < sg_dma_len(sg)) { - struct xilinx_axidma_desc_hw *hw; - - /* Get a free segment */ - segment = xilinx_axidma_alloc_tx_segment(chan); - if (!segment) - goto error; - - /* - * Calculate the maximum number of bytes to transfer, - * making sure it is less than the hw limit - */ - copy = min_t(size_t, sg_dma_len(sg) - sg_used, - XILINX_DMA_MAX_TRANS_LEN); - hw = &segment->hw; - - /* Fill in the descriptor */ - xilinx_axidma_buf(chan, hw, sg_dma_address(sg), - sg_used, 0); - - hw->control = copy; - - if (chan->direction == DMA_MEM_TO_DEV) { - if (app_w) - memcpy(hw->app, app_w, sizeof(u32) * - XILINX_DMA_NUM_APP_WORDS); - } - - if (prev) - prev->hw.next_desc = segment->phys; - - prev = segment; - sg_used += copy; - - /* - * Insert the segment into the descriptor segments - * list. - */ - list_add_tail(&segment->node, &desc->segments); - } - } - - segment = list_first_entry(&desc->segments, - struct xilinx_axidma_tx_segment, node); - desc->async_tx.phys = segment->phys; - prev->hw.next_desc = segment->phys; - - /* For the last DMA_MEM_TO_DEV transfer, set EOP */ - if (chan->direction == DMA_MEM_TO_DEV) { - segment->hw.control |= XILINX_DMA_BD_SOP; - segment = list_last_entry(&desc->segments, - struct xilinx_axidma_tx_segment, - node); - segment->hw.control |= XILINX_DMA_BD_EOP; - } - - return &desc->async_tx; - -error: - xilinx_dma_free_tx_descriptor(chan, desc); - return NULL; -} - -/** - * xilinx_dma_prep_dma_cyclic - prepare descriptors for a DMA_SLAVE transaction - * @chan: DMA channel - * @sgl: scatterlist to transfer to/from - * @sg_len: number of entries in @scatterlist - * @direction: DMA direction - * @flags: transfer ack flags - */ -static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic( - struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len, - size_t period_len, enum dma_transfer_direction direction, - unsigned long flags) -{ - struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); - struct xilinx_dma_tx_descriptor *desc; - struct xilinx_axidma_tx_segment *segment, *head_segment, *prev = NULL; - size_t copy, sg_used; - unsigned int num_periods; - int i; - u32 reg; - - if (!period_len) - return NULL; - - num_periods = buf_len / period_len; - - if (!num_periods) - return NULL; - - if (!is_slave_direction(direction)) - return NULL; - - /* Allocate a transaction descriptor. 
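
Because hw->control carries the byte count and XILINX_DMA_MAX_TRANS_LEN is GENMASK(22, 0), i.e. 8388607 bytes, the min_t() loop above splits large scatterlist entries across several buffer descriptors. A worked example:

/* A 10 MiB (10485760-byte) scatterlist entry becomes two segments:
 *   segment 1: 8388607 bytes (the hardware limit)
 *   segment 2: 2097153 bytes (the remainder)
 * Both are chained through hw.next_desc before submission.
 */
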
*/ - desc = xilinx_dma_alloc_tx_descriptor(chan); - if (!desc) - return NULL; - - chan->direction = direction; - dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); - desc->async_tx.tx_submit = xilinx_dma_tx_submit; - - for (i = 0; i < num_periods; ++i) { - sg_used = 0; - - while (sg_used < period_len) { - struct xilinx_axidma_desc_hw *hw; - - /* Get a free segment */ - segment = xilinx_axidma_alloc_tx_segment(chan); - if (!segment) - goto error; - - /* - * Calculate the maximum number of bytes to transfer, - * making sure it is less than the hw limit - */ - copy = min_t(size_t, period_len - sg_used, - XILINX_DMA_MAX_TRANS_LEN); - hw = &segment->hw; - xilinx_axidma_buf(chan, hw, buf_addr, sg_used, - period_len * i); - hw->control = copy; - - if (prev) - prev->hw.next_desc = segment->phys; - - prev = segment; - sg_used += copy; - - /* - * Insert the segment into the descriptor segments - * list. - */ - list_add_tail(&segment->node, &desc->segments); - } - } - - head_segment = list_first_entry(&desc->segments, - struct xilinx_axidma_tx_segment, node); - desc->async_tx.phys = head_segment->phys; - - desc->cyclic = true; - reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); - reg |= XILINX_DMA_CR_CYCLIC_BD_EN_MASK; - dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg); - - /* For the last DMA_MEM_TO_DEV transfer, set EOP */ - if (direction == DMA_MEM_TO_DEV) { - head_segment->hw.control |= XILINX_DMA_BD_SOP; - segment = list_last_entry(&desc->segments, - struct xilinx_axidma_tx_segment, - node); - segment->hw.control |= XILINX_DMA_BD_EOP; - segment->hw.next_desc = (u32) head_segment->phys; - } - - return &desc->async_tx; - -error: - xilinx_dma_free_tx_descriptor(chan, desc); - return NULL; -} - -/** - * xilinx_dma_prep_interleaved - prepare a descriptor for a - * DMA_SLAVE transaction - * @dchan: DMA channel - * @xt: Interleaved template pointer - * @flags: transfer ack flags - * - * Return: Async transaction descriptor on success and NULL on failure - */ -static struct dma_async_tx_descriptor * -xilinx_dma_prep_interleaved(struct dma_chan *dchan, - struct dma_interleaved_template *xt, - unsigned long flags) -{ - struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); - struct xilinx_dma_tx_descriptor *desc; - struct xilinx_axidma_tx_segment *segment; - struct xilinx_axidma_desc_hw *hw; - - if (!is_slave_direction(xt->dir)) - return NULL; - - if (!xt->numf || !xt->sgl[0].size) - return NULL; - - if (xt->frame_size != 1) - return NULL; - - /* Allocate a transaction descriptor. */ - desc = xilinx_dma_alloc_tx_descriptor(chan); - if (!desc) - return NULL; - - chan->direction = xt->dir; - dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); - desc->async_tx.tx_submit = xilinx_dma_tx_submit; - - /* Get a free segment */ - segment = xilinx_axidma_alloc_tx_segment(chan); - if (!segment) - goto error; - - hw = &segment->hw; - - /* Fill in the descriptor */ - if (xt->dir != DMA_MEM_TO_DEV) - hw->buf_addr = xt->dst_start; - else - hw->buf_addr = xt->src_start; - - hw->mcdma_control = chan->tdest & XILINX_DMA_BD_TDEST_MASK; - hw->vsize_stride = (xt->numf << XILINX_DMA_BD_VSIZE_SHIFT) & - XILINX_DMA_BD_VSIZE_MASK; - hw->vsize_stride |= (xt->sgl[0].icg + xt->sgl[0].size) & - XILINX_DMA_BD_STRIDE_MASK; - hw->control = xt->sgl[0].size & XILINX_DMA_BD_HSIZE_MASK; - - /* - * Insert the segment into the descriptor segments - * list. 
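
The cyclic path above is what DMA_CYCLIC clients (typically audio-style ring buffers) drive. A minimal sketch of setting one up, with hypothetical period sizes:

#include <linux/dmaengine.h>

/* Illustrative: a 4-period ring buffer; the callback fires once per
 * period and the BD ring loops until the channel is terminated.
 */
static int client_start_cyclic(struct dma_chan *chan, dma_addr_t buf,
			       void (*cb)(void *), void *cb_arg)
{
	size_t period = 4096, periods = 4;
	struct dma_async_tx_descriptor *tx;

	tx = dmaengine_prep_dma_cyclic(chan, buf, period * periods, period,
				       DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!tx)
		return -ENOMEM;

	tx->callback = cb;
	tx->callback_param = cb_arg;
	dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	return 0;
}

Note that xilinx_dma_tx_submit() above returns -EBUSY while chan->cyclic is set, so submitting a new transfer requires dmaengine_terminate_all() first, which also clears the CYCLIC_BD_EN bit in DMACR.
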
- */ - list_add_tail(&segment->node, &desc->segments); - - - segment = list_first_entry(&desc->segments, - struct xilinx_axidma_tx_segment, node); - desc->async_tx.phys = segment->phys; - - /* For the last DMA_MEM_TO_DEV transfer, set EOP */ - if (xt->dir == DMA_MEM_TO_DEV) { - segment->hw.control |= XILINX_DMA_BD_SOP; - segment = list_last_entry(&desc->segments, - struct xilinx_axidma_tx_segment, - node); - segment->hw.control |= XILINX_DMA_BD_EOP; - } - - return &desc->async_tx; - -error: - xilinx_dma_free_tx_descriptor(chan, desc); - return NULL; -} - -/** - * xilinx_dma_terminate_all - Halt the channel and free descriptors - * @chan: Driver specific DMA Channel pointer - */ -static int xilinx_dma_terminate_all(struct dma_chan *dchan) -{ - struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); - u32 reg; - - if (chan->cyclic) - xilinx_dma_chan_reset(chan); - - /* Halt the DMA engine */ - xilinx_dma_halt(chan); - - /* Remove and free all of the descriptors in the lists */ - xilinx_dma_free_descriptors(chan); - - if (chan->cyclic) { - reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); - reg &= ~XILINX_DMA_CR_CYCLIC_BD_EN_MASK; - dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg); - chan->cyclic = false; - } - - return 0; -} - -/** - * xilinx_dma_channel_set_config - Configure VDMA channel - * Run-time configuration for Axi VDMA, supports: - * . halt the channel - * . configure interrupt coalescing and inter-packet delay threshold - * . start/stop parking - * . enable genlock - * - * @dchan: DMA channel - * @cfg: VDMA device configuration pointer - * - * Return: '0' on success and failure value on error - */ -int xilinx_vdma_channel_set_config(struct dma_chan *dchan, - struct xilinx_vdma_config *cfg) -{ - struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); - u32 dmacr; - - if (cfg->reset) - return xilinx_dma_chan_reset(chan); - - dmacr = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); - - chan->config.frm_dly = cfg->frm_dly; - chan->config.park = cfg->park; - - /* genlock settings */ - chan->config.gen_lock = cfg->gen_lock; - chan->config.master = cfg->master; - - if (cfg->gen_lock && chan->genlock) { - dmacr |= XILINX_DMA_DMACR_GENLOCK_EN; - dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT; - } - - chan->config.frm_cnt_en = cfg->frm_cnt_en; - if (cfg->park) - chan->config.park_frm = cfg->park_frm; - else - chan->config.park_frm = -1; - - chan->config.coalesc = cfg->coalesc; - chan->config.delay = cfg->delay; - - if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) { - dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT; - chan->config.coalesc = cfg->coalesc; - } - - if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) { - dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT; - chan->config.delay = cfg->delay; - } - - /* FSync Source selection */ - dmacr &= ~XILINX_DMA_DMACR_FSYNCSRC_MASK; - dmacr |= cfg->ext_fsync << XILINX_DMA_DMACR_FSYNCSRC_SHIFT; - - dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, dmacr); - - return 0; -} -EXPORT_SYMBOL(xilinx_vdma_channel_set_config); - -/* ----------------------------------------------------------------------------- - * Probe and remove - */ - -/** - * xilinx_dma_chan_remove - Per Channel remove function - * @chan: Driver specific DMA channel - */ -static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan) -{ - /* Disable all interrupts */ - dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, - XILINX_DMA_DMAXR_ALL_IRQ_MASK); - - if (chan->irq > 0) - free_irq(chan->irq, chan); - - tasklet_kill(&chan->tasklet); - - list_del(&chan->common.device_node); -} - -static 
int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk, - struct clk **tx_clk, struct clk **rx_clk, - struct clk **sg_clk, struct clk **tmp_clk) -{ - int err; - - *tmp_clk = NULL; - - *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk"); - if (IS_ERR(*axi_clk)) { - err = PTR_ERR(*axi_clk); - dev_err(&pdev->dev, "failed to get axi_aclk (%u)\n", err); - return err; - } - - *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk"); - if (IS_ERR(*tx_clk)) - *tx_clk = NULL; - - *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk"); - if (IS_ERR(*rx_clk)) - *rx_clk = NULL; - - *sg_clk = devm_clk_get(&pdev->dev, "m_axi_sg_aclk"); - if (IS_ERR(*sg_clk)) - *sg_clk = NULL; - - err = clk_prepare_enable(*axi_clk); - if (err) { - dev_err(&pdev->dev, "failed to enable axi_clk (%u)\n", err); - return err; - } - - err = clk_prepare_enable(*tx_clk); - if (err) { - dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err); - goto err_disable_axiclk; - } - - err = clk_prepare_enable(*rx_clk); - if (err) { - dev_err(&pdev->dev, "failed to enable rx_clk (%u)\n", err); - goto err_disable_txclk; - } - - err = clk_prepare_enable(*sg_clk); - if (err) { - dev_err(&pdev->dev, "failed to enable sg_clk (%u)\n", err); - goto err_disable_rxclk; - } - - return 0; - -err_disable_rxclk: - clk_disable_unprepare(*rx_clk); -err_disable_txclk: - clk_disable_unprepare(*tx_clk); -err_disable_axiclk: - clk_disable_unprepare(*axi_clk); - - return err; -} - -static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk, - struct clk **dev_clk, struct clk **tmp_clk, - struct clk **tmp1_clk, struct clk **tmp2_clk) -{ - int err; - - *tmp_clk = NULL; - *tmp1_clk = NULL; - *tmp2_clk = NULL; - - *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk"); - if (IS_ERR(*axi_clk)) { - err = PTR_ERR(*axi_clk); - dev_err(&pdev->dev, "failed to get axi_clk (%u)\n", err); - return err; - } - - *dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk"); - if (IS_ERR(*dev_clk)) { - err = PTR_ERR(*dev_clk); - dev_err(&pdev->dev, "failed to get dev_clk (%u)\n", err); - return err; - } - - err = clk_prepare_enable(*axi_clk); - if (err) { - dev_err(&pdev->dev, "failed to enable axi_clk (%u)\n", err); - return err; - } - - err = clk_prepare_enable(*dev_clk); - if (err) { - dev_err(&pdev->dev, "failed to enable dev_clk (%u)\n", err); - goto err_disable_axiclk; - } - - return 0; - -err_disable_axiclk: - clk_disable_unprepare(*axi_clk); - - return err; -} - -static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk, - struct clk **tx_clk, struct clk **txs_clk, - struct clk **rx_clk, struct clk **rxs_clk) -{ - int err; - - *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk"); - if (IS_ERR(*axi_clk)) { - err = PTR_ERR(*axi_clk); - dev_err(&pdev->dev, "failed to get axi_aclk (%u)\n", err); - return err; - } - - *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk"); - if (IS_ERR(*tx_clk)) - *tx_clk = NULL; - - *txs_clk = devm_clk_get(&pdev->dev, "m_axis_mm2s_aclk"); - if (IS_ERR(*txs_clk)) - *txs_clk = NULL; - - *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk"); - if (IS_ERR(*rx_clk)) - *rx_clk = NULL; - - *rxs_clk = devm_clk_get(&pdev->dev, "s_axis_s2mm_aclk"); - if (IS_ERR(*rxs_clk)) - *rxs_clk = NULL; - - err = clk_prepare_enable(*axi_clk); - if (err) { - dev_err(&pdev->dev, "failed to enable axi_clk (%u)\n", err); - return err; - } - - err = clk_prepare_enable(*tx_clk); - if (err) { - dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err); - goto err_disable_axiclk; - } - - err = 
clk_prepare_enable(*txs_clk); - if (err) { - dev_err(&pdev->dev, "failed to enable txs_clk (%u)\n", err); - goto err_disable_txclk; - } - - err = clk_prepare_enable(*rx_clk); - if (err) { - dev_err(&pdev->dev, "failed to enable rx_clk (%u)\n", err); - goto err_disable_txsclk; - } - - err = clk_prepare_enable(*rxs_clk); - if (err) { - dev_err(&pdev->dev, "failed to enable rxs_clk (%u)\n", err); - goto err_disable_rxclk; - } - - return 0; - -err_disable_rxclk: - clk_disable_unprepare(*rx_clk); -err_disable_txsclk: - clk_disable_unprepare(*txs_clk); -err_disable_txclk: - clk_disable_unprepare(*tx_clk); -err_disable_axiclk: - clk_disable_unprepare(*axi_clk); - - return err; -} - -static void xdma_disable_allclks(struct xilinx_dma_device *xdev) -{ - clk_disable_unprepare(xdev->rxs_clk); - clk_disable_unprepare(xdev->rx_clk); - clk_disable_unprepare(xdev->txs_clk); - clk_disable_unprepare(xdev->tx_clk); - clk_disable_unprepare(xdev->axi_clk); -} - -/** - * xilinx_dma_chan_probe - Per Channel Probing - * It get channel features from the device tree entry and - * initialize special channel handling routines - * - * @xdev: Driver specific device structure - * @node: Device node - * - * Return: '0' on success and failure value on error - */ -static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, - struct device_node *node, int chan_id) -{ - struct xilinx_dma_chan *chan; - bool has_dre = false; - u32 value, width; - int err; - - /* Allocate and initialize the channel structure */ - chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL); - if (!chan) - return -ENOMEM; - - chan->dev = xdev->dev; - chan->xdev = xdev; - chan->has_sg = xdev->has_sg; - chan->desc_pendingcount = 0x0; - chan->ext_addr = xdev->ext_addr; - - spin_lock_init(&chan->lock); - INIT_LIST_HEAD(&chan->pending_list); - INIT_LIST_HEAD(&chan->done_list); - INIT_LIST_HEAD(&chan->active_list); - - /* Retrieve the channel properties from the device tree */ - has_dre = of_property_read_bool(node, "xlnx,include-dre"); - - chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode"); - - err = of_property_read_u32(node, "xlnx,datawidth", &value); - if (err) { - dev_err(xdev->dev, "missing xlnx,datawidth property\n"); - return err; - } - width = value >> 3; /* Convert bits to bytes */ - - /* If data width is greater than 8 bytes, DRE is not in hw */ - if (width > 8) - has_dre = false; - - if (!has_dre) - xdev->common.copy_align = fls(width - 1); - - if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel")) { - chan->direction = DMA_MEM_TO_DEV; - chan->id = chan_id; - chan->tdest = chan_id; - - chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET; - if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { - chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET; - - if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH || - xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S) - chan->flush_on_fsync = true; - } - } else if (of_device_is_compatible(node, - "xlnx,axi-vdma-s2mm-channel")) { - chan->direction = DMA_DEV_TO_MEM; - chan->id = chan_id; - chan->tdest = chan_id - xdev->nr_channels; - - chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET; - if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { - chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET; - - if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH || - xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM) - chan->flush_on_fsync = true; - } - } else { - dev_err(xdev->dev, "Invalid channel compatible node\n"); - return -EINVAL; - } - - /* Request the interrupt */ - chan->irq = irq_of_parse_and_map(node, 0); - 
err = request_irq(chan->irq, xilinx_dma_irq_handler, IRQF_SHARED, - "xilinx-dma-controller", chan); - if (err) { - dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq); - return err; - } - - if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) - chan->start_transfer = xilinx_dma_start_transfer; - else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) - chan->start_transfer = xilinx_cdma_start_transfer; - else - chan->start_transfer = xilinx_vdma_start_transfer; - - /* Initialize the tasklet */ - tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet, - (unsigned long)chan); - - /* - * Initialize the DMA channel and add it to the DMA engine channels - * list. - */ - chan->common.device = &xdev->common; - - list_add_tail(&chan->common.device_node, &xdev->common.channels); - xdev->chan[chan->id] = chan; - - /* Reset the channel */ - err = xilinx_dma_chan_reset(chan); - if (err < 0) { - dev_err(xdev->dev, "Reset channel failed\n"); - return err; - } - - return 0; -} - -/** - * xilinx_dma_child_probe - Per child node probe - * It get number of dma-channels per child node from - * device-tree and initializes all the channels. - * - * @xdev: Driver specific device structure - * @node: Device node - * - * Return: 0 always. - */ -static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev, - struct device_node *node) { - int ret, i, nr_channels = 1; - - ret = of_property_read_u32(node, "dma-channels", &nr_channels); - if ((ret < 0) && xdev->mcdma) - dev_warn(xdev->dev, "missing dma-channels property\n"); - - for (i = 0; i < nr_channels; i++) - xilinx_dma_chan_probe(xdev, node, xdev->chan_id++); - - xdev->nr_channels += nr_channels; - - return 0; -} - -/** - * of_dma_xilinx_xlate - Translation function - * @dma_spec: Pointer to DMA specifier as found in the device tree - * @ofdma: Pointer to DMA controller data - * - * Return: DMA channel pointer on success and NULL on error - */ -static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec, - struct of_dma *ofdma) -{ - struct xilinx_dma_device *xdev = ofdma->of_dma_data; - int chan_id = dma_spec->args[0]; - - if (chan_id >= xdev->nr_channels || !xdev->chan[chan_id]) - return NULL; - - return dma_get_slave_channel(&xdev->chan[chan_id]->common); -} - -static const struct xilinx_dma_config axidma_config = { - .dmatype = XDMA_TYPE_AXIDMA, - .clk_init = axidma_clk_init, -}; - -static const struct xilinx_dma_config axicdma_config = { - .dmatype = XDMA_TYPE_CDMA, - .clk_init = axicdma_clk_init, -}; - -static const struct xilinx_dma_config axivdma_config = { - .dmatype = XDMA_TYPE_VDMA, - .clk_init = axivdma_clk_init, -}; - -static const struct of_device_id xilinx_dma_of_ids[] = { - { .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config }, - { .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config }, - { .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config }, - {} -}; -MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids); - -/** - * xilinx_dma_probe - Driver probe function - * @pdev: Pointer to the platform_device structure - * - * Return: '0' on success and failure value on error - */ -static int xilinx_dma_probe(struct platform_device *pdev) -{ - int (*clk_init)(struct platform_device *, struct clk **, struct clk **, - struct clk **, struct clk **, struct clk **) - = axivdma_clk_init; - struct device_node *node = pdev->dev.of_node; - struct xilinx_dma_device *xdev; - struct device_node *child, *np = pdev->dev.of_node; - struct resource *io; - u32 num_frames, addr_width; - int i, err; - - /* Allocate and initialize the 
DMA engine structure */ - xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL); - if (!xdev) - return -ENOMEM; - - xdev->dev = &pdev->dev; - if (np) { - const struct of_device_id *match; - - match = of_match_node(xilinx_dma_of_ids, np); - if (match && match->data) { - xdev->dma_config = match->data; - clk_init = xdev->dma_config->clk_init; - } - } - - err = clk_init(pdev, &xdev->axi_clk, &xdev->tx_clk, &xdev->txs_clk, - &xdev->rx_clk, &xdev->rxs_clk); - if (err) - return err; - - /* Request and map I/O memory */ - io = platform_get_resource(pdev, IORESOURCE_MEM, 0); - xdev->regs = devm_ioremap_resource(&pdev->dev, io); - if (IS_ERR(xdev->regs)) - return PTR_ERR(xdev->regs); - - /* Retrieve the DMA engine properties from the device tree */ - xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg"); - if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) - xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma"); - - if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { - err = of_property_read_u32(node, "xlnx,num-fstores", - &num_frames); - if (err < 0) { - dev_err(xdev->dev, - "missing xlnx,num-fstores property\n"); - return err; - } - - err = of_property_read_u32(node, "xlnx,flush-fsync", - &xdev->flush_on_fsync); - if (err < 0) - dev_warn(xdev->dev, - "missing xlnx,flush-fsync property\n"); - } - - err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width); - if (err < 0) - dev_warn(xdev->dev, "missing xlnx,addrwidth property\n"); - - if (addr_width > 32) - xdev->ext_addr = true; - else - xdev->ext_addr = false; - - /* Set the dma mask bits */ - dma_set_mask(xdev->dev, DMA_BIT_MASK(addr_width)); - - /* Initialize the DMA engine */ - xdev->common.dev = &pdev->dev; - - INIT_LIST_HEAD(&xdev->common.channels); - if (!(xdev->dma_config->dmatype == XDMA_TYPE_CDMA)) { - dma_cap_set(DMA_SLAVE, xdev->common.cap_mask); - dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask); - } - - xdev->common.device_alloc_chan_resources = - xilinx_dma_alloc_chan_resources; - xdev->common.device_free_chan_resources = - xilinx_dma_free_chan_resources; - xdev->common.device_terminate_all = xilinx_dma_terminate_all; - xdev->common.device_tx_status = xilinx_dma_tx_status; - xdev->common.device_issue_pending = xilinx_dma_issue_pending; - if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { - dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask); - xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg; - xdev->common.device_prep_dma_cyclic = - xilinx_dma_prep_dma_cyclic; - xdev->common.device_prep_interleaved_dma = - xilinx_dma_prep_interleaved; - /* Residue calculation is supported by only AXI DMA */ - xdev->common.residue_granularity = - DMA_RESIDUE_GRANULARITY_SEGMENT; - } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { - dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask); - xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy; - } else { - xdev->common.device_prep_interleaved_dma = - xilinx_vdma_dma_prep_interleaved; - } - - platform_set_drvdata(pdev, xdev); - - /* Initialize the channels */ - for_each_child_of_node(node, child) { - err = xilinx_dma_child_probe(xdev, child); - if (err < 0) - goto disable_clks; - } - - if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { - for (i = 0; i < xdev->nr_channels; i++) - if (xdev->chan[i]) - xdev->chan[i]->num_frms = num_frames; - } - - /* Register the DMA engine with the core */ - dma_async_device_register(&xdev->common); - - err = of_dma_controller_register(node, of_dma_xilinx_xlate, - xdev); - if (err < 0) { - dev_err(&pdev->dev, "Unable to register DMA 
to DT\n"); - dma_async_device_unregister(&xdev->common); - goto error; - } - - dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n"); - - return 0; - -disable_clks: - xdma_disable_allclks(xdev); -error: - for (i = 0; i < xdev->nr_channels; i++) - if (xdev->chan[i]) - xilinx_dma_chan_remove(xdev->chan[i]); - - return err; -} - -/** - * xilinx_dma_remove - Driver remove function - * @pdev: Pointer to the platform_device structure - * - * Return: Always '0' - */ -static int xilinx_dma_remove(struct platform_device *pdev) -{ - struct xilinx_dma_device *xdev = platform_get_drvdata(pdev); - int i; - - of_dma_controller_free(pdev->dev.of_node); - - dma_async_device_unregister(&xdev->common); - - for (i = 0; i < xdev->nr_channels; i++) - if (xdev->chan[i]) - xilinx_dma_chan_remove(xdev->chan[i]); - - xdma_disable_allclks(xdev); - - return 0; -} - -static struct platform_driver xilinx_vdma_driver = { - .driver = { - .name = "xilinx-vdma", - .of_match_table = xilinx_dma_of_ids, - }, - .probe = xilinx_dma_probe, - .remove = xilinx_dma_remove, -}; - -module_platform_driver(xilinx_vdma_driver); - -MODULE_AUTHOR("Xilinx, Inc."); -MODULE_DESCRIPTION("Xilinx VDMA driver"); -MODULE_LICENSE("GPL v2"); -- cgit v0.10.2 From e131f1ba6f6f177a5b28610dff270d67eedcadbf Mon Sep 17 00:00:00 2001 From: Kedareswara rao Appana Date: Fri, 24 Jun 2016 10:51:26 +0530 Subject: dmaengine: xilinx: Use different channel names for each dma Current driver assumes that child node channel name is either "xlnx,axi-vdma-mm2s-channel" or "xlnx,axi-vdma-s2mm-channel" which is confusing the users of AXI DMA and CDMA. This patch fixes this issue by using different channel names for the AXI DMA and AXI CDMA child nodes. Signed-off-by: Kedareswara rao Appana Acked-by: Rob Herring Signed-off-by: Vinod Koul diff --git a/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt b/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt index 0faa189..a2b8bfa 100644 --- a/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt +++ b/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt @@ -50,8 +50,12 @@ Optional properties for VDMA: {3}, flush s2mm channel Required child node properties: -- compatible: It should be either "xlnx,axi-vdma-mm2s-channel" or +- compatible: + For VDMA: It should be either "xlnx,axi-vdma-mm2s-channel" or "xlnx,axi-vdma-s2mm-channel". + For CDMA: It should be "xlnx,axi-cdma-channel". + For AXIDMA: It should be either "xlnx,axi-dma-mm2s-channel" or + "xlnx,axi-dma-s2mm-channel". - interrupts: Should contain per channel VDMA interrupts. - xlnx,datawidth: Should contain the stream data width, take values {32,64...1024}. 
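For illustration, a child node using one of the new AXI DMA channel
compatibles might look like this (a sketch only; the unit address,
interrupt specifier and data width are invented for the example, not
taken from this patch):

	dma-channel@40400000 {
		compatible = "xlnx,axi-dma-mm2s-channel";
		interrupts = <0 59 4>;
		xlnx,datawidth = <0x40>;
	};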
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c index 0768d9f..cf47347 100644 --- a/drivers/dma/xilinx/xilinx_dma.c +++ b/drivers/dma/xilinx/xilinx_dma.c @@ -2353,7 +2353,9 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, if (!has_dre) xdev->common.copy_align = fls(width - 1); - if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel")) { + if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") || + of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") || + of_device_is_compatible(node, "xlnx,axi-cdma-channel")) { chan->direction = DMA_MEM_TO_DEV; chan->id = chan_id; chan->tdest = chan_id; @@ -2367,7 +2369,9 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, chan->flush_on_fsync = true; } } else if (of_device_is_compatible(node, - "xlnx,axi-vdma-s2mm-channel")) { + "xlnx,axi-vdma-s2mm-channel") || + of_device_is_compatible(node, + "xlnx,axi-dma-s2mm-channel")) { chan->direction = DMA_DEV_TO_MEM; chan->id = chan_id; chan->tdest = chan_id - xdev->nr_channels; -- cgit v0.10.2 From 3544d2878817bd139dda238cdd86a15e1c03d037 Mon Sep 17 00:00:00 2001 From: Muhammad Hamza Farooq Date: Thu, 30 Jun 2016 17:15:15 +0200 Subject: dmaengine: rcar-dmac: use result of updated get_residue in tx_status MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The hardware might have complete the transfer but the interrupt handler might not have had a chance to run. If rcar_dmac_chan_get_residue() which reads HW registers finds that there is no residue return DMA_COMPLETE. Signed-off-by: Muhammad Hamza Farooq Signed-off-by: Geert Uytterhoeven [Niklas: add explanation in commit message] Signed-off-by: Niklas Söderlund Acked-by: Laurent Pinchart Signed-off-by: Vinod Koul diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c index 433e982..74c9563 100644 --- a/drivers/dma/sh/rcar-dmac.c +++ b/drivers/dma/sh/rcar-dmac.c @@ -1202,6 +1202,10 @@ static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan, residue = rcar_dmac_chan_get_residue(rchan, cookie); spin_unlock_irqrestore(&rchan->lock, flags); + /* if there's no residue, the cookie is complete */ + if (!residue) + return DMA_COMPLETE; + dma_set_residue(txstate, residue); return status; -- cgit v0.10.2 From 0f78e3b58f5f99c991613db4477f893b53da5520 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Niklas=20S=C3=B6derlund?= Date: Thu, 30 Jun 2016 17:15:16 +0200 Subject: dmaengine: rcar-dmac: warn if transfer cannot start as TE = 1 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The documentation states one should make sure both DE and TE are cleared before starting a transaction. This patch extends the current warning to look at both DE and TE. Based on previous work from Muhammad Hamza Farooq. 
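The resulting helper can be sketched as follows (illustrative only; it
mirrors the one-line change in the diff below):

	static bool rcar_dmac_chan_is_busy(struct rcar_dmac_chan *chan)
	{
		u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);

		/* The channel is busy while either DE or TE is still set. */
		return !!(chcr & (RCAR_DMACHCR_DE | RCAR_DMACHCR_TE));
	}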
Suggested-by: Muhammad Hamza Farooq Signed-off-by: Niklas Söderlund Acked-by: Laurent Pinchart Signed-off-by: Vinod Koul diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c index 74c9563..72e7dab 100644 --- a/drivers/dma/sh/rcar-dmac.c +++ b/drivers/dma/sh/rcar-dmac.c @@ -311,7 +311,7 @@ static bool rcar_dmac_chan_is_busy(struct rcar_dmac_chan *chan) { u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR); - return (chcr & (RCAR_DMACHCR_DE | RCAR_DMACHCR_TE)) == RCAR_DMACHCR_DE; + return !!(chcr & (RCAR_DMACHCR_DE | RCAR_DMACHCR_TE)); } static void rcar_dmac_chan_start_xfer(struct rcar_dmac_chan *chan) -- cgit v0.10.2 From 48c73659abae103a2f8531f825ce7a3f8dedbb39 Mon Sep 17 00:00:00 2001 From: Muhammad Hamza Farooq Date: Thu, 30 Jun 2016 17:15:17 +0200 Subject: dmaengine: rcar-dmac: Fixed active descriptor initializing MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Running descriptor pointer is set to NULL upon freeing resources. Other- wise, rcar_dmac_issue_pending might not start new transfers Signed-off-by: Muhammad Hamza Farooq Signed-off-by: Geert Uytterhoeven Signed-off-by: Niklas Söderlund Acked-by: Laurent Pinchart Signed-off-by: Vinod Koul diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c index 72e7dab..561476c 100644 --- a/drivers/dma/sh/rcar-dmac.c +++ b/drivers/dma/sh/rcar-dmac.c @@ -990,6 +990,8 @@ static void rcar_dmac_free_chan_resources(struct dma_chan *chan) list_splice_init(&rchan->desc.done, &list); list_splice_init(&rchan->desc.wait, &list); + rchan->desc.running = NULL; + list_for_each_entry(desc, &list, node) rcar_dmac_realloc_hwdesc(rchan, desc, 0); -- cgit v0.10.2 From 55bd582b4d8c2266bc43cbae2ddfce31b489618f Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Thu, 30 Jun 2016 17:15:18 +0200 Subject: dmaengine: rcar-dmac: Fix residue reporting for pending descriptors MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Cookies corresponding to pending transfers have a residue value equal to the full size of the corresponding descriptor. The driver miscomputes that and uses the size of the active descriptor instead. Fix it. Reported-by: Geert Uytterhoeven Signed-off-by: Laurent Pinchart [geert: Also check desc.active list] Signed-off-by: Geert Uytterhoeven Signed-off-by: Niklas Söderlund Signed-off-by: Vinod Koul diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c index 561476c..0dd9538 100644 --- a/drivers/dma/sh/rcar-dmac.c +++ b/drivers/dma/sh/rcar-dmac.c @@ -1145,6 +1145,7 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan, struct rcar_dmac_desc *desc = chan->desc.running; struct rcar_dmac_xfer_chunk *running = NULL; struct rcar_dmac_xfer_chunk *chunk; + enum dma_status status; unsigned int residue = 0; unsigned int dptr = 0; @@ -1152,12 +1153,38 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan, return 0; /* + * If the cookie corresponds to a descriptor that has been completed + * there is no residue. The same check has already been performed by the + * caller but without holding the channel lock, so the descriptor could + * now be complete. + */ + status = dma_cookie_status(&chan->chan, cookie, NULL); + if (status == DMA_COMPLETE) + return 0; + + /* * If the cookie doesn't correspond to the currently running transfer * then the descriptor hasn't been processed yet, and the residue is * equal to the full descriptor size. 
*/ - if (cookie != desc->async_tx.cookie) - return desc->size; + if (cookie != desc->async_tx.cookie) { + list_for_each_entry(desc, &chan->desc.pending, node) { + if (cookie == desc->async_tx.cookie) + return desc->size; + } + list_for_each_entry(desc, &chan->desc.active, node) { + if (cookie == desc->async_tx.cookie) + return desc->size; + } + + /* + * No descriptor found for the cookie, there's thus no residue. + * This shouldn't happen if the calling driver passes a correct + * cookie value. + */ + WARN(1, "No descriptor for cookie!"); + return 0; + } /* * In descriptor mode the descriptor running pointer is not maintained -- cgit v0.10.2 From 805abc5f2f5d374086114f941e5bb6cb02e0c955 Mon Sep 17 00:00:00 2001 From: Kedareswara rao Appana Date: Fri, 1 Jul 2016 17:07:05 +0530 Subject: Documentation: DT: dma: Add Xilinx zynqmp dma device tree binding documentation Device-tree binding documentation for Xilinx zynqmp dma engine used in Zynq UltraScale+ MPSoC. Acked-by: Rob Herring Signed-off-by: Punnaiah Choudary Kalluri Signed-off-by: Kedareswara rao Appana Signed-off-by: Vinod Koul diff --git a/Documentation/devicetree/bindings/dma/xilinx/zynqmp_dma.txt b/Documentation/devicetree/bindings/dma/xilinx/zynqmp_dma.txt new file mode 100644 index 0000000..a784cdd --- /dev/null +++ b/Documentation/devicetree/bindings/dma/xilinx/zynqmp_dma.txt @@ -0,0 +1,27 @@ +Xilinx ZynqMP DMA engine, it does support memory to memory transfers, +memory to device and device to memory transfers. It also has flow +control and rate control support for slave/peripheral dma access. + +Required properties: +- compatible : Should be "xlnx,zynqmp-dma-1.0" +- reg : Memory map for gdma/adma module access. +- interrupt-parent : Interrupt controller the interrupt is routed through +- interrupts : Should contain DMA channel interrupt. +- xlnx,bus-width : Axi buswidth in bits. Should contain 128 or 64 +- clock-names : List of input clocks "clk_main", "clk_apb" + (see clock bindings for details) + +Optional properties: +- dma-coherent : Present if dma operations are coherent. + +Example: +++++++++ +fpd_dma_chan1: dma@fd500000 { + compatible = "xlnx,zynqmp-dma-1.0"; + reg = <0x0 0xFD500000 0x1000>; + interrupt-parent = <&gic>; + interrupts = <0 117 4>; + clock-names = "clk_main", "clk_apb"; + xlnx,bus-width = <128>; + dma-coherent; +}; -- cgit v0.10.2 From b0cc417c1637192be658e68a74c8d1568e3d35f6 Mon Sep 17 00:00:00 2001 From: Kedareswara rao Appana Date: Fri, 1 Jul 2016 17:07:06 +0530 Subject: dmaengine: Add Xilinx zynqmp dma engine driver support Added the driver for zynqmp dma engine used in Zynq UltraScale+ MPSoC. This dma controller supports memory to memory and I/O to I/O buffer transfers. Signed-off-by: Kedareswara rao Appana Signed-off-by: Vinod Koul diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 1f39f3e..cde83f4 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -538,6 +538,13 @@ config XILINX_DMA AXI DMA engine provides high-bandwidth one dimensional direct memory access between memory and AXI4-Stream target peripherals. +config XILINX_ZYNQMP_DMA + tristate "Xilinx ZynqMP DMA Engine" + depends on (ARCH_ZYNQ || MICROBLAZE || ARM64) + select DMA_ENGINE + help + Enable support for Xilinx ZynqMP DMA controller. 
+ config ZX_DMA tristate "ZTE ZX296702 DMA support" depends on ARCH_ZX diff --git a/drivers/dma/xilinx/Makefile b/drivers/dma/xilinx/Makefile index af9e69a4..9e91f8f 100644 --- a/drivers/dma/xilinx/Makefile +++ b/drivers/dma/xilinx/Makefile @@ -1 +1,2 @@ obj-$(CONFIG_XILINX_DMA) += xilinx_dma.o +obj-$(CONFIG_XILINX_ZYNQMP_DMA) += zynqmp_dma.o diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c new file mode 100644 index 0000000..59bc1f7 --- /dev/null +++ b/drivers/dma/xilinx/zynqmp_dma.c @@ -0,0 +1,1145 @@ +/* + * DMA driver for Xilinx ZynqMP DMA Engine + * + * Copyright (C) 2016 Xilinx, Inc. All rights reserved. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../dmaengine.h" + +/* Register Offsets */ +#define ZYNQMP_DMA_ISR 0x100 +#define ZYNQMP_DMA_IMR 0x104 +#define ZYNQMP_DMA_IER 0x108 +#define ZYNQMP_DMA_IDS 0x10C +#define ZYNQMP_DMA_CTRL0 0x110 +#define ZYNQMP_DMA_CTRL1 0x114 +#define ZYNQMP_DMA_DATA_ATTR 0x120 +#define ZYNQMP_DMA_DSCR_ATTR 0x124 +#define ZYNQMP_DMA_SRC_DSCR_WRD0 0x128 +#define ZYNQMP_DMA_SRC_DSCR_WRD1 0x12C +#define ZYNQMP_DMA_SRC_DSCR_WRD2 0x130 +#define ZYNQMP_DMA_SRC_DSCR_WRD3 0x134 +#define ZYNQMP_DMA_DST_DSCR_WRD0 0x138 +#define ZYNQMP_DMA_DST_DSCR_WRD1 0x13C +#define ZYNQMP_DMA_DST_DSCR_WRD2 0x140 +#define ZYNQMP_DMA_DST_DSCR_WRD3 0x144 +#define ZYNQMP_DMA_SRC_START_LSB 0x158 +#define ZYNQMP_DMA_SRC_START_MSB 0x15C +#define ZYNQMP_DMA_DST_START_LSB 0x160 +#define ZYNQMP_DMA_DST_START_MSB 0x164 +#define ZYNQMP_DMA_RATE_CTRL 0x18C +#define ZYNQMP_DMA_IRQ_SRC_ACCT 0x190 +#define ZYNQMP_DMA_IRQ_DST_ACCT 0x194 +#define ZYNQMP_DMA_CTRL2 0x200 + +/* Interrupt registers bit field definitions */ +#define ZYNQMP_DMA_DONE BIT(10) +#define ZYNQMP_DMA_AXI_WR_DATA BIT(9) +#define ZYNQMP_DMA_AXI_RD_DATA BIT(8) +#define ZYNQMP_DMA_AXI_RD_DST_DSCR BIT(7) +#define ZYNQMP_DMA_AXI_RD_SRC_DSCR BIT(6) +#define ZYNQMP_DMA_IRQ_DST_ACCT_ERR BIT(5) +#define ZYNQMP_DMA_IRQ_SRC_ACCT_ERR BIT(4) +#define ZYNQMP_DMA_BYTE_CNT_OVRFL BIT(3) +#define ZYNQMP_DMA_DST_DSCR_DONE BIT(2) +#define ZYNQMP_DMA_INV_APB BIT(0) + +/* Control 0 register bit field definitions */ +#define ZYNQMP_DMA_OVR_FETCH BIT(7) +#define ZYNQMP_DMA_POINT_TYPE_SG BIT(6) +#define ZYNQMP_DMA_RATE_CTRL_EN BIT(3) + +/* Control 1 register bit field definitions */ +#define ZYNQMP_DMA_SRC_ISSUE GENMASK(4, 0) + +/* Data Attribute register bit field definitions */ +#define ZYNQMP_DMA_ARBURST GENMASK(27, 26) +#define ZYNQMP_DMA_ARCACHE GENMASK(25, 22) +#define ZYNQMP_DMA_ARCACHE_OFST 22 +#define ZYNQMP_DMA_ARQOS GENMASK(21, 18) +#define ZYNQMP_DMA_ARQOS_OFST 18 +#define ZYNQMP_DMA_ARLEN GENMASK(17, 14) +#define ZYNQMP_DMA_ARLEN_OFST 14 +#define ZYNQMP_DMA_AWBURST GENMASK(13, 12) +#define ZYNQMP_DMA_AWCACHE GENMASK(11, 8) +#define ZYNQMP_DMA_AWCACHE_OFST 8 +#define ZYNQMP_DMA_AWQOS GENMASK(7, 4) +#define ZYNQMP_DMA_AWQOS_OFST 4 +#define ZYNQMP_DMA_AWLEN GENMASK(3, 0) +#define ZYNQMP_DMA_AWLEN_OFST 0 + +/* Descriptor Attribute register bit field definitions */ +#define ZYNQMP_DMA_AXCOHRNT BIT(8) +#define ZYNQMP_DMA_AXCACHE GENMASK(7, 4) +#define ZYNQMP_DMA_AXCACHE_OFST 4 +#define ZYNQMP_DMA_AXQOS GENMASK(3, 0) +#define ZYNQMP_DMA_AXQOS_OFST 0 
+ +/* Control register 2 bit field definitions */ +#define ZYNQMP_DMA_ENABLE BIT(0) + +/* Buffer Descriptor definitions */ +#define ZYNQMP_DMA_DESC_CTRL_STOP 0x10 +#define ZYNQMP_DMA_DESC_CTRL_COMP_INT 0x4 +#define ZYNQMP_DMA_DESC_CTRL_SIZE_256 0x2 +#define ZYNQMP_DMA_DESC_CTRL_COHRNT 0x1 + +/* Interrupt Mask specific definitions */ +#define ZYNQMP_DMA_INT_ERR (ZYNQMP_DMA_AXI_RD_DATA | \ + ZYNQMP_DMA_AXI_WR_DATA | \ + ZYNQMP_DMA_AXI_RD_DST_DSCR | \ + ZYNQMP_DMA_AXI_RD_SRC_DSCR | \ + ZYNQMP_DMA_INV_APB) +#define ZYNQMP_DMA_INT_OVRFL (ZYNQMP_DMA_BYTE_CNT_OVRFL | \ + ZYNQMP_DMA_IRQ_SRC_ACCT_ERR | \ + ZYNQMP_DMA_IRQ_DST_ACCT_ERR) +#define ZYNQMP_DMA_INT_DONE (ZYNQMP_DMA_DONE | ZYNQMP_DMA_DST_DSCR_DONE) +#define ZYNQMP_DMA_INT_EN_DEFAULT_MASK (ZYNQMP_DMA_INT_DONE | \ + ZYNQMP_DMA_INT_ERR | \ + ZYNQMP_DMA_INT_OVRFL | \ + ZYNQMP_DMA_DST_DSCR_DONE) + +/* Max number of descriptors per channel */ +#define ZYNQMP_DMA_NUM_DESCS 32 + +/* Max transfer size per descriptor */ +#define ZYNQMP_DMA_MAX_TRANS_LEN 0x40000000 + +/* Reset values for data attributes */ +#define ZYNQMP_DMA_AXCACHE_VAL 0xF +#define ZYNQMP_DMA_ARLEN_RST_VAL 0xF +#define ZYNQMP_DMA_AWLEN_RST_VAL 0xF + +#define ZYNQMP_DMA_SRC_ISSUE_RST_VAL 0x1F + +#define ZYNQMP_DMA_IDS_DEFAULT_MASK 0xFFF + +/* Bus width in bits */ +#define ZYNQMP_DMA_BUS_WIDTH_64 64 +#define ZYNQMP_DMA_BUS_WIDTH_128 128 + +#define ZYNQMP_DMA_DESC_SIZE(chan) (chan->desc_size) + +#define to_chan(chan) container_of(chan, struct zynqmp_dma_chan, \ + common) +#define tx_to_desc(tx) container_of(tx, struct zynqmp_dma_desc_sw, \ + async_tx) + +/** + * struct zynqmp_dma_desc_ll - Hw linked list descriptor + * @addr: Buffer address + * @size: Size of the buffer + * @ctrl: Control word + * @nxtdscraddr: Next descriptor base address + * @rsvd: Reserved field and for Hw internal use. 
+ */
+struct zynqmp_dma_desc_ll {
+	u64 addr;
+	u32 size;
+	u32 ctrl;
+	u64 nxtdscraddr;
+	u64 rsvd;
+} __aligned(64);
+
+/**
+ * struct zynqmp_dma_desc_sw - Per Transaction structure
+ * @src: Source address for simple mode dma
+ * @dst: Destination address for simple mode dma
+ * @len: Transfer length for simple mode dma
+ * @node: Node in the channel descriptor list
+ * @tx_list: List head for the current transfer
+ * @async_tx: Async transaction descriptor
+ * @src_v: Virtual address of the src descriptor
+ * @src_p: Physical address of the src descriptor
+ * @dst_v: Virtual address of the dst descriptor
+ * @dst_p: Physical address of the dst descriptor
+ */
+struct zynqmp_dma_desc_sw {
+	u64 src;
+	u64 dst;
+	u32 len;
+	struct list_head node;
+	struct list_head tx_list;
+	struct dma_async_tx_descriptor async_tx;
+	struct zynqmp_dma_desc_ll *src_v;
+	dma_addr_t src_p;
+	struct zynqmp_dma_desc_ll *dst_v;
+	dma_addr_t dst_p;
+};
+
+/**
+ * struct zynqmp_dma_chan - Driver specific DMA channel structure
+ * @zdev: Driver specific device structure
+ * @regs: Control registers offset
+ * @lock: Descriptor operation lock
+ * @pending_list: Descriptors waiting
+ * @free_list: Descriptors free
+ * @active_list: Descriptors active
+ * @sw_desc_pool: SW descriptor pool
+ * @done_list: Complete descriptors
+ * @common: DMA common channel
+ * @desc_pool_v: Statically allocated descriptor base
+ * @desc_pool_p: Physical allocated descriptor base
+ * @desc_free_cnt: Descriptor available count
+ * @dev: The dma device
+ * @irq: Channel IRQ
+ * @is_dmacoherent: Tells whether dma operations are coherent or not
+ * @tasklet: Cleanup work after irq
+ * @idle: Channel status
+ * @desc_size: Size of the low level descriptor
+ * @err: Channel has errors
+ * @bus_width: Bus width
+ * @src_burst_len: Source burst length
+ * @dst_burst_len: Dest burst length
+ * @clk_main: Pointer to main clock
+ * @clk_apb: Pointer to apb clock
+ */
+struct zynqmp_dma_chan {
+	struct zynqmp_dma_device *zdev;
+	void __iomem *regs;
+	spinlock_t lock;
+	struct list_head pending_list;
+	struct list_head free_list;
+	struct list_head active_list;
+	struct zynqmp_dma_desc_sw *sw_desc_pool;
+	struct list_head done_list;
+	struct dma_chan common;
+	void *desc_pool_v;
+	dma_addr_t desc_pool_p;
+	u32 desc_free_cnt;
+	struct device *dev;
+	int irq;
+	bool is_dmacoherent;
+	struct tasklet_struct tasklet;
+	bool idle;
+	u32 desc_size;
+	bool err;
+	u32 bus_width;
+	u32 src_burst_len;
+	u32 dst_burst_len;
+	struct clk *clk_main;
+	struct clk *clk_apb;
+};
+
+/**
+ * struct zynqmp_dma_device - DMA device structure
+ * @dev: Device Structure
+ * @common: DMA device structure
+ * @chan: Driver specific DMA channel
+ */
+struct zynqmp_dma_device {
+	struct device *dev;
+	struct dma_device common;
+	struct zynqmp_dma_chan *chan;
+};
+
+static inline void zynqmp_dma_writeq(struct zynqmp_dma_chan *chan, u32 reg,
+				     u64 value)
+{
+	lo_hi_writeq(value, chan->regs + reg);
+}
+
+/**
+ * zynqmp_dma_update_desc_to_ctrlr - Updates descriptor to the controller
+ * @chan: ZynqMP DMA channel pointer
+ * @desc: Transaction descriptor pointer
+ */
+static void zynqmp_dma_update_desc_to_ctrlr(struct zynqmp_dma_chan *chan,
+				      struct zynqmp_dma_desc_sw *desc)
+{
+	dma_addr_t addr;
+
+	addr = desc->src_p;
+	zynqmp_dma_writeq(chan, ZYNQMP_DMA_SRC_START_LSB, addr);
+	addr = desc->dst_p;
+	zynqmp_dma_writeq(chan, ZYNQMP_DMA_DST_START_LSB, addr);
+}
+
+/**
+ * zynqmp_dma_desc_config_eod - Mark the descriptor as end descriptor
+ * @chan: ZynqMP DMA channel pointer
+ * @desc: Hw descriptor pointer
+ */
+static void zynqmp_dma_desc_config_eod(struct zynqmp_dma_chan *chan,
+				       void *desc)
+{
+	struct zynqmp_dma_desc_ll *hw = (struct zynqmp_dma_desc_ll *)desc;
+
+	hw->ctrl |= ZYNQMP_DMA_DESC_CTRL_STOP;
+	hw++;
+	hw->ctrl |= ZYNQMP_DMA_DESC_CTRL_COMP_INT | ZYNQMP_DMA_DESC_CTRL_STOP;
+}
+
+/**
+ * zynqmp_dma_config_sg_ll_desc - Configure the linked list descriptor
+ * @chan: ZynqMP DMA channel pointer
+ * @sdesc: Hw descriptor pointer
+ * @src: Source buffer address
+ * @dst: Destination buffer address
+ * @len: Transfer length
+ * @prev: Previous hw descriptor pointer
+ */
+static void zynqmp_dma_config_sg_ll_desc(struct zynqmp_dma_chan *chan,
+				   struct zynqmp_dma_desc_ll *sdesc,
+				   dma_addr_t src, dma_addr_t dst, size_t len,
+				   struct zynqmp_dma_desc_ll *prev)
+{
+	struct zynqmp_dma_desc_ll *ddesc = sdesc + 1;
+
+	sdesc->size = ddesc->size = len;
+	sdesc->addr = src;
+	ddesc->addr = dst;
+
+	sdesc->ctrl = ddesc->ctrl = ZYNQMP_DMA_DESC_CTRL_SIZE_256;
+	if (chan->is_dmacoherent) {
+		sdesc->ctrl |= ZYNQMP_DMA_DESC_CTRL_COHRNT;
+		ddesc->ctrl |= ZYNQMP_DMA_DESC_CTRL_COHRNT;
+	}
+
+	if (prev) {
+		dma_addr_t addr = chan->desc_pool_p +
+			    ((dma_addr_t)sdesc - (dma_addr_t)chan->desc_pool_v);
+		ddesc = prev + 1;
+		prev->nxtdscraddr = addr;
+		ddesc->nxtdscraddr = addr + ZYNQMP_DMA_DESC_SIZE(chan);
+	}
+}
+
+/**
+ * zynqmp_dma_init - Initialize the channel
+ * @chan: ZynqMP DMA channel pointer
+ */
+static void zynqmp_dma_init(struct zynqmp_dma_chan *chan)
+{
+	u32 val;
+
+	writel(ZYNQMP_DMA_IDS_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IDS);
+	val = readl(chan->regs + ZYNQMP_DMA_ISR);
+	writel(val, chan->regs + ZYNQMP_DMA_ISR);
+
+	if (chan->is_dmacoherent) {
+		val = ZYNQMP_DMA_AXCOHRNT;
+		val = (val & ~ZYNQMP_DMA_AXCACHE) |
+			(ZYNQMP_DMA_AXCACHE_VAL << ZYNQMP_DMA_AXCACHE_OFST);
+		writel(val, chan->regs + ZYNQMP_DMA_DSCR_ATTR);
+	}
+
+	val = readl(chan->regs + ZYNQMP_DMA_DATA_ATTR);
+	if (chan->is_dmacoherent) {
+		val = (val & ~ZYNQMP_DMA_ARCACHE) |
+			(ZYNQMP_DMA_AXCACHE_VAL << ZYNQMP_DMA_ARCACHE_OFST);
+		val = (val & ~ZYNQMP_DMA_AWCACHE) |
+			(ZYNQMP_DMA_AXCACHE_VAL << ZYNQMP_DMA_AWCACHE_OFST);
+	}
+	writel(val, chan->regs + ZYNQMP_DMA_DATA_ATTR);
+
+	/* Clear the interrupt accounting registers */
+	val = readl(chan->regs + ZYNQMP_DMA_IRQ_SRC_ACCT);
+	val = readl(chan->regs + ZYNQMP_DMA_IRQ_DST_ACCT);
+
+	chan->idle = true;
+}
+
+/**
+ * zynqmp_dma_tx_submit - Submit DMA transaction
+ * @tx: Async transaction descriptor pointer
+ *
+ * Return: cookie value
+ */
+static dma_cookie_t zynqmp_dma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+	struct zynqmp_dma_chan *chan = to_chan(tx->chan);
+	struct zynqmp_dma_desc_sw *desc, *new;
+	dma_cookie_t cookie;
+
+	new = tx_to_desc(tx);
+	spin_lock_bh(&chan->lock);
+	cookie = dma_cookie_assign(tx);
+
+	if (!list_empty(&chan->pending_list)) {
+		desc = list_last_entry(&chan->pending_list,
+				     struct zynqmp_dma_desc_sw, node);
+		if (!list_empty(&desc->tx_list))
+			desc = list_last_entry(&desc->tx_list,
+					     struct zynqmp_dma_desc_sw, node);
+		desc->src_v->nxtdscraddr = new->src_p;
+		desc->src_v->ctrl &= ~ZYNQMP_DMA_DESC_CTRL_STOP;
+		desc->dst_v->nxtdscraddr = new->dst_p;
+		desc->dst_v->ctrl &= ~ZYNQMP_DMA_DESC_CTRL_STOP;
+	}
+
+	list_add_tail(&new->node, &chan->pending_list);
+	spin_unlock_bh(&chan->lock);
+
+	return cookie;
+}
+
+/**
+ * zynqmp_dma_get_descriptor - Get the sw descriptor from the pool
+ * @chan: ZynqMP DMA channel pointer
+ *
+ * Return: The sw descriptor
+ */
+static struct zynqmp_dma_desc_sw *
+zynqmp_dma_get_descriptor(struct zynqmp_dma_chan *chan)
+{
+	struct zynqmp_dma_desc_sw *desc;
+
+	spin_lock_bh(&chan->lock);
+	desc = list_first_entry(&chan->free_list,
+				struct zynqmp_dma_desc_sw, node);
+	list_del(&desc->node);
+	spin_unlock_bh(&chan->lock);
+
+	INIT_LIST_HEAD(&desc->tx_list);
+	/* Clear the src and dst descriptor memory */
+	memset((void *)desc->src_v, 0, ZYNQMP_DMA_DESC_SIZE(chan));
+	memset((void *)desc->dst_v, 0, ZYNQMP_DMA_DESC_SIZE(chan));
+
+	return desc;
+}
+
+/**
+ * zynqmp_dma_free_descriptor - Free descriptor
+ * @chan: ZynqMP DMA channel pointer
+ * @sdesc: Transaction descriptor pointer
+ */
+static void zynqmp_dma_free_descriptor(struct zynqmp_dma_chan *chan,
+				 struct zynqmp_dma_desc_sw *sdesc)
+{
+	struct zynqmp_dma_desc_sw *child, *next;
+
+	chan->desc_free_cnt++;
+	list_add_tail(&sdesc->node, &chan->free_list);
+	list_for_each_entry_safe(child, next, &sdesc->tx_list, node) {
+		chan->desc_free_cnt++;
+		list_move_tail(&child->node, &chan->free_list);
+	}
+}
+
+/**
+ * zynqmp_dma_free_desc_list - Free descriptors list
+ * @chan: ZynqMP DMA channel pointer
+ * @list: List to parse and delete the descriptor
+ */
+static void zynqmp_dma_free_desc_list(struct zynqmp_dma_chan *chan,
+				      struct list_head *list)
+{
+	struct zynqmp_dma_desc_sw *desc, *next;
+
+	list_for_each_entry_safe(desc, next, list, node)
+		zynqmp_dma_free_descriptor(chan, desc);
+}
+
+/**
+ * zynqmp_dma_alloc_chan_resources - Allocate channel resources
+ * @dchan: DMA channel
+ *
+ * Return: Number of descriptors on success and failure value on error
+ */
+static int zynqmp_dma_alloc_chan_resources(struct dma_chan *dchan)
+{
+	struct zynqmp_dma_chan *chan = to_chan(dchan);
+	struct zynqmp_dma_desc_sw *desc;
+	int i;
+
+	chan->sw_desc_pool = kzalloc(sizeof(*desc) * ZYNQMP_DMA_NUM_DESCS,
+				     GFP_KERNEL);
+	if (!chan->sw_desc_pool)
+		return -ENOMEM;
+
+	chan->idle = true;
+	chan->desc_free_cnt = ZYNQMP_DMA_NUM_DESCS;
+
+	INIT_LIST_HEAD(&chan->free_list);
+
+	for (i = 0; i < ZYNQMP_DMA_NUM_DESCS; i++) {
+		desc = chan->sw_desc_pool + i;
+		dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
+		desc->async_tx.tx_submit = zynqmp_dma_tx_submit;
+		list_add_tail(&desc->node, &chan->free_list);
+	}
+
+	chan->desc_pool_v = dma_zalloc_coherent(chan->dev,
+				(2 * chan->desc_size * ZYNQMP_DMA_NUM_DESCS),
+				&chan->desc_pool_p, GFP_KERNEL);
+	if (!chan->desc_pool_v)
+		return -ENOMEM;
+
+	for (i = 0; i < ZYNQMP_DMA_NUM_DESCS; i++) {
+		desc = chan->sw_desc_pool + i;
+		desc->src_v = (struct zynqmp_dma_desc_ll *) (chan->desc_pool_v +
+					(i * ZYNQMP_DMA_DESC_SIZE(chan) * 2));
+		desc->dst_v = (struct zynqmp_dma_desc_ll *) (desc->src_v + 1);
+		desc->src_p = chan->desc_pool_p +
+				(i * ZYNQMP_DMA_DESC_SIZE(chan) * 2);
+		desc->dst_p = desc->src_p + ZYNQMP_DMA_DESC_SIZE(chan);
+	}
+
+	return ZYNQMP_DMA_NUM_DESCS;
+}
+
+/**
+ * zynqmp_dma_start - Start DMA channel
+ * @chan: ZynqMP DMA channel pointer
+ */
+static void zynqmp_dma_start(struct zynqmp_dma_chan *chan)
+{
+	writel(ZYNQMP_DMA_INT_EN_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IER);
+	chan->idle = false;
+	writel(ZYNQMP_DMA_ENABLE, chan->regs + ZYNQMP_DMA_CTRL2);
+}
+
+/**
+ * zynqmp_dma_handle_ovfl_int - Process the overflow interrupt
+ * @chan: ZynqMP DMA channel pointer
+ * @status: Interrupt status value
+ */
+static void zynqmp_dma_handle_ovfl_int(struct zynqmp_dma_chan *chan, u32 status)
+{
+	u32 val;
+
+	if (status & ZYNQMP_DMA_IRQ_DST_ACCT_ERR)
+		val = readl(chan->regs + ZYNQMP_DMA_IRQ_DST_ACCT);
+	if (status & ZYNQMP_DMA_IRQ_SRC_ACCT_ERR)
+		val = readl(chan->regs + ZYNQMP_DMA_IRQ_SRC_ACCT);
+}
+
+static void zynqmp_dma_config(struct zynqmp_dma_chan *chan)
+{
+	u32 val;
+
+	val = readl(chan->regs + ZYNQMP_DMA_CTRL0);
+	val |= ZYNQMP_DMA_POINT_TYPE_SG;
+	writel(val, chan->regs + ZYNQMP_DMA_CTRL0);
+
+	val = readl(chan->regs + ZYNQMP_DMA_DATA_ATTR);
+	val = (val & ~ZYNQMP_DMA_ARLEN) |
+		(chan->src_burst_len << ZYNQMP_DMA_ARLEN_OFST);
+	val = (val & ~ZYNQMP_DMA_AWLEN) |
+		(chan->dst_burst_len << ZYNQMP_DMA_AWLEN_OFST);
+	writel(val, chan->regs + ZYNQMP_DMA_DATA_ATTR);
+}
+
+/**
+ * zynqmp_dma_device_config - Zynqmp dma device configuration
+ * @dchan: DMA channel
+ * @config: DMA device config
+ *
+ * Return: Always '0'
+ */
+static int zynqmp_dma_device_config(struct dma_chan *dchan,
+				    struct dma_slave_config *config)
+{
+	struct zynqmp_dma_chan *chan = to_chan(dchan);
+
+	chan->src_burst_len = config->src_maxburst;
+	chan->dst_burst_len = config->dst_maxburst;
+
+	return 0;
+}
+
+/**
+ * zynqmp_dma_start_transfer - Initiate the new transfer
+ * @chan: ZynqMP DMA channel pointer
+ */
+static void zynqmp_dma_start_transfer(struct zynqmp_dma_chan *chan)
+{
+	struct zynqmp_dma_desc_sw *desc;
+
+	if (!chan->idle)
+		return;
+
+	zynqmp_dma_config(chan);
+
+	desc = list_first_entry_or_null(&chan->pending_list,
+					struct zynqmp_dma_desc_sw, node);
+	if (!desc)
+		return;
+
+	list_splice_tail_init(&chan->pending_list, &chan->active_list);
+	zynqmp_dma_update_desc_to_ctrlr(chan, desc);
+	zynqmp_dma_start(chan);
+}
+
+/**
+ * zynqmp_dma_chan_desc_cleanup - Cleanup the completed descriptors
+ * @chan: ZynqMP DMA channel
+ */
+static void zynqmp_dma_chan_desc_cleanup(struct zynqmp_dma_chan *chan)
+{
+	struct zynqmp_dma_desc_sw *desc, *next;
+
+	list_for_each_entry_safe(desc, next, &chan->done_list, node) {
+		dma_async_tx_callback callback;
+		void *callback_param;
+
+		list_del(&desc->node);
+
+		callback = desc->async_tx.callback;
+		callback_param = desc->async_tx.callback_param;
+		if (callback) {
+			spin_unlock(&chan->lock);
+			callback(callback_param);
+			spin_lock(&chan->lock);
+		}
+
+		/* Run any dependencies, then free the descriptor */
+		zynqmp_dma_free_descriptor(chan, desc);
+	}
+}
+
+/**
+ * zynqmp_dma_complete_descriptor - Mark the active descriptor as complete
+ * @chan: ZynqMP DMA channel pointer
+ */
+static void zynqmp_dma_complete_descriptor(struct zynqmp_dma_chan *chan)
+{
+	struct zynqmp_dma_desc_sw *desc;
+
+	desc = list_first_entry_or_null(&chan->active_list,
+					struct zynqmp_dma_desc_sw, node);
+	if (!desc)
+		return;
+	list_del(&desc->node);
+	dma_cookie_complete(&desc->async_tx);
+	list_add_tail(&desc->node, &chan->done_list);
+}
+
+/**
+ * zynqmp_dma_issue_pending - Issue pending transactions
+ * @dchan: DMA channel pointer
+ */
+static void zynqmp_dma_issue_pending(struct dma_chan *dchan)
+{
+	struct zynqmp_dma_chan *chan = to_chan(dchan);
+
+	spin_lock_bh(&chan->lock);
+	zynqmp_dma_start_transfer(chan);
+	spin_unlock_bh(&chan->lock);
+}
+
+/**
+ * zynqmp_dma_free_descriptors - Free channel descriptors
+ * @chan: ZynqMP DMA channel pointer
+ */
+static void zynqmp_dma_free_descriptors(struct zynqmp_dma_chan *chan)
+{
+	zynqmp_dma_free_desc_list(chan, &chan->active_list);
+	zynqmp_dma_free_desc_list(chan, &chan->pending_list);
+	zynqmp_dma_free_desc_list(chan, &chan->done_list);
+}
+
+/**
+ * zynqmp_dma_free_chan_resources - Free channel resources
+ * @dchan: DMA channel pointer
+ */
+static void zynqmp_dma_free_chan_resources(struct dma_chan *dchan)
+{
+	struct zynqmp_dma_chan *chan = to_chan(dchan);
+
+	spin_lock_bh(&chan->lock);
+	
zynqmp_dma_free_descriptors(chan); + spin_unlock_bh(&chan->lock); + dma_free_coherent(chan->dev, + (2 * ZYNQMP_DMA_DESC_SIZE(chan) * ZYNQMP_DMA_NUM_DESCS), + chan->desc_pool_v, chan->desc_pool_p); + kfree(chan->sw_desc_pool); +} + +/** + * zynqmp_dma_reset - Reset the channel + * @chan: ZynqMP DMA channel pointer + */ +static void zynqmp_dma_reset(struct zynqmp_dma_chan *chan) +{ + writel(ZYNQMP_DMA_IDS_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IDS); + + zynqmp_dma_complete_descriptor(chan); + zynqmp_dma_chan_desc_cleanup(chan); + zynqmp_dma_free_descriptors(chan); + zynqmp_dma_init(chan); +} + +/** + * zynqmp_dma_irq_handler - ZynqMP DMA Interrupt handler + * @irq: IRQ number + * @data: Pointer to the ZynqMP DMA channel structure + * + * Return: IRQ_HANDLED/IRQ_NONE + */ +static irqreturn_t zynqmp_dma_irq_handler(int irq, void *data) +{ + struct zynqmp_dma_chan *chan = (struct zynqmp_dma_chan *)data; + u32 isr, imr, status; + irqreturn_t ret = IRQ_NONE; + + isr = readl(chan->regs + ZYNQMP_DMA_ISR); + imr = readl(chan->regs + ZYNQMP_DMA_IMR); + status = isr & ~imr; + + writel(isr, chan->regs + ZYNQMP_DMA_ISR); + if (status & ZYNQMP_DMA_INT_DONE) { + tasklet_schedule(&chan->tasklet); + ret = IRQ_HANDLED; + } + + if (status & ZYNQMP_DMA_DONE) + chan->idle = true; + + if (status & ZYNQMP_DMA_INT_ERR) { + chan->err = true; + tasklet_schedule(&chan->tasklet); + dev_err(chan->dev, "Channel %p has errors\n", chan); + ret = IRQ_HANDLED; + } + + if (status & ZYNQMP_DMA_INT_OVRFL) { + zynqmp_dma_handle_ovfl_int(chan, status); + dev_info(chan->dev, "Channel %p overflow interrupt\n", chan); + ret = IRQ_HANDLED; + } + + return ret; +} + +/** + * zynqmp_dma_do_tasklet - Schedule completion tasklet + * @data: Pointer to the ZynqMP DMA channel structure + */ +static void zynqmp_dma_do_tasklet(unsigned long data) +{ + struct zynqmp_dma_chan *chan = (struct zynqmp_dma_chan *)data; + u32 count; + + spin_lock(&chan->lock); + + if (chan->err) { + zynqmp_dma_reset(chan); + chan->err = false; + goto unlock; + } + + count = readl(chan->regs + ZYNQMP_DMA_IRQ_DST_ACCT); + + while (count) { + zynqmp_dma_complete_descriptor(chan); + zynqmp_dma_chan_desc_cleanup(chan); + count--; + } + + if (chan->idle) + zynqmp_dma_start_transfer(chan); + +unlock: + spin_unlock(&chan->lock); +} + +/** + * zynqmp_dma_device_terminate_all - Aborts all transfers on a channel + * @dchan: DMA channel pointer + * + * Return: Always '0' + */ +static int zynqmp_dma_device_terminate_all(struct dma_chan *dchan) +{ + struct zynqmp_dma_chan *chan = to_chan(dchan); + + spin_lock_bh(&chan->lock); + writel(ZYNQMP_DMA_IDS_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IDS); + zynqmp_dma_free_descriptors(chan); + spin_unlock_bh(&chan->lock); + + return 0; +} + +/** + * zynqmp_dma_prep_memcpy - prepare descriptors for memcpy transaction + * @dchan: DMA channel + * @dma_dst: Destination buffer address + * @dma_src: Source buffer address + * @len: Transfer length + * @flags: transfer ack flags + * + * Return: Async transaction descriptor on success and NULL on failure + */ +static struct dma_async_tx_descriptor *zynqmp_dma_prep_memcpy( + struct dma_chan *dchan, dma_addr_t dma_dst, + dma_addr_t dma_src, size_t len, ulong flags) +{ + struct zynqmp_dma_chan *chan; + struct zynqmp_dma_desc_sw *new, *first = NULL; + void *desc = NULL, *prev = NULL; + size_t copy; + u32 desc_cnt; + + chan = to_chan(dchan); + + if (len > ZYNQMP_DMA_MAX_TRANS_LEN) + return NULL; + + desc_cnt = DIV_ROUND_UP(len, ZYNQMP_DMA_MAX_TRANS_LEN); + + spin_lock_bh(&chan->lock); + if (desc_cnt > 
chan->desc_free_cnt) {
+		spin_unlock_bh(&chan->lock);
+		dev_dbg(chan->dev, "chan %p descs are not available\n", chan);
+		return NULL;
+	}
+	chan->desc_free_cnt = chan->desc_free_cnt - desc_cnt;
+	spin_unlock_bh(&chan->lock);
+
+	do {
+		/* Allocate and populate the descriptor */
+		new = zynqmp_dma_get_descriptor(chan);
+
+		copy = min_t(size_t, len, ZYNQMP_DMA_MAX_TRANS_LEN);
+		desc = (struct zynqmp_dma_desc_ll *)new->src_v;
+		zynqmp_dma_config_sg_ll_desc(chan, desc, dma_src,
+					     dma_dst, copy, prev);
+		prev = desc;
+		len -= copy;
+		dma_src += copy;
+		dma_dst += copy;
+		if (!first)
+			first = new;
+		else
+			list_add_tail(&new->node, &first->tx_list);
+	} while (len);
+
+	zynqmp_dma_desc_config_eod(chan, desc);
+	async_tx_ack(&first->async_tx);
+	first->async_tx.flags = flags;
+	return &first->async_tx;
+}
+
+/**
+ * zynqmp_dma_prep_sg - prepare descriptors for a memory sg transaction
+ * @dchan: DMA channel
+ * @dst_sg: Destination scatter list
+ * @dst_sg_len: Number of entries in destination scatter list
+ * @src_sg: Source scatter list
+ * @src_sg_len: Number of entries in source scatter list
+ * @flags: transfer ack flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *zynqmp_dma_prep_sg(
+			struct dma_chan *dchan, struct scatterlist *dst_sg,
+			unsigned int dst_sg_len, struct scatterlist *src_sg,
+			unsigned int src_sg_len, unsigned long flags)
+{
+	struct zynqmp_dma_desc_sw *new, *first = NULL;
+	struct zynqmp_dma_chan *chan = to_chan(dchan);
+	void *desc = NULL, *prev = NULL;
+	size_t len, dst_avail, src_avail;
+	dma_addr_t dma_dst, dma_src;
+	u32 desc_cnt = 0, i;
+	struct scatterlist *sg;
+
+	for_each_sg(src_sg, sg, src_sg_len, i)
+		desc_cnt += DIV_ROUND_UP(sg_dma_len(sg),
+					 ZYNQMP_DMA_MAX_TRANS_LEN);
+
+	spin_lock_bh(&chan->lock);
+	if (desc_cnt > chan->desc_free_cnt) {
+		spin_unlock_bh(&chan->lock);
+		dev_dbg(chan->dev, "chan %p descs are not available\n", chan);
+		return NULL;
+	}
+	chan->desc_free_cnt = chan->desc_free_cnt - desc_cnt;
+	spin_unlock_bh(&chan->lock);
+
+	dst_avail = sg_dma_len(dst_sg);
+	src_avail = sg_dma_len(src_sg);
+
+	/* Run until we are out of scatterlist entries */
+	while (true) {
+		/* Allocate and populate the descriptor */
+		new = zynqmp_dma_get_descriptor(chan);
+		desc = (struct zynqmp_dma_desc_ll *)new->src_v;
+		len = min_t(size_t, src_avail, dst_avail);
+		len = min_t(size_t, len, ZYNQMP_DMA_MAX_TRANS_LEN);
+		if (len == 0)
+			goto fetch;
+		dma_dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) -
+			dst_avail;
+		dma_src = sg_dma_address(src_sg) + sg_dma_len(src_sg) -
+			src_avail;
+
+		zynqmp_dma_config_sg_ll_desc(chan, desc, dma_src, dma_dst,
+					     len, prev);
+		prev = desc;
+		dst_avail -= len;
+		src_avail -= len;
+
+		if (!first)
+			first = new;
+		else
+			list_add_tail(&new->node, &first->tx_list);
+fetch:
+		/* Fetch the next dst scatterlist entry */
+		if (dst_avail == 0) {
+			if (dst_sg_len == 0)
+				break;
+			dst_sg = sg_next(dst_sg);
+			if (dst_sg == NULL)
+				break;
+			dst_sg_len--;
+			dst_avail = sg_dma_len(dst_sg);
+		}
+		/* Fetch the next src scatterlist entry */
+		if (src_avail == 0) {
+			if (src_sg_len == 0)
+				break;
+			src_sg = sg_next(src_sg);
+			if (src_sg == NULL)
+				break;
+			src_sg_len--;
+			src_avail = sg_dma_len(src_sg);
+		}
+	}
+
+	zynqmp_dma_desc_config_eod(chan, desc);
+	first->async_tx.flags = flags;
+	return &first->async_tx;
+}
+
+/**
+ * zynqmp_dma_chan_remove - Channel remove function
+ * @chan: ZynqMP DMA channel pointer
+ */
+static void zynqmp_dma_chan_remove(struct zynqmp_dma_chan *chan)
+{
+	if (!chan)
+		return;
+
+	devm_free_irq(chan->zdev->dev, chan->irq, chan);
+	tasklet_kill(&chan->tasklet);
+	list_del(&chan->common.device_node);
+	clk_disable_unprepare(chan->clk_apb);
+	clk_disable_unprepare(chan->clk_main);
+}
+
+/**
+ * zynqmp_dma_chan_probe - Per Channel Probing
+ * @zdev: Driver specific device structure
+ * @pdev: Pointer to the platform_device structure
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int zynqmp_dma_chan_probe(struct zynqmp_dma_device *zdev,
+			   struct platform_device *pdev)
+{
+	struct zynqmp_dma_chan *chan;
+	struct resource *res;
+	struct device_node *node = pdev->dev.of_node;
+	int err;
+
+	chan = devm_kzalloc(zdev->dev, sizeof(*chan), GFP_KERNEL);
+	if (!chan)
+		return -ENOMEM;
+	chan->dev = zdev->dev;
+	chan->zdev = zdev;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	chan->regs = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(chan->regs))
+		return PTR_ERR(chan->regs);
+
+	chan->bus_width = ZYNQMP_DMA_BUS_WIDTH_64;
+	chan->dst_burst_len = ZYNQMP_DMA_AWLEN_RST_VAL;
+	chan->src_burst_len = ZYNQMP_DMA_ARLEN_RST_VAL;
+	err = of_property_read_u32(node, "xlnx,bus-width", &chan->bus_width);
+	if (!err && (chan->bus_width != ZYNQMP_DMA_BUS_WIDTH_64) &&
+	    (chan->bus_width != ZYNQMP_DMA_BUS_WIDTH_128)) {
+		dev_err(zdev->dev, "invalid bus-width value");
+		return -EINVAL;
+	}
+
+	chan->is_dmacoherent = of_property_read_bool(node, "dma-coherent");
+	zdev->chan = chan;
+	tasklet_init(&chan->tasklet, zynqmp_dma_do_tasklet, (ulong)chan);
+	spin_lock_init(&chan->lock);
+	INIT_LIST_HEAD(&chan->active_list);
+	INIT_LIST_HEAD(&chan->pending_list);
+	INIT_LIST_HEAD(&chan->done_list);
+	INIT_LIST_HEAD(&chan->free_list);
+
+	dma_cookie_init(&chan->common);
+	chan->common.device = &zdev->common;
+	list_add_tail(&chan->common.device_node, &zdev->common.channels);
+
+	zynqmp_dma_init(chan);
+	chan->irq = platform_get_irq(pdev, 0);
+	if (chan->irq < 0)
+		return -ENXIO;
+	err = devm_request_irq(&pdev->dev, chan->irq, zynqmp_dma_irq_handler, 0,
+			       "zynqmp-dma", chan);
+	if (err)
+		return err;
+	chan->clk_main = devm_clk_get(&pdev->dev, "clk_main");
+	if (IS_ERR(chan->clk_main)) {
+		dev_err(&pdev->dev, "main clock not found.\n");
+		return PTR_ERR(chan->clk_main);
+	}
+
+	chan->clk_apb = devm_clk_get(&pdev->dev, "clk_apb");
+	if (IS_ERR(chan->clk_apb)) {
+		dev_err(&pdev->dev, "apb clock not found.\n");
+		return PTR_ERR(chan->clk_apb);
+	}
+
+	err = clk_prepare_enable(chan->clk_main);
+	if (err) {
+		dev_err(&pdev->dev, "Unable to enable main clock.\n");
+		return err;
+	}
+
+	err = clk_prepare_enable(chan->clk_apb);
+	if (err) {
+		clk_disable_unprepare(chan->clk_main);
+		dev_err(&pdev->dev, "Unable to enable apb clock.\n");
+		return err;
+	}
+
+	chan->desc_size = sizeof(struct zynqmp_dma_desc_ll);
+	chan->idle = true;
+	return 0;
+}
+
+/**
+ * of_zynqmp_dma_xlate - Translation function
+ * @dma_spec: Pointer to DMA specifier as found in the device tree
+ * @ofdma: Pointer to DMA controller data
+ *
+ * Return: DMA channel pointer on success and NULL on error
+ */
+static struct dma_chan *of_zynqmp_dma_xlate(struct of_phandle_args *dma_spec,
+					    struct of_dma *ofdma)
+{
+	struct zynqmp_dma_device *zdev = ofdma->of_dma_data;
+
+	return dma_get_slave_channel(&zdev->chan->common);
+}
+
+/**
+ * zynqmp_dma_probe - Driver probe function
+ * @pdev: Pointer to the platform_device structure
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int zynqmp_dma_probe(struct platform_device *pdev)
+{
+	struct
zynqmp_dma_device *zdev; + struct dma_device *p; + int ret; + + zdev = devm_kzalloc(&pdev->dev, sizeof(*zdev), GFP_KERNEL); + if (!zdev) + return -ENOMEM; + + zdev->dev = &pdev->dev; + INIT_LIST_HEAD(&zdev->common.channels); + + dma_set_mask(&pdev->dev, DMA_BIT_MASK(44)); + dma_cap_set(DMA_SG, zdev->common.cap_mask); + dma_cap_set(DMA_MEMCPY, zdev->common.cap_mask); + + p = &zdev->common; + p->device_prep_dma_sg = zynqmp_dma_prep_sg; + p->device_prep_dma_memcpy = zynqmp_dma_prep_memcpy; + p->device_terminate_all = zynqmp_dma_device_terminate_all; + p->device_issue_pending = zynqmp_dma_issue_pending; + p->device_alloc_chan_resources = zynqmp_dma_alloc_chan_resources; + p->device_free_chan_resources = zynqmp_dma_free_chan_resources; + p->device_tx_status = dma_cookie_status; + p->device_config = zynqmp_dma_device_config; + p->dev = &pdev->dev; + + platform_set_drvdata(pdev, zdev); + + ret = zynqmp_dma_chan_probe(zdev, pdev); + if (ret) { + dev_err(&pdev->dev, "Probing channel failed\n"); + goto free_chan_resources; + } + + p->dst_addr_widths = BIT(zdev->chan->bus_width / 8); + p->src_addr_widths = BIT(zdev->chan->bus_width / 8); + + dma_async_device_register(&zdev->common); + + ret = of_dma_controller_register(pdev->dev.of_node, + of_zynqmp_dma_xlate, zdev); + if (ret) { + dev_err(&pdev->dev, "Unable to register DMA to DT\n"); + dma_async_device_unregister(&zdev->common); + goto free_chan_resources; + } + + dev_info(&pdev->dev, "ZynqMP DMA driver Probe success\n"); + + return 0; + +free_chan_resources: + zynqmp_dma_chan_remove(zdev->chan); + return ret; +} + +/** + * zynqmp_dma_remove - Driver remove function + * @pdev: Pointer to the platform_device structure + * + * Return: Always '0' + */ +static int zynqmp_dma_remove(struct platform_device *pdev) +{ + struct zynqmp_dma_device *zdev = platform_get_drvdata(pdev); + + of_dma_controller_free(pdev->dev.of_node); + dma_async_device_unregister(&zdev->common); + + zynqmp_dma_chan_remove(zdev->chan); + + return 0; +} + +static const struct of_device_id zynqmp_dma_of_match[] = { + { .compatible = "xlnx,zynqmp-dma-1.0", }, + {} +}; +MODULE_DEVICE_TABLE(of, zynqmp_dma_of_match); + +static struct platform_driver zynqmp_dma_driver = { + .driver = { + .name = "xilinx-zynqmp-dma", + .of_match_table = zynqmp_dma_of_match, + }, + .probe = zynqmp_dma_probe, + .remove = zynqmp_dma_remove, +}; + +module_platform_driver(zynqmp_dma_driver); + +MODULE_AUTHOR("Xilinx, Inc."); +MODULE_DESCRIPTION("Xilinx ZynqMP DMA driver"); -- cgit v0.10.2 From e598e6eb46dd448d9b77a564e4d9664da52e3e1f Mon Sep 17 00:00:00 2001 From: Kedareswara rao Appana Date: Sat, 9 Jul 2016 14:09:48 +0530 Subject: dmaengine: xilinx: Fix race condition in axi dma cyclic dma mode In cyclic DMA mode the tail BD segment must be linked back to the head BD segment so that the BDs are processed cyclically. The current driver does this only for the TX channel; the RX channel needs the same update. This patch fixes that.
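Read as post-patch code, the hunk below reduces to the following sequence in xilinx_dma_prep_dma_cyclic() (a sketch using the names from the hunk): the tail segment's next_desc is pointed back at the head unconditionally, closing the BD ring for both directions, while SOP/EOP marking stays TX-only.

	/* Link the tail BD back to the head unconditionally, closing the
	 * ring for both DMA_MEM_TO_DEV (TX) and DMA_DEV_TO_MEM (RX). */
	segment = list_last_entry(&desc->segments,
				  struct xilinx_axidma_tx_segment, node);
	segment->hw.next_desc = (u32)head_segment->phys;

	/* SOP/EOP marking remains TX-only */
	if (direction == DMA_MEM_TO_DEV) {
		head_segment->hw.control |= XILINX_DMA_BD_SOP;
		segment->hw.control |= XILINX_DMA_BD_EOP;
	}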
Signed-off-by: Kedareswara rao Appana Signed-off-by: Vinod Koul diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c index cf47347..4e223d0 100644 --- a/drivers/dma/xilinx/xilinx_dma.c +++ b/drivers/dma/xilinx/xilinx_dma.c @@ -1895,14 +1895,15 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic( reg |= XILINX_DMA_CR_CYCLIC_BD_EN_MASK; dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg); + segment = list_last_entry(&desc->segments, + struct xilinx_axidma_tx_segment, + node); + segment->hw.next_desc = (u32) head_segment->phys; + /* For the last DMA_MEM_TO_DEV transfer, set EOP */ if (direction == DMA_MEM_TO_DEV) { head_segment->hw.control |= XILINX_DMA_BD_SOP; - segment = list_last_entry(&desc->segments, - struct xilinx_axidma_tx_segment, - node); segment->hw.control |= XILINX_DMA_BD_EOP; - segment->hw.next_desc = (u32) head_segment->phys; } return &desc->async_tx; -- cgit v0.10.2 From 7cdd3587b8628215f377d5d73a39540d94f33dc1 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 11 Jul 2016 23:46:09 +0200 Subject: dmaengine: zynqmp: avoid cast warning The newly added zynqmp_dma driver produces a warning on 32-bit architectures when dma_addr_t is 64-bit wide: drivers/dma/xilinx/zynqmp_dma.c: In function 'zynqmp_dma_config_sg_ll_desc': drivers/dma/xilinx/zynqmp_dma.c:321:9: error: cast from pointer to integer of different size [-Werror=pointer-to-int-cast] ((dma_addr_t)sdesc - (dma_addr_t)chan->desc_pool_v); ^ drivers/dma/xilinx/zynqmp_dma.c:321:29: error: cast from pointer to integer of different size [-Werror=pointer-to-int-cast] ((dma_addr_t)sdesc - (dma_addr_t)chan->desc_pool_v); This changes the cast to the more appropriate uintptr_t. Signed-off-by: Arnd Bergmann Signed-off-by: Vinod Koul diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c index 59bc1f7..f777a5b 100644 --- a/drivers/dma/xilinx/zynqmp_dma.c +++ b/drivers/dma/xilinx/zynqmp_dma.c @@ -318,7 +318,7 @@ static void zynqmp_dma_config_sg_ll_desc(struct zynqmp_dma_chan *chan, if (prev) { dma_addr_t addr = chan->desc_pool_p + - ((dma_addr_t)sdesc - (dma_addr_t)chan->desc_pool_v); + ((uintptr_t)sdesc - (uintptr_t)chan->desc_pool_v); ddesc = prev + 1; prev->nxtdscraddr = addr; ddesc->nxtdscraddr = addr + ZYNQMP_DMA_DESC_SIZE(chan); -- cgit v0.10.2 From 9e69868fcb45212119d78ab73770f66c10db8c55 Mon Sep 17 00:00:00 2001 From: Thomas Petazzoni Date: Thu, 16 Jun 2016 14:28:33 +0200 Subject: dt-bindings: dma: add binding for the Marvell XOR v2 engine This commit adds the Device Tree binding documentation for the Marvell XOR v2 engine, which is found on Marvell Armada 7K/8K ARM64 SoCs. Signed-off-by: Thomas Petazzoni Acked-by: Rob Herring Signed-off-by: Vinod Koul diff --git a/Documentation/devicetree/bindings/dma/mv-xor-v2.txt b/Documentation/devicetree/bindings/dma/mv-xor-v2.txt new file mode 100644 index 0000000..217a90e --- /dev/null +++ b/Documentation/devicetree/bindings/dma/mv-xor-v2.txt @@ -0,0 +1,24 @@ +* Marvell XOR v2 engines + +Required properties: +- compatible: one of the following values: + "marvell,armada-7k-xor" + "marvell,xor-v2" +- reg: Should contain registers location and length (two sets) + the first set is the DMA registers + the second set is the global registers +- msi-parent: Phandle to the MSI-capable interrupt controller used for + interrupts. + +Optional properties: +- clocks: Optional reference to the clock used by the XOR engine. 
+ +Example: + + xor0@400000 { + compatible = "marvell,xor-v2"; + reg = <0x400000 0x1000>, + <0x410000 0x1000>; + msi-parent = <&gic_v2m0>; + dma-coherent; + }; -- cgit v0.10.2 From 19a340b1a820430de0e05fbb8dcb20da91f2e013 Mon Sep 17 00:00:00 2001 From: Thomas Petazzoni Date: Thu, 16 Jun 2016 14:28:34 +0200 Subject: dmaengine: mv_xor_v2: new driver The new mv_xor_v2 driver supports the XOR engines found in the 64-bits ARM from Marvell of the Armada 7K and Armada 8K family. This XOR engine is a completely new hardware block, entirely different from the one used on previous Marvell Armada platforms, which use the existing mv_xor driver. Signed-off-by: Thomas Petazzoni Signed-off-by: Vinod Koul diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 8c98779..f6c46d0 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -339,6 +339,20 @@ config MV_XOR ---help--- Enable support for the Marvell XOR engine. +config MV_XOR_V2 + bool "Marvell XOR engine version 2 support " + depends on ARM64 + select DMA_ENGINE + select DMA_ENGINE_RAID + select ASYNC_TX_ENABLE_CHANNEL_SWITCH + select GENERIC_MSI_IRQ_DOMAIN + ---help--- + Enable support for the Marvell version 2 XOR engine. + + This engine provides acceleration for copy, XOR and RAID6 + operations, and is available on Marvell Armada 7K and 8K + platforms. + config MXS_DMA bool "MXS DMA support" depends on SOC_IMX23 || SOC_IMX28 || SOC_IMX6Q || SOC_IMX6UL diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index 614f28b..e4dc9ca 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -45,6 +45,7 @@ obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o obj-$(CONFIG_MOXART_DMA) += moxart-dma.o obj-$(CONFIG_MPC512X_DMA) += mpc512x_dma.o obj-$(CONFIG_MV_XOR) += mv_xor.o +obj-$(CONFIG_MV_XOR_V2) += mv_xor_v2.o obj-$(CONFIG_MXS_DMA) += mxs-dma.o obj-$(CONFIG_MX3_IPU) += ipu/ obj-$(CONFIG_NBPFAXI_DMA) += nbpfaxi.o diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c new file mode 100644 index 0000000..8320155 --- /dev/null +++ b/drivers/dma/mv_xor_v2.c @@ -0,0 +1,879 @@ +/* + * Copyright (C) 2015-2016 Marvell International Ltd. + + * This program is free software: you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "dmaengine.h" + +/* DMA Engine Registers */ +#define MV_XOR_V2_DMA_DESQ_BALR_OFF 0x000 +#define MV_XOR_V2_DMA_DESQ_BAHR_OFF 0x004 +#define MV_XOR_V2_DMA_DESQ_SIZE_OFF 0x008 +#define MV_XOR_V2_DMA_DESQ_DONE_OFF 0x00C +#define MV_XOR_V2_DMA_DESQ_DONE_PENDING_MASK 0x7FFF +#define MV_XOR_V2_DMA_DESQ_DONE_PENDING_SHIFT 0 +#define MV_XOR_V2_DMA_DESQ_DONE_READ_PTR_MASK 0x1FFF +#define MV_XOR_V2_DMA_DESQ_DONE_READ_PTR_SHIFT 16 +#define MV_XOR_V2_DMA_DESQ_ARATTR_OFF 0x010 +#define MV_XOR_V2_DMA_DESQ_ATTR_CACHE_MASK 0x3F3F +#define MV_XOR_V2_DMA_DESQ_ATTR_OUTER_SHAREABLE 0x202 +#define MV_XOR_V2_DMA_DESQ_ATTR_CACHEABLE 0x3C3C +#define MV_XOR_V2_DMA_IMSG_CDAT_OFF 0x014 +#define MV_XOR_V2_DMA_IMSG_THRD_OFF 0x018 +#define MV_XOR_V2_DMA_IMSG_THRD_MASK 0x7FFF +#define MV_XOR_V2_DMA_IMSG_THRD_SHIFT 0x0 +#define MV_XOR_V2_DMA_DESQ_AWATTR_OFF 0x01C + /* Same flags as MV_XOR_V2_DMA_DESQ_ARATTR_OFF */ +#define MV_XOR_V2_DMA_DESQ_ALLOC_OFF 0x04C +#define MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_MASK 0xFFFF +#define MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_SHIFT 16 +#define MV_XOR_V2_DMA_IMSG_BALR_OFF 0x050 +#define MV_XOR_V2_DMA_IMSG_BAHR_OFF 0x054 +#define MV_XOR_V2_DMA_DESQ_CTRL_OFF 0x100 +#define MV_XOR_V2_DMA_DESQ_CTRL_32B 1 +#define MV_XOR_V2_DMA_DESQ_CTRL_128B 7 +#define MV_XOR_V2_DMA_DESQ_STOP_OFF 0x800 +#define MV_XOR_V2_DMA_DESQ_DEALLOC_OFF 0x804 +#define MV_XOR_V2_DMA_DESQ_ADD_OFF 0x808 + +/* XOR Global registers */ +#define MV_XOR_V2_GLOB_BW_CTRL 0x4 +#define MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_RD_SHIFT 0 +#define MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_RD_VAL 64 +#define MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_WR_SHIFT 8 +#define MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_WR_VAL 8 +#define MV_XOR_V2_GLOB_BW_CTRL_RD_BURST_LEN_SHIFT 12 +#define MV_XOR_V2_GLOB_BW_CTRL_RD_BURST_LEN_VAL 4 +#define MV_XOR_V2_GLOB_BW_CTRL_WR_BURST_LEN_SHIFT 16 +#define MV_XOR_V2_GLOB_BW_CTRL_WR_BURST_LEN_VAL 4 +#define MV_XOR_V2_GLOB_PAUSE 0x014 +#define MV_XOR_V2_GLOB_PAUSE_AXI_TIME_DIS_VAL 0x8 +#define MV_XOR_V2_GLOB_SYS_INT_CAUSE 0x200 +#define MV_XOR_V2_GLOB_SYS_INT_MASK 0x204 +#define MV_XOR_V2_GLOB_MEM_INT_CAUSE 0x220 +#define MV_XOR_V2_GLOB_MEM_INT_MASK 0x224 + +#define MV_XOR_V2_MIN_DESC_SIZE 32 +#define MV_XOR_V2_EXT_DESC_SIZE 128 + +#define MV_XOR_V2_DESC_RESERVED_SIZE 12 +#define MV_XOR_V2_DESC_BUFF_D_ADDR_SIZE 12 + +#define MV_XOR_V2_CMD_LINE_NUM_MAX_D_BUF 8 + +/* + * Descriptors queue size. With 32 bytes descriptors, up to 2^14 + * descriptors are allowed, with 128 bytes descriptors, up to 2^12 + * descriptors are allowed. This driver uses 128 bytes descriptors, + * but experimentation has shown that a set of 1024 descriptors is + * sufficient to reach a good level of performance. + */ +#define MV_XOR_V2_DESC_NUM 1024 + +/** + * struct mv_xor_v2_descriptor - DMA HW descriptor + * @desc_id: used by S/W and is not affected by H/W. 
+ * @flags: error and status flags + * @crc32_result: CRC32 calculation result + * @desc_ctrl: operation mode and control flags + * @buff_size: amount of bytes to be processed + * @fill_pattern_src_addr: Fill-Pattern or Source-Address and + * AW-Attributes + * @data_buff_addr: Source (and might be RAID6 destination) + * addresses of data buffers in RAID5 and RAID6 + * @reserved: reserved + */ +struct mv_xor_v2_descriptor { + u16 desc_id; + u16 flags; + u32 crc32_result; + u32 desc_ctrl; + + /* Definitions for desc_ctrl */ +#define DESC_NUM_ACTIVE_D_BUF_SHIFT 22 +#define DESC_OP_MODE_SHIFT 28 +#define DESC_OP_MODE_NOP 0 /* Idle operation */ +#define DESC_OP_MODE_MEMCPY 1 /* Pure-DMA operation */ +#define DESC_OP_MODE_MEMSET 2 /* Mem-Fill operation */ +#define DESC_OP_MODE_MEMINIT 3 /* Mem-Init operation */ +#define DESC_OP_MODE_MEM_COMPARE 4 /* Mem-Compare operation */ +#define DESC_OP_MODE_CRC32 5 /* CRC32 calculation */ +#define DESC_OP_MODE_XOR 6 /* RAID5 (XOR) operation */ +#define DESC_OP_MODE_RAID6 7 /* RAID6 P&Q-generation */ +#define DESC_OP_MODE_RAID6_REC 8 /* RAID6 Recovery */ +#define DESC_Q_BUFFER_ENABLE BIT(16) +#define DESC_P_BUFFER_ENABLE BIT(17) +#define DESC_IOD BIT(27) + + u32 buff_size; + u32 fill_pattern_src_addr[4]; + u32 data_buff_addr[MV_XOR_V2_DESC_BUFF_D_ADDR_SIZE]; + u32 reserved[MV_XOR_V2_DESC_RESERVED_SIZE]; +}; + +/** + * struct mv_xor_v2_device - implements a xor device + * @lock: lock for the engine + * @dma_base: memory mapped DMA register base + * @glob_base: memory mapped global register base + * @irq_tasklet: + * @free_sw_desc: linked list of free SW descriptors + * @dmadev: dma device + * @dmachan: dma channel + * @hw_desq: HW descriptors queue + * @hw_desq_virt: virtual address of DESCQ + * @sw_desq: SW descriptors queue + * @desc_size: HW descriptor size + * @npendings: number of pending descriptors (for which tx_submit has + * been called, but not yet issue_pending) + */ +struct mv_xor_v2_device { + spinlock_t lock; + void __iomem *dma_base; + void __iomem *glob_base; + struct clk *clk; + struct tasklet_struct irq_tasklet; + struct list_head free_sw_desc; + struct dma_device dmadev; + struct dma_chan dmachan; + dma_addr_t hw_desq; + struct mv_xor_v2_descriptor *hw_desq_virt; + struct mv_xor_v2_sw_desc *sw_desq; + int desc_size; + unsigned int npendings; +}; + +/** + * struct mv_xor_v2_sw_desc - implements a xor SW descriptor + * @idx: descriptor index + * @async_tx: support for the async_tx api + * @hw_desc: assosiated HW descriptor + * @free_list: node of the free SW descriprots list +*/ +struct mv_xor_v2_sw_desc { + int idx; + struct dma_async_tx_descriptor async_tx; + struct mv_xor_v2_descriptor hw_desc; + struct list_head free_list; +}; + +/* + * Fill the data buffers to a HW descriptor + */ +static void mv_xor_v2_set_data_buffers(struct mv_xor_v2_device *xor_dev, + struct mv_xor_v2_descriptor *desc, + dma_addr_t src, int index) +{ + int arr_index = ((index >> 1) * 3); + + /* + * Fill the buffer's addresses to the descriptor. 
+ * + * The format of the buffers address for 2 sequential buffers + * X and X + 1: + * + * First word: Buffer-DX-Address-Low[31:0] + * Second word: Buffer-DX+1-Address-Low[31:0] + * Third word: DX+1-Buffer-Address-High[47:32] [31:16] + * DX-Buffer-Address-High[47:32] [15:0] + */ + if ((index & 0x1) == 0) { + desc->data_buff_addr[arr_index] = lower_32_bits(src); + + desc->data_buff_addr[arr_index + 2] &= ~0xFFFF; + desc->data_buff_addr[arr_index + 2] |= + upper_32_bits(src) & 0xFFFF; + } else { + desc->data_buff_addr[arr_index + 1] = + lower_32_bits(src); + + desc->data_buff_addr[arr_index + 2] &= ~0xFFFF0000; + desc->data_buff_addr[arr_index + 2] |= + (upper_32_bits(src) & 0xFFFF) << 16; + } +} + +/* + * Return the next available index in the DESQ. + */ +static int mv_xor_v2_get_desq_write_ptr(struct mv_xor_v2_device *xor_dev) +{ + /* read the index for the next available descriptor in the DESQ */ + u32 reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ALLOC_OFF); + + return ((reg >> MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_SHIFT) + & MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_MASK); +} + +/* + * notify the engine of new descriptors, and update the available index. + */ +static void mv_xor_v2_add_desc_to_desq(struct mv_xor_v2_device *xor_dev, + int num_of_desc) +{ + /* write the number of new descriptors in the DESQ. */ + writel(num_of_desc, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ADD_OFF); +} + +/* + * free HW descriptors + */ +static void mv_xor_v2_free_desc_from_desq(struct mv_xor_v2_device *xor_dev, + int num_of_desc) +{ + /* write the number of new descriptors in the DESQ. */ + writel(num_of_desc, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_DEALLOC_OFF); +} + +/* + * Set descriptor size + * Return the HW descriptor size in bytes + */ +static int mv_xor_v2_set_desc_size(struct mv_xor_v2_device *xor_dev) +{ + writel(MV_XOR_V2_DMA_DESQ_CTRL_128B, + xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_CTRL_OFF); + + return MV_XOR_V2_EXT_DESC_SIZE; +} + +/* + * Set the IMSG threshold + */ +static inline +void mv_xor_v2_set_imsg_thrd(struct mv_xor_v2_device *xor_dev, int thrd_val) +{ + u32 reg; + + reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF); + + reg &= (~MV_XOR_V2_DMA_IMSG_THRD_MASK << MV_XOR_V2_DMA_IMSG_THRD_SHIFT); + reg |= (thrd_val << MV_XOR_V2_DMA_IMSG_THRD_SHIFT); + + writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF); +} + +static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data) +{ + struct mv_xor_v2_device *xor_dev = data; + unsigned int ndescs; + u32 reg; + + reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_DONE_OFF); + + ndescs = ((reg >> MV_XOR_V2_DMA_DESQ_DONE_PENDING_SHIFT) & + MV_XOR_V2_DMA_DESQ_DONE_PENDING_MASK); + + /* No descriptors to process */ + if (!ndescs) + return IRQ_NONE; + + /* + * Update IMSG threshold, to disable new IMSG interrupts until + * end of the tasklet + */ + mv_xor_v2_set_imsg_thrd(xor_dev, MV_XOR_V2_DESC_NUM); + + /* schedule a tasklet to handle descriptors callbacks */ + tasklet_schedule(&xor_dev->irq_tasklet); + + return IRQ_HANDLED; +} + +/* + * submit a descriptor to the DMA engine + */ +static dma_cookie_t +mv_xor_v2_tx_submit(struct dma_async_tx_descriptor *tx) +{ + int desq_ptr; + void *dest_hw_desc; + dma_cookie_t cookie; + struct mv_xor_v2_sw_desc *sw_desc = + container_of(tx, struct mv_xor_v2_sw_desc, async_tx); + struct mv_xor_v2_device *xor_dev = + container_of(tx->chan, struct mv_xor_v2_device, dmachan); + + dev_dbg(xor_dev->dmadev.dev, + "%s sw_desc %p: async_tx %p\n", + __func__, sw_desc, &sw_desc->async_tx); + + /* assign coookie */ 
+ spin_lock_bh(&xor_dev->lock); + cookie = dma_cookie_assign(tx); + + /* get the next available slot in the DESQ */ + desq_ptr = mv_xor_v2_get_desq_write_ptr(xor_dev); + + /* copy the HW descriptor from the SW descriptor to the DESQ */ + dest_hw_desc = xor_dev->hw_desq_virt + desq_ptr; + + memcpy(dest_hw_desc, &sw_desc->hw_desc, xor_dev->desc_size); + + xor_dev->npendings++; + + spin_unlock_bh(&xor_dev->lock); + + return cookie; +} + +/* + * Prepare a SW descriptor + */ +static struct mv_xor_v2_sw_desc * +mv_xor_v2_prep_sw_desc(struct mv_xor_v2_device *xor_dev) +{ + struct mv_xor_v2_sw_desc *sw_desc; + + /* Lock the channel */ + spin_lock_bh(&xor_dev->lock); + + if (list_empty(&xor_dev->free_sw_desc)) { + spin_unlock_bh(&xor_dev->lock); + /* schedule tasklet to free some descriptors */ + tasklet_schedule(&xor_dev->irq_tasklet); + return NULL; + } + + /* get a free SW descriptor from the SW DESQ */ + sw_desc = list_first_entry(&xor_dev->free_sw_desc, + struct mv_xor_v2_sw_desc, free_list); + list_del(&sw_desc->free_list); + + /* Release the channel */ + spin_unlock_bh(&xor_dev->lock); + + /* set the async tx descriptor */ + dma_async_tx_descriptor_init(&sw_desc->async_tx, &xor_dev->dmachan); + sw_desc->async_tx.tx_submit = mv_xor_v2_tx_submit; + async_tx_ack(&sw_desc->async_tx); + + return sw_desc; +} + +/* + * Prepare a HW descriptor for a memcpy operation + */ +static struct dma_async_tx_descriptor * +mv_xor_v2_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, + dma_addr_t src, size_t len, unsigned long flags) +{ + struct mv_xor_v2_sw_desc *sw_desc; + struct mv_xor_v2_descriptor *hw_descriptor; + struct mv_xor_v2_device *xor_dev; + + xor_dev = container_of(chan, struct mv_xor_v2_device, dmachan); + + dev_dbg(xor_dev->dmadev.dev, + "%s len: %zu src %pad dest %pad flags: %ld\n", + __func__, len, &src, &dest, flags); + + sw_desc = mv_xor_v2_prep_sw_desc(xor_dev); + + sw_desc->async_tx.flags = flags; + + /* set the HW descriptor */ + hw_descriptor = &sw_desc->hw_desc; + + /* save the SW descriptor ID to restore when operation is done */ + hw_descriptor->desc_id = sw_desc->idx; + + /* Set the MEMCPY control word */ + hw_descriptor->desc_ctrl = + DESC_OP_MODE_MEMCPY << DESC_OP_MODE_SHIFT; + + if (flags & DMA_PREP_INTERRUPT) + hw_descriptor->desc_ctrl |= DESC_IOD; + + /* Set source address */ + hw_descriptor->fill_pattern_src_addr[0] = lower_32_bits(src); + hw_descriptor->fill_pattern_src_addr[1] = + upper_32_bits(src) & 0xFFFF; + + /* Set Destination address */ + hw_descriptor->fill_pattern_src_addr[2] = lower_32_bits(dest); + hw_descriptor->fill_pattern_src_addr[3] = + upper_32_bits(dest) & 0xFFFF; + + /* Set buffers size */ + hw_descriptor->buff_size = len; + + /* return the async tx descriptor */ + return &sw_desc->async_tx; +} + +/* + * Prepare a HW descriptor for a XOR operation + */ +static struct dma_async_tx_descriptor * +mv_xor_v2_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, + unsigned int src_cnt, size_t len, unsigned long flags) +{ + struct mv_xor_v2_sw_desc *sw_desc; + struct mv_xor_v2_descriptor *hw_descriptor; + struct mv_xor_v2_device *xor_dev = + container_of(chan, struct mv_xor_v2_device, dmachan); + int i; + + if (src_cnt > MV_XOR_V2_CMD_LINE_NUM_MAX_D_BUF || src_cnt < 1) + return NULL; + + dev_dbg(xor_dev->dmadev.dev, + "%s src_cnt: %d len: %zu dest %pad flags: %ld\n", + __func__, src_cnt, len, &dest, flags); + + sw_desc = mv_xor_v2_prep_sw_desc(xor_dev); + + sw_desc->async_tx.flags = flags; + + /* set the HW descriptor */ + hw_descriptor = 
&sw_desc->hw_desc; + + /* save the SW descriptor ID to restore when operation is done */ + hw_descriptor->desc_id = sw_desc->idx; + + /* Set the XOR control word */ + hw_descriptor->desc_ctrl = + DESC_OP_MODE_XOR << DESC_OP_MODE_SHIFT; + hw_descriptor->desc_ctrl |= DESC_P_BUFFER_ENABLE; + + if (flags & DMA_PREP_INTERRUPT) + hw_descriptor->desc_ctrl |= DESC_IOD; + + /* Set the data buffers */ + for (i = 0; i < src_cnt; i++) + mv_xor_v2_set_data_buffers(xor_dev, hw_descriptor, src[i], i); + + hw_descriptor->desc_ctrl |= + src_cnt << DESC_NUM_ACTIVE_D_BUF_SHIFT; + + /* Set Destination address */ + hw_descriptor->fill_pattern_src_addr[2] = lower_32_bits(dest); + hw_descriptor->fill_pattern_src_addr[3] = + upper_32_bits(dest) & 0xFFFF; + + /* Set buffers size */ + hw_descriptor->buff_size = len; + + /* return the async tx descriptor */ + return &sw_desc->async_tx; +} + +/* + * Prepare a HW descriptor for interrupt operation. + */ +static struct dma_async_tx_descriptor * +mv_xor_v2_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags) +{ + struct mv_xor_v2_sw_desc *sw_desc; + struct mv_xor_v2_descriptor *hw_descriptor; + struct mv_xor_v2_device *xor_dev = + container_of(chan, struct mv_xor_v2_device, dmachan); + + sw_desc = mv_xor_v2_prep_sw_desc(xor_dev); + + /* set the HW descriptor */ + hw_descriptor = &sw_desc->hw_desc; + + /* save the SW descriptor ID to restore when operation is done */ + hw_descriptor->desc_id = sw_desc->idx; + + /* Set the INTERRUPT control word */ + hw_descriptor->desc_ctrl = + DESC_OP_MODE_NOP << DESC_OP_MODE_SHIFT; + hw_descriptor->desc_ctrl |= DESC_IOD; + + /* return the async tx descriptor */ + return &sw_desc->async_tx; +} + +/* + * push pending transactions to hardware + */ +static void mv_xor_v2_issue_pending(struct dma_chan *chan) +{ + struct mv_xor_v2_device *xor_dev = + container_of(chan, struct mv_xor_v2_device, dmachan); + + spin_lock_bh(&xor_dev->lock); + + /* + * update the engine with the number of descriptors to + * process + */ + mv_xor_v2_add_desc_to_desq(xor_dev, xor_dev->npendings); + xor_dev->npendings = 0; + + /* Activate the channel */ + writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF); + + spin_unlock_bh(&xor_dev->lock); +} + +static inline +int mv_xor_v2_get_pending_params(struct mv_xor_v2_device *xor_dev, + int *pending_ptr) +{ + u32 reg; + + reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_DONE_OFF); + + /* get the next pending descriptor index */ + *pending_ptr = ((reg >> MV_XOR_V2_DMA_DESQ_DONE_READ_PTR_SHIFT) & + MV_XOR_V2_DMA_DESQ_DONE_READ_PTR_MASK); + + /* get the number of descriptors pending handle */ + return ((reg >> MV_XOR_V2_DMA_DESQ_DONE_PENDING_SHIFT) & + MV_XOR_V2_DMA_DESQ_DONE_PENDING_MASK); +} + +/* + * handle the descriptors after HW process + */ +static void mv_xor_v2_tasklet(unsigned long data) +{ + struct mv_xor_v2_device *xor_dev = (struct mv_xor_v2_device *) data; + int pending_ptr, num_of_pending, i; + struct mv_xor_v2_descriptor *next_pending_hw_desc = NULL; + struct mv_xor_v2_sw_desc *next_pending_sw_desc = NULL; + + dev_dbg(xor_dev->dmadev.dev, "%s %d\n", __func__, __LINE__); + + /* get the pending descriptors parameters */ + num_of_pending = mv_xor_v2_get_pending_params(xor_dev, &pending_ptr); + + /* next HW descriptor */ + next_pending_hw_desc = xor_dev->hw_desq_virt + pending_ptr; + + /* loop over free descriptors */ + for (i = 0; i < num_of_pending; i++) { + + if (pending_ptr > MV_XOR_V2_DESC_NUM) + pending_ptr = 0; + + if (next_pending_sw_desc != NULL) + next_pending_hw_desc++; + + /* get 
the SW descriptor related to the HW descriptor */ + next_pending_sw_desc = + &xor_dev->sw_desq[next_pending_hw_desc->desc_id]; + + /* call the callback */ + if (next_pending_sw_desc->async_tx.cookie > 0) { + /* + * update the channel's completed cookie - no + * lock is required the IMSG threshold provide + * the locking + */ + dma_cookie_complete(&next_pending_sw_desc->async_tx); + + if (next_pending_sw_desc->async_tx.callback) + next_pending_sw_desc->async_tx.callback( + next_pending_sw_desc->async_tx.callback_param); + + dma_descriptor_unmap(&next_pending_sw_desc->async_tx); + } + + dma_run_dependencies(&next_pending_sw_desc->async_tx); + + /* Lock the channel */ + spin_lock_bh(&xor_dev->lock); + + /* add the SW descriptor to the free descriptors list */ + list_add(&next_pending_sw_desc->free_list, + &xor_dev->free_sw_desc); + + /* Release the channel */ + spin_unlock_bh(&xor_dev->lock); + + /* increment the next descriptor */ + pending_ptr++; + } + + if (num_of_pending != 0) { + /* free the descriptores */ + mv_xor_v2_free_desc_from_desq(xor_dev, num_of_pending); + } + + /* Update IMSG threshold, to enable new IMSG interrupts */ + mv_xor_v2_set_imsg_thrd(xor_dev, 0); +} + +/* + * Set DMA Interrupt-message (IMSG) parameters + */ +static void mv_xor_v2_set_msi_msg(struct msi_desc *desc, struct msi_msg *msg) +{ + struct mv_xor_v2_device *xor_dev = dev_get_drvdata(desc->dev); + + writel(msg->address_lo, + xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_BALR_OFF); + writel(msg->address_hi & 0xFFFF, + xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_BAHR_OFF); + writel(msg->data, + xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_CDAT_OFF); +} + +static int mv_xor_v2_descq_init(struct mv_xor_v2_device *xor_dev) +{ + u32 reg; + + /* write the DESQ size to the DMA engine */ + writel(MV_XOR_V2_DESC_NUM, + xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_SIZE_OFF); + + /* write the DESQ address to the DMA enngine*/ + writel(xor_dev->hw_desq & 0xFFFFFFFF, + xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BALR_OFF); + writel((xor_dev->hw_desq & 0xFFFF00000000) >> 32, + xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BAHR_OFF); + + /* enable the DMA engine */ + writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF); + + /* + * This is a temporary solution, until we activate the + * SMMU. Set the attributes for reading & writing data buffers + * & descriptors to: + * + * - OuterShareable - Snoops will be performed on CPU caches + * - Enable cacheable - Bufferable, Modifiable, Other Allocate + * and Allocate + */ + reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ARATTR_OFF); + reg &= ~MV_XOR_V2_DMA_DESQ_ATTR_CACHE_MASK; + reg |= MV_XOR_V2_DMA_DESQ_ATTR_OUTER_SHAREABLE | + MV_XOR_V2_DMA_DESQ_ATTR_CACHEABLE; + writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ARATTR_OFF); + + reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_AWATTR_OFF); + reg &= ~MV_XOR_V2_DMA_DESQ_ATTR_CACHE_MASK; + reg |= MV_XOR_V2_DMA_DESQ_ATTR_OUTER_SHAREABLE | + MV_XOR_V2_DMA_DESQ_ATTR_CACHEABLE; + writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_AWATTR_OFF); + + /* BW CTRL - set values to optimize the XOR performance: + * + * - Set WrBurstLen & RdBurstLen - the unit will issue + * maximum of 256B write/read transactions. + * - Limit the number of outstanding write & read data + * (OBB/IBB) requests to the maximal value. 
+ */ + reg = ((MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_RD_VAL << + MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_RD_SHIFT) | + (MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_WR_VAL << + MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_WR_SHIFT) | + (MV_XOR_V2_GLOB_BW_CTRL_RD_BURST_LEN_VAL << + MV_XOR_V2_GLOB_BW_CTRL_RD_BURST_LEN_SHIFT) | + (MV_XOR_V2_GLOB_BW_CTRL_WR_BURST_LEN_VAL << + MV_XOR_V2_GLOB_BW_CTRL_WR_BURST_LEN_SHIFT)); + writel(reg, xor_dev->glob_base + MV_XOR_V2_GLOB_BW_CTRL); + + /* Disable the AXI timer feature */ + reg = readl(xor_dev->glob_base + MV_XOR_V2_GLOB_PAUSE); + reg |= MV_XOR_V2_GLOB_PAUSE_AXI_TIME_DIS_VAL; + writel(reg, xor_dev->glob_base + MV_XOR_V2_GLOB_PAUSE); + + return 0; +} + +static int mv_xor_v2_probe(struct platform_device *pdev) +{ + struct mv_xor_v2_device *xor_dev; + struct resource *res; + int i, ret = 0; + struct dma_device *dma_dev; + struct mv_xor_v2_sw_desc *sw_desc; + struct msi_desc *msi_desc; + + BUILD_BUG_ON(sizeof(struct mv_xor_v2_descriptor) != + MV_XOR_V2_EXT_DESC_SIZE); + + xor_dev = devm_kzalloc(&pdev->dev, sizeof(*xor_dev), GFP_KERNEL); + if (!xor_dev) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + xor_dev->dma_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(xor_dev->dma_base)) + return PTR_ERR(xor_dev->dma_base); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + xor_dev->glob_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(xor_dev->glob_base)) + return PTR_ERR(xor_dev->glob_base); + + platform_set_drvdata(pdev, xor_dev); + + xor_dev->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER) + return -EPROBE_DEFER; + if (!IS_ERR(xor_dev->clk)) { + ret = clk_prepare_enable(xor_dev->clk); + if (ret) + return ret; + } + + ret = platform_msi_domain_alloc_irqs(&pdev->dev, 1, + mv_xor_v2_set_msi_msg); + if (ret) + goto disable_clk; + + msi_desc = first_msi_entry(&pdev->dev); + if (!msi_desc) + goto free_msi_irqs; + + ret = devm_request_irq(&pdev->dev, msi_desc->irq, + mv_xor_v2_interrupt_handler, 0, + dev_name(&pdev->dev), xor_dev); + if (ret) + goto free_msi_irqs; + + tasklet_init(&xor_dev->irq_tasklet, mv_xor_v2_tasklet, + (unsigned long) xor_dev); + + xor_dev->desc_size = mv_xor_v2_set_desc_size(xor_dev); + + dma_cookie_init(&xor_dev->dmachan); + + /* + * allocate coherent memory for hardware descriptors + * note: writecombine gives slightly better performance, but + * requires that we explicitly flush the writes + */ + xor_dev->hw_desq_virt = + dma_alloc_coherent(&pdev->dev, + xor_dev->desc_size * MV_XOR_V2_DESC_NUM, + &xor_dev->hw_desq, GFP_KERNEL); + if (!xor_dev->hw_desq_virt) { + ret = -ENOMEM; + goto free_msi_irqs; + } + + /* alloc memory for the SW descriptors */ + xor_dev->sw_desq = devm_kzalloc(&pdev->dev, sizeof(*sw_desc) * + MV_XOR_V2_DESC_NUM, GFP_KERNEL); + if (!xor_dev->sw_desq) { + ret = -ENOMEM; + goto free_hw_desq; + } + + spin_lock_init(&xor_dev->lock); + + /* init the free SW descriptors list */ + INIT_LIST_HEAD(&xor_dev->free_sw_desc); + + /* add all SW descriptors to the free list */ + for (i = 0; i < MV_XOR_V2_DESC_NUM; i++) { + xor_dev->sw_desq[i].idx = i; + list_add(&xor_dev->sw_desq[i].free_list, + &xor_dev->free_sw_desc); + } + + dma_dev = &xor_dev->dmadev; + + /* set DMA capabilities */ + dma_cap_zero(dma_dev->cap_mask); + dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask); + dma_cap_set(DMA_XOR, dma_dev->cap_mask); + dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask); + + /* init dma link list */ + INIT_LIST_HEAD(&dma_dev->channels); + + /* set base routines */ + 
dma_dev->device_tx_status = dma_cookie_status; + dma_dev->device_issue_pending = mv_xor_v2_issue_pending; + dma_dev->dev = &pdev->dev; + + dma_dev->device_prep_dma_memcpy = mv_xor_v2_prep_dma_memcpy; + dma_dev->device_prep_dma_interrupt = mv_xor_v2_prep_dma_interrupt; + dma_dev->max_xor = 8; + dma_dev->device_prep_dma_xor = mv_xor_v2_prep_dma_xor; + + xor_dev->dmachan.device = dma_dev; + + list_add_tail(&xor_dev->dmachan.device_node, + &dma_dev->channels); + + mv_xor_v2_descq_init(xor_dev); + + ret = dma_async_device_register(dma_dev); + if (ret) + goto free_hw_desq; + + dev_notice(&pdev->dev, "Marvell Version 2 XOR driver\n"); + + return 0; + +free_hw_desq: + dma_free_coherent(&pdev->dev, + xor_dev->desc_size * MV_XOR_V2_DESC_NUM, + xor_dev->hw_desq_virt, xor_dev->hw_desq); +free_msi_irqs: + platform_msi_domain_free_irqs(&pdev->dev); +disable_clk: + if (!IS_ERR(xor_dev->clk)) + clk_disable_unprepare(xor_dev->clk); + return ret; +} + +static int mv_xor_v2_remove(struct platform_device *pdev) +{ + struct mv_xor_v2_device *xor_dev = platform_get_drvdata(pdev); + + dma_async_device_unregister(&xor_dev->dmadev); + + dma_free_coherent(&pdev->dev, + xor_dev->desc_size * MV_XOR_V2_DESC_NUM, + xor_dev->hw_desq_virt, xor_dev->hw_desq); + + platform_msi_domain_free_irqs(&pdev->dev); + + clk_disable_unprepare(xor_dev->clk); + + return 0; +} + +#ifdef CONFIG_OF +static const struct of_device_id mv_xor_v2_dt_ids[] = { + { .compatible = "marvell,xor-v2", }, + {}, +}; +MODULE_DEVICE_TABLE(of, mv_xor_v2_dt_ids); +#endif + +static struct platform_driver mv_xor_v2_driver = { + .probe = mv_xor_v2_probe, + .remove = mv_xor_v2_remove, + .driver = { + .name = "mv_xor_v2", + .of_match_table = of_match_ptr(mv_xor_v2_dt_ids), + }, +}; + +module_platform_driver(mv_xor_v2_driver); + +MODULE_DESCRIPTION("DMA engine driver for Marvell's Version 2 of XOR engine"); +MODULE_LICENSE("GPL"); + -- cgit v0.10.2 From f1532019dd5b2a9c5b659896968626e6e8fb232e Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Tue, 12 Jul 2016 10:09:15 +0530 Subject: dmaengine: mv_xor_v2: remove trailing whitespace Signed-off-by: Vinod Koul diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c index 8320155..a28a01f 100644 --- a/drivers/dma/mv_xor_v2.c +++ b/drivers/dma/mv_xor_v2.c @@ -876,4 +876,3 @@ module_platform_driver(mv_xor_v2_driver); MODULE_DESCRIPTION("DMA engine driver for Marvell's Version 2 of XOR engine"); MODULE_LICENSE("GPL"); - -- cgit v0.10.2 From 6a2cf55db33d13474b7c339d3e4cef8993508a40 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Wed, 6 Jul 2016 12:19:09 +0000 Subject: dmaengine: qcom_hidma: fix return value check in hidma_mgmt_of_populate_channels() In case of error, the function platform_device_register_full() returns ERR_PTR() and never returns NULL. The NULL test in the return value check should be replaced with IS_ERR(). 
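The same rule applies to any helper that reports failure via ERR_PTR(): the return value is never NULL on error, so the check must use IS_ERR() and propagate PTR_ERR(). A minimal sketch of the corrected call site, mirroring the hunk below:

	new_pdev = platform_device_register_full(&pdevinfo);
	if (IS_ERR(new_pdev)) {
		/* failure is reported as ERR_PTR(-errno), never as NULL */
		ret = PTR_ERR(new_pdev);
		goto out;
	}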
Signed-off-by: Wei Yongjun Signed-off-by: Vinod Koul diff --git a/drivers/dma/qcom/hidma_mgmt.c b/drivers/dma/qcom/hidma_mgmt.c index c0e3653..6416ded 100644 --- a/drivers/dma/qcom/hidma_mgmt.c +++ b/drivers/dma/qcom/hidma_mgmt.c @@ -371,8 +371,8 @@ static int __init hidma_mgmt_of_populate_channels(struct device_node *np) pdevinfo.size_data = 0; pdevinfo.dma_mask = DMA_BIT_MASK(64); new_pdev = platform_device_register_full(&pdevinfo); - if (!new_pdev) { - ret = -ENODEV; + if (IS_ERR(new_pdev)) { + ret = PTR_ERR(new_pdev); goto out; } of_dma_configure(&new_pdev->dev, child); -- cgit v0.10.2 From 1d069bfa3c78c6d0285e5e370710cf7062c71308 Mon Sep 17 00:00:00 2001 From: Michael Olbrich Date: Thu, 7 Jul 2016 11:35:51 +0200 Subject: dmaengine: imx-sdma: ack channel 0 IRQ in the interrupt handler Currently the handler ignores the channel 0 interrupt and thus doesn't ack it properly. This is done in order to allow sdma_run_channel0() to poll on the irq status bit, as this function may be called in atomic context, but needs to know when the channel has finished. This works mostly, as the polling happens under a spinlock, disabling IRQs on the local CPU, leaving only a very slight race window for a spurious IRQ to happen if the handler is executed on another CPU in an SMP system. Still this is clearly suboptimal. This behavior turns into a real problem on an RT system, where the spinlock doesn't disable IRQs on the local CPU. Not acking the IRQ in the handler in such a setup is very likely to drown the CPU in an IRQ storm, leaving it unable to make any progress in the polling loop, leading to the IRQ never being acked. Fix this by properly acknowledging the channel 0 IRQ in the handler. As the IRQ status bit can no longer be used to poll for the channel completion, switch over to using the SDMA_H_STATSTOP register for this purpose, where bit 0 is cleared by the hardware when the channel is done. Signed-off-by: Michael Olbrich Signed-off-by: Lucas Stach Signed-off-by: Vinod Koul diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 0f6fd42..ce865f6 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -18,6 +18,7 @@ */ #include +#include #include #include #include @@ -571,28 +572,20 @@ static void sdma_enable_channel(struct sdma_engine *sdma, int channel) static int sdma_run_channel0(struct sdma_engine *sdma) { int ret; - unsigned long timeout = 500; + u32 reg; sdma_enable_channel(sdma, 0); - while (!(ret = readl_relaxed(sdma->regs + SDMA_H_INTR) & 1)) { - if (timeout-- <= 0) - break; - udelay(1); - } - - if (ret) { - /* Clear the interrupt status */ - writel_relaxed(ret, sdma->regs + SDMA_H_INTR); - } else { + ret = readl_relaxed_poll_timeout_atomic(sdma->regs + SDMA_H_STATSTOP, + reg, !(reg & 1), 1, 500); + if (ret) dev_err(sdma->dev, "Timeout waiting for CH0 ready\n"); - } /* Set bits of CONFIG register with dynamic context switching */ if (readl(sdma->regs + SDMA_H_CONFIG) == 0) writel_relaxed(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG); - return ret ? 
0 : -ETIMEDOUT; + return ret; } static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size, @@ -727,9 +720,9 @@ static irqreturn_t sdma_int_handler(int irq, void *dev_id) unsigned long stat; stat = readl_relaxed(sdma->regs + SDMA_H_INTR); - /* not interested in channel 0 interrupts */ - stat &= ~1; writel_relaxed(stat, sdma->regs + SDMA_H_INTR); + /* channel 0 is special and not handled here, see run_channel0() */ + stat &= ~1; while (stat) { int channel = fls(stat) - 1; -- cgit v0.10.2 From a0d4cb44da6ca0e87f068dd6644370e6eaca422c Mon Sep 17 00:00:00 2001 From: Kedareswara rao Appana Date: Thu, 9 Jun 2016 21:10:14 +0530 Subject: dmaengine: dmatest: Add support for scatter-gather DMA mode This patch updates the dmatest client to Support scatter-gather dma mode. Signed-off-by: Kedareswara rao Appana Signed-off-by: Vinod Koul diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index b8576fd..1245db5 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -51,6 +51,16 @@ module_param(iterations, uint, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(iterations, "Iterations before stopping test (default: infinite)"); +static unsigned int sg_buffers = 1; +module_param(sg_buffers, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(sg_buffers, + "Number of scatter gather buffers (default: 1)"); + +static unsigned int dmatest = 1; +module_param(dmatest, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(dmatest, + "dmatest 0-memcpy 1-slave_sg (default: 1)"); + static unsigned int xor_sources = 3; module_param(xor_sources, uint, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(xor_sources, @@ -431,6 +441,8 @@ static int dmatest_func(void *data) dev = chan->device; if (thread->type == DMA_MEMCPY) src_cnt = dst_cnt = 1; + else if (thread->type == DMA_SG) + src_cnt = dst_cnt = sg_buffers; else if (thread->type == DMA_XOR) { /* force odd to ensure dst = src */ src_cnt = min_odd(params->xor_sources | 1, dev->max_xor); @@ -485,6 +497,8 @@ static int dmatest_func(void *data) dma_addr_t *dsts; unsigned int src_off, dst_off, len; u8 align = 0; + struct scatterlist tx_sg[src_cnt]; + struct scatterlist rx_sg[src_cnt]; total_tests++; @@ -577,10 +591,22 @@ static int dmatest_func(void *data) um->bidi_cnt++; } + sg_init_table(tx_sg, src_cnt); + sg_init_table(rx_sg, src_cnt); + for (i = 0; i < src_cnt; i++) { + sg_dma_address(&rx_sg[i]) = srcs[i]; + sg_dma_address(&tx_sg[i]) = dsts[i] + dst_off; + sg_dma_len(&tx_sg[i]) = len; + sg_dma_len(&rx_sg[i]) = len; + } + if (thread->type == DMA_MEMCPY) tx = dev->device_prep_dma_memcpy(chan, dsts[0] + dst_off, srcs[0], len, flags); + else if (thread->type == DMA_SG) + tx = dev->device_prep_dma_sg(chan, tx_sg, src_cnt, + rx_sg, src_cnt, flags); else if (thread->type == DMA_XOR) tx = dev->device_prep_dma_xor(chan, dsts[0] + dst_off, @@ -748,6 +774,8 @@ static int dmatest_add_threads(struct dmatest_info *info, if (type == DMA_MEMCPY) op = "copy"; + else if (type == DMA_SG) + op = "sg"; else if (type == DMA_XOR) op = "xor"; else if (type == DMA_PQ) @@ -802,9 +830,19 @@ static int dmatest_add_channel(struct dmatest_info *info, INIT_LIST_HEAD(&dtc->threads); if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) { - cnt = dmatest_add_threads(info, dtc, DMA_MEMCPY); - thread_count += cnt > 0 ? cnt : 0; + if (dmatest == 0) { + cnt = dmatest_add_threads(info, dtc, DMA_MEMCPY); + thread_count += cnt > 0 ? cnt : 0; + } } + + if (dma_has_cap(DMA_SG, dma_dev->cap_mask)) { + if (dmatest == 1) { + cnt = dmatest_add_threads(info, dtc, DMA_SG); + thread_count += cnt > 0 ? 
cnt : 0; + } + } + if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { cnt = dmatest_add_threads(info, dtc, DMA_XOR); thread_count += cnt > 0 ? cnt : 0; @@ -877,6 +915,7 @@ static void run_threaded_test(struct dmatest_info *info) request_channels(info, DMA_MEMCPY); request_channels(info, DMA_XOR); + request_channels(info, DMA_SG); request_channels(info, DMA_PQ); } -- cgit v0.10.2 From 7bb45f669610e0fae4b3dfe66056cf85a57014c6 Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Fri, 1 Jul 2016 10:54:56 +0530 Subject: dmaengine: coh901318: explicitly freeup irq dmaengine device should explicitly call devm_free_irq() when using devm_request_irq(). The irq is still ON when devices remove is executed and irq should be quiesced before remove is completed. Signed-off-by: Vinod Koul Acked-by: Linus Walleij diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c index c100616..ba044d4 100644 --- a/drivers/dma/coh901318.c +++ b/drivers/dma/coh901318.c @@ -1280,6 +1280,7 @@ struct coh901318_desc { struct coh901318_base { struct device *dev; void __iomem *virtbase; + unsigned int irq; struct coh901318_pool pool; struct powersave pm; struct dma_device dma_slave; @@ -2680,6 +2681,8 @@ static int __init coh901318_probe(struct platform_device *pdev) if (err) return err; + base->irq = irq; + err = coh901318_pool_create(&base->pool, &pdev->dev, sizeof(struct coh901318_lli), 32); @@ -2760,6 +2763,8 @@ static int coh901318_remove(struct platform_device *pdev) { struct coh901318_base *base = platform_get_drvdata(pdev); + devm_free_irq(&pdev->dev, base->irq, base); + of_dma_controller_free(pdev->dev.of_node); dma_async_device_unregister(&base->dma_memcpy); dma_async_device_unregister(&base->dma_slave); -- cgit v0.10.2 From 85abae1760b5ec66d7b77d3d690ff65a84a8d592 Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Fri, 1 Jul 2016 11:16:47 +0530 Subject: dmaengine: coh901318: kill the tasklets upon exit drivers should ensure that tasklets are killed, so that they can't be run after driver remove is executed. Signed-off-by: Vinod Koul Acked-by: Linus Walleij diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c index ba044d4..9b86c3e 100644 --- a/drivers/dma/coh901318.c +++ b/drivers/dma/coh901318.c @@ -2758,6 +2758,21 @@ static int __init coh901318_probe(struct platform_device *pdev) coh901318_pool_destroy(&base->pool); return err; } +static void coh901318_base_remove(struct coh901318_base *base, const int *pick_chans) +{ + int chans_i; + int i = 0; + struct coh901318_chan *cohc; + + for (chans_i = 0; pick_chans[chans_i] != -1; chans_i += 2) { + for (i = pick_chans[chans_i]; i <= pick_chans[chans_i+1]; i++) { + cohc = &base->chans[i]; + + tasklet_kill(&cohc->tasklet); + } + } + +} static int coh901318_remove(struct platform_device *pdev) { @@ -2765,6 +2780,9 @@ static int coh901318_remove(struct platform_device *pdev) devm_free_irq(&pdev->dev, base->irq, base); + coh901318_base_remove(base, dma_slave_channels); + coh901318_base_remove(base, dma_memcpy_channels); + of_dma_controller_free(pdev->dev.of_node); dma_async_device_unregister(&base->dma_memcpy); dma_async_device_unregister(&base->dma_slave); -- cgit v0.10.2 From f57b7cb46c07b2440e8b3d917726be52e7d78e24 Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Fri, 1 Jul 2016 11:17:00 +0530 Subject: dmaengine: coh901318: statify symbols Sparse complains: drivers/dma/coh901318.c:269:30: warning: symbol 'chan_config' was not declared. Should it be static? drivers/dma/coh901318.c:2806:12: warning: symbol 'coh901318_init' was not declared. Should it be static? 
drivers/dma/coh901318.c:2812:13: warning: symbol 'coh901318_exit' was not declared. Should it be static? Signed-off-by: Vinod Koul Acked-by: Linus Walleij diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c index 9b86c3e..e17fc79 100644 --- a/drivers/dma/coh901318.c +++ b/drivers/dma/coh901318.c @@ -266,7 +266,7 @@ static int dma_memcpy_channels[] = { COH901318_CX_CTRL_DDMA_LEGACY | \ COH901318_CX_CTRL_PRDD_SOURCE) -const struct coh_dma_channel chan_config[U300_DMA_CHANNELS] = { +static const struct coh_dma_channel chan_config[U300_DMA_CHANNELS] = { { .number = U300_DMA_MSL_TX_0, .name = "MSL TX 0", @@ -2803,13 +2803,13 @@ static struct platform_driver coh901318_driver = { }, }; -int __init coh901318_init(void) +static int __init coh901318_init(void) { return platform_driver_probe(&coh901318_driver, coh901318_probe); } subsys_initcall(coh901318_init); -void __exit coh901318_exit(void) +static void __exit coh901318_exit(void) { platform_driver_unregister(&coh901318_driver); } -- cgit v0.10.2 From 638001e0e01470e44da01a6e4dc4ab3e0f49d4ad Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Fri, 1 Jul 2016 11:34:35 +0530 Subject: dmaengine: edma: explicitly freeup irq dmaengine device should explicitly call devm_free_irq() when using devm_request_irq(). The irq is still ON when devices remove is executed and irq should be quiesced before remove is completed. Signed-off-by: Vinod Koul Acked-by: Peter Ujfalusi diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index b95ef74..dffbd4bb 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c @@ -239,6 +239,9 @@ struct edma_cc { bool chmap_exist; enum dma_event_q default_queue; + unsigned int ccint; + unsigned int ccerrint; + /* * The slot_inuse bit for each PaRAM slot is clear unless the slot is * in use by Linux or if it is allocated to be used by DSP. @@ -2283,6 +2286,7 @@ static int edma_probe(struct platform_device *pdev) dev_err(dev, "CCINT (%d) failed --> %d\n", irq, ret); return ret; } + ecc->ccint = irq; } irq = platform_get_irq_byname(pdev, "edma3_ccerrint"); @@ -2298,6 +2302,7 @@ static int edma_probe(struct platform_device *pdev) dev_err(dev, "CCERRINT (%d) failed --> %d\n", irq, ret); return ret; } + ecc->ccerrint = irq; } ecc->dummy_slot = edma_alloc_slot(ecc, EDMA_SLOT_ANY); @@ -2393,6 +2398,9 @@ static int edma_remove(struct platform_device *pdev) struct device *dev = &pdev->dev; struct edma_cc *ecc = dev_get_drvdata(dev); + devm_free_irq(dev, ecc->ccint, ecc); + devm_free_irq(dev, ecc->ccerrint, ecc); + if (dev->of_node) of_dma_controller_free(dev->of_node); dma_async_device_unregister(&ecc->dma_slave); -- cgit v0.10.2 From f4e0628ba37aa4bf2989f912c1f63e3ad1a46704 Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Fri, 1 Jul 2016 13:51:41 +0530 Subject: dmaengine: edma: kill the tasklets upon exit drivers should ensure that tasklets are killed, so that they can't be executed after driver remove is executed, so ensure they are killed. This driver used vchan tasklets, so those need to be killed. 
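The edma hunk below, and the jz4740 and fsl-edma patches later in this series, all instantiate the same virt-dma cleanup idiom: walk the dma_device channel list, unlink each channel, and kill its vchan tasklet so it can no longer run once remove() returns. In generic form (struct foo_chan is a placeholder for a driver channel type embedding a struct virt_dma_chan named vchan; sketch only):

	static void foo_cleanup_vchan(struct dma_device *dmadev)
	{
		struct foo_chan *chan, *tmp;

		list_for_each_entry_safe(chan, tmp, &dmadev->channels,
					 vchan.chan.device_node) {
			list_del(&chan->vchan.chan.device_node);
			/* the tasklet must never fire after remove() returns */
			tasklet_kill(&chan->vchan.task);
		}
	}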
Signed-off-by: Vinod Koul Acked-by: Peter Ujfalusi diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index dffbd4bb..3d277fa 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c @@ -2393,6 +2393,17 @@ err_reg1: return ret; } +static void edma_cleanupp_vchan(struct dma_device *dmadev) +{ + struct edma_chan *echan, *_echan; + + list_for_each_entry_safe(echan, _echan, + &dmadev->channels, vchan.chan.device_node) { + list_del(&echan->vchan.chan.device_node); + tasklet_kill(&echan->vchan.task); + } +} + static int edma_remove(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -2401,6 +2412,8 @@ static int edma_remove(struct platform_device *pdev) devm_free_irq(dev, ecc->ccint, ecc); devm_free_irq(dev, ecc->ccerrint, ecc); + edma_cleanupp_vchan(&ecc->dma_slave); + if (dev->of_node) of_dma_controller_free(dev->of_node); dma_async_device_unregister(&ecc->dma_slave); -- cgit v0.10.2 From 476c7c809ebeefb83e017be543922dede1c10584 Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Fri, 1 Jul 2016 17:34:14 +0530 Subject: dmaengine: fsl-edma: explicitly freeup irq dmaengine device should explicitly call devm_free_irq() when using devm_request_irq(). The irq is still ON when devices remove is executed and irq should be quiesced before remove is completed. Signed-off-by: Vinod Koul Cc: Jingchang Lu Cc: Peter Griffin diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c index cc06eea..d797a3c 100644 --- a/drivers/dma/fsl-edma.c +++ b/drivers/dma/fsl-edma.c @@ -852,6 +852,17 @@ fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma return 0; } +static void fsl_edma_irq_exit( + struct platform_device *pdev, struct fsl_edma_engine *fsl_edma) +{ + if (fsl_edma->txirq == fsl_edma->errirq) { + devm_free_irq(&pdev->dev, fsl_edma->txirq, fsl_edma); + } else { + devm_free_irq(&pdev->dev, fsl_edma->txirq, fsl_edma); + devm_free_irq(&pdev->dev, fsl_edma->errirq, fsl_edma); + } +} + static void fsl_disable_clocks(struct fsl_edma_engine *fsl_edma) { int i; @@ -989,6 +1000,7 @@ static int fsl_edma_remove(struct platform_device *pdev) struct device_node *np = pdev->dev.of_node; struct fsl_edma_engine *fsl_edma = platform_get_drvdata(pdev); + fsl_edma_irq_exit(pdev, fsl_edma); of_dma_controller_free(np); dma_async_device_unregister(&fsl_edma->dma_dev); fsl_disable_clocks(fsl_edma); -- cgit v0.10.2 From cb28c7ab786b3e77f8435a41929b3ee9bcc51eb1 Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Fri, 1 Jul 2016 22:48:49 +0530 Subject: dmaengine: fsl_raid: kill the tasklets upon exit drivers should ensure that tasklets are killed, so that they can't be run after driver remove is executed Signed-off-by: Vinod Koul Cc: Xuelin Shi diff --git a/drivers/dma/fsl_raid.c b/drivers/dma/fsl_raid.c index 4d9470f..1b71d1f 100644 --- a/drivers/dma/fsl_raid.c +++ b/drivers/dma/fsl_raid.c @@ -856,6 +856,8 @@ static int fsl_re_probe(struct platform_device *ofdev) static void fsl_re_remove_chan(struct fsl_re_chan *chan) { + tasklet_kill(&chan->irqtask); + dma_pool_free(chan->re_dev->hw_desc_pool, chan->inb_ring_virt_addr, chan->inb_phys_addr); -- cgit v0.10.2 From f950f025364ad3ad4834cd1058737f272b2cc665 Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Fri, 1 Jul 2016 22:54:40 +0530 Subject: dmaengine: fsl_raid: fix size_t print specifiers size_t should be printed with %zu, not %lu as driver did, so fix these warning by doing this change drivers/dma/fsl_raid.c: In function 'fsl_re_prep_dma_genq': drivers/dma/fsl_raid.c:341:4: warning: format '%lu' expects argument of type 'long unsigned int', 
but argument 3 has type 'size_t' [-Wformat=] len, FSL_RE_MAX_DATA_LEN); ^ drivers/dma/fsl_raid.c: In function 'fsl_re_prep_dma_pq': drivers/dma/fsl_raid.c:428:4: warning: format '%lu' expects argument of type 'long unsigned int', but argument 3 has type 'size_t' [-Wformat=] len, FSL_RE_MAX_DATA_LEN); ^ drivers/dma/fsl_raid.c: In function 'fsl_re_prep_dma_memcpy': drivers/dma/fsl_raid.c:549:4: warning: format '%lu' expects argument of type 'long unsigned int', but argument 3 has type 'size_t' [-Wformat=] len, FSL_RE_MAX_DATA_LEN); ^ Signed-off-by: Vinod Koul diff --git a/drivers/dma/fsl_raid.c b/drivers/dma/fsl_raid.c index 1b71d1f..ec9c73b 100644 --- a/drivers/dma/fsl_raid.c +++ b/drivers/dma/fsl_raid.c @@ -337,7 +337,7 @@ static struct dma_async_tx_descriptor *fsl_re_prep_dma_genq( re_chan = container_of(chan, struct fsl_re_chan, chan); if (len > FSL_RE_MAX_DATA_LEN) { - dev_err(re_chan->dev, "genq tx length %lu, max length %d\n", + dev_err(re_chan->dev, "genq tx length %zu, max length %d\n", len, FSL_RE_MAX_DATA_LEN); return NULL; } @@ -424,7 +424,7 @@ static struct dma_async_tx_descriptor *fsl_re_prep_dma_pq( re_chan = container_of(chan, struct fsl_re_chan, chan); if (len > FSL_RE_MAX_DATA_LEN) { - dev_err(re_chan->dev, "pq tx length is %lu, max length is %d\n", + dev_err(re_chan->dev, "pq tx length is %zu, max length is %d\n", len, FSL_RE_MAX_DATA_LEN); return NULL; } @@ -545,7 +545,7 @@ static struct dma_async_tx_descriptor *fsl_re_prep_dma_memcpy( re_chan = container_of(chan, struct fsl_re_chan, chan); if (len > FSL_RE_MAX_DATA_LEN) { - dev_err(re_chan->dev, "cp tx length is %lu, max length is %d\n", + dev_err(re_chan->dev, "cp tx length is %zu, max length is %d\n", len, FSL_RE_MAX_DATA_LEN); return NULL; } -- cgit v0.10.2 From cec9cfa8d88c8b2ad0789e8441ff98c3f52b8142 Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Sat, 2 Jul 2016 14:48:02 +0530 Subject: dmaengine: jz4740: kill the tasklets upon exit drivers should ensure that tasklets are killed, so that they can't be executed after driver remove is executed, so ensure they are killed. This driver used vchan tasklets, so those need to be killed. Signed-off-by: Vinod Koul Acked-by: Lars-Peter Clausen diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c index 7638b24..9689b36 100644 --- a/drivers/dma/dma-jz4740.c +++ b/drivers/dma/dma-jz4740.c @@ -573,12 +573,26 @@ err_unregister: return ret; } +static void jz4740_cleanup_vchan(struct dma_device *dmadev) +{ + struct jz4740_dmaengine_chan *chan, *_chan; + + list_for_each_entry_safe(chan, _chan, + &dmadev->channels, vchan.chan.device_node) { + list_del(&chan->vchan.chan.device_node); + tasklet_kill(&chan->vchan.task); + } +} + + static int jz4740_dma_remove(struct platform_device *pdev) { struct jz4740_dma_dev *dmadev = platform_get_drvdata(pdev); int irq = platform_get_irq(pdev, 0); free_irq(irq, dmadev); + + jz4740_cleanup_vchan(&dmadev->ddev); dma_async_device_unregister(&dmadev->ddev); clk_disable_unprepare(dmadev->clk); -- cgit v0.10.2 From 6f93b93b2a1bd53f1dad9f3deb4e75874db0256a Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Sat, 2 Jul 2016 14:58:30 +0530 Subject: dmaengine: fsl-edma: kill the tasklets upon exit drivers should ensure that tasklets are killed, so that they can't be executed after driver remove is executed, so ensure they are killed. This driver used vchan tasklets, so those need to be killed. 
Signed-off-by: Vinod Koul Cc: Jingchang Lu Cc: Peter Griffin diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c index d797a3c..6775f2c 100644 --- a/drivers/dma/fsl-edma.c +++ b/drivers/dma/fsl-edma.c @@ -995,12 +995,24 @@ static int fsl_edma_probe(struct platform_device *pdev) return 0; } +static void fsl_edma_cleanup_vchan(struct dma_device *dmadev) +{ + struct fsl_edma_chan *chan, *_chan; + + list_for_each_entry_safe(chan, _chan, + &dmadev->channels, vchan.chan.device_node) { + list_del(&chan->vchan.chan.device_node); + tasklet_kill(&chan->vchan.task); + } +} + static int fsl_edma_remove(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct fsl_edma_engine *fsl_edma = platform_get_drvdata(pdev); fsl_edma_irq_exit(pdev, fsl_edma); + fsl_edma_cleanup_vchan(&fsl_edma->dma_dev); of_dma_controller_free(np); dma_async_device_unregister(&fsl_edma->dma_dev); fsl_disable_clocks(fsl_edma); -- cgit v0.10.2 From ea62aa80bbcc881d0805cbee9afd74e3d029e48e Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Sat, 2 Jul 2016 15:25:01 +0530 Subject: dmaengine: imx-dma: explicitly freeup irq dmaengine device should explicitly call devm_free_irq() when using devm_request_irq(). The irq is still ON when devices remove is executed and irq should be quiesced before remove is completed. Signed-off-by: Vinod Koul diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index 48d85f8..9301d3d 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c @@ -167,6 +167,7 @@ struct imxdma_channel { u32 ccr_to_device; bool enabled_2d; int slot_2d; + unsigned int irq; }; enum imx_dma_type { @@ -186,6 +187,9 @@ struct imxdma_engine { struct imx_dma_2d_config slots_2d[IMX_DMA_2D_SLOTS]; struct imxdma_channel channel[IMX_DMA_CHANNELS]; enum imx_dma_type devtype; + unsigned int irq; + unsigned int irq_err; + }; struct imxdma_filter_data { @@ -1100,6 +1104,7 @@ static int __init imxdma_probe(struct platform_device *pdev) dev_warn(imxdma->dev, "Can't register IRQ for DMA\n"); goto disable_dma_ahb_clk; } + imxdma->irq = irq; irq_err = platform_get_irq(pdev, 1); if (irq_err < 0) { @@ -1113,6 +1118,7 @@ static int __init imxdma_probe(struct platform_device *pdev) dev_warn(imxdma->dev, "Can't register ERRIRQ for DMA\n"); goto disable_dma_ahb_clk; } + imxdma->irq_err = irq_err; } /* enable DMA module */ @@ -1150,6 +1156,8 @@ static int __init imxdma_probe(struct platform_device *pdev) irq + i, i); goto disable_dma_ahb_clk; } + + imxdmac->irq = irq + i; init_timer(&imxdmac->watchdog); imxdmac->watchdog.function = &imxdma_watchdog; imxdmac->watchdog.data = (unsigned long)imxdmac; @@ -1217,10 +1225,31 @@ disable_dma_ipg_clk: return ret; } +static void imxdma_free_irq(struct platform_device *pdev, struct imxdma_engine *imxdma) +{ + int i; + + if (is_imx1_dma(imxdma)) { + disable_irq(imxdma->irq); + disable_irq(imxdma->irq_err); + } + + for (i = 0; i < IMX_DMA_CHANNELS; i++) { + struct imxdma_channel *imxdmac = &imxdma->channel[i]; + + if (!is_imx1_dma(imxdma)) + disable_irq(imxdmac->irq); + + tasklet_kill(&imxdmac->dma_tasklet); + } +} + static int imxdma_remove(struct platform_device *pdev) { struct imxdma_engine *imxdma = platform_get_drvdata(pdev); + imxdma_free_irq(pdev, imxdma); + dma_async_device_unregister(&imxdma->dma_device); if (pdev->dev.of_node) -- cgit v0.10.2 From 71c6b663492c5da78c94d3405c0e8044b8290d00 Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Sat, 2 Jul 2016 15:35:07 +0530 Subject: dmaengine: imx-dma: fix coding style issue imxdma_probe function starting brace is wrongly 
From 5bb9dbb5ae0931fa3c6780a45f651755266f9b6d Mon Sep 17 00:00:00 2001
From: Vinod Koul
Date: Sun, 3 Jul 2016 00:00:55 +0530
Subject: dmaengine: imx-sdma: explicitly freeup irq

A dmaengine device should explicitly call devm_free_irq() when using
devm_request_irq(): the irq is still live when the device's remove
callback runs, and it should be quiesced before remove completes.

Signed-off-by: Vinod Koul
Cc: Sascha Hauer
Cc: Fabio Estevam

diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index ce865f6..18bcf55 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -386,6 +386,7 @@ struct sdma_engine {
 	const struct sdma_driver_data *drvdata;
 	u32 spba_start_addr;
 	u32 spba_end_addr;
+	unsigned int irq;
 };
 
 static struct sdma_driver_data sdma_imx31 = {
@@ -1708,6 +1709,8 @@ static int sdma_probe(struct platform_device *pdev)
 	if (ret)
 		return ret;
 
+	sdma->irq = irq;
+
 	sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL);
 	if (!sdma->script_addrs)
 		return -ENOMEM;
@@ -1833,6 +1836,7 @@ static int sdma_remove(struct platform_device *pdev)
 	struct sdma_engine *sdma = platform_get_drvdata(pdev);
 	int i;
 
+	devm_free_irq(&pdev->dev, sdma->irq, sdma);
 	dma_async_device_unregister(&sdma->dma_device);
 	kfree(sdma->script_addrs);
 	/* Kill the tasklet */
-- cgit v0.10.2

From 486b10a255f70735585c90d334422da65dfe43ac Mon Sep 17 00:00:00 2001
From: Vinod Koul
Date: Sun, 3 Jul 2016 00:02:29 +0530
Subject: dmaengine: k3dma: explicitly freeup irq

A dmaengine device should explicitly call devm_free_irq() when using
devm_request_irq(): the irq is still live when the device's remove
callback runs, and it should be quiesced before remove completes.

Signed-off-by: Vinod Koul
Acked-by: Zhangfei Gao

diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index 35961af..9364dac 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -102,6 +102,7 @@ struct k3_dma_dev {
 	struct clk *clk;
 	u32 dma_channels;
 	u32 dma_requests;
+	unsigned int irq;
 };
 
 #define to_k3_dma(dmadev) container_of(dmadev, struct k3_dma_dev, slave)
@@ -703,6 +704,8 @@ static int k3_dma_probe(struct platform_device *op)
 	if (ret)
 		return ret;
 
+	d->irq = irq;
+
 	/* init phy channel */
 	d->phy = devm_kzalloc(&op->dev,
 		d->dma_channels * sizeof(struct k3_dma_phy), GFP_KERNEL);
@@ -785,6 +788,8 @@ static int k3_dma_remove(struct platform_device *op)
 	dma_async_device_unregister(&d->slave);
 	of_dma_controller_free((&op->dev)->of_node);
 
+	devm_free_irq(&op->dev, d->irq, d);
+
 	list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) {
 		list_del(&c->vc.chan.device_node);
 		tasklet_kill(&c->vc.task);
-- cgit v0.10.2

From a46018929b35f551e29302a89de5ef20c1cfd4f8 Mon Sep 17 00:00:00 2001
From: Vinod Koul
Date: Mon, 4 Jul 2016 15:15:26 +0530
Subject: dmaengine: mmp_pdma: explicitly freeup irq

A dmaengine device should explicitly call devm_free_irq() when using
devm_request_irq(): the irq is still live when the device's remove
callback runs, and it should be quiesced before remove completes.

Signed-off-by: Vinod Koul
Acked-by: Zhangfei Gao

diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index 56f1fd6..f4b25fb 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -931,6 +931,25 @@ static void dma_do_tasklet(unsigned long data)
 static int mmp_pdma_remove(struct platform_device *op)
 {
 	struct mmp_pdma_device *pdev = platform_get_drvdata(op);
+	struct mmp_pdma_phy *phy;
+	int i, irq = 0, irq_num = 0;
+
+
+	for (i = 0; i < pdev->dma_channels; i++) {
+		if (platform_get_irq(op, i) > 0)
+			irq_num++;
+	}
+
+	if (irq_num != pdev->dma_channels) {
+		irq = platform_get_irq(op, 0);
+		devm_free_irq(&op->dev, irq, pdev);
+	} else {
+		for (i = 0; i < pdev->dma_channels; i++) {
+			phy = &pdev->phy[i];
+			irq = platform_get_irq(op, i);
+			devm_free_irq(&op->dev, irq, phy);
+		}
+	}
 
 	dma_async_device_unregister(&pdev->device);
 	return 0;
-- cgit v0.10.2
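[Editorial aside] All of these conversions rely on probe having recorded which IRQ it requested, which is why each patch also adds an irq field. The probe-side bookkeeping, sketched with illustrative names (foo_dma_isr is assumed to exist elsewhere):

struct foo_dma_dev {
	struct dma_device ddev;
	struct tasklet_struct tasklet;
	unsigned int irq;	/* saved so remove() can devm_free_irq() it */
};

static irqreturn_t foo_dma_isr(int irq, void *data);	/* assumed */

static int foo_dma_probe(struct platform_device *pdev)
{
	struct foo_dma_dev *fdev;
	int irq, ret;

	fdev = devm_kzalloc(&pdev->dev, sizeof(*fdev), GFP_KERNEL);
	if (!fdev)
		return -ENOMEM;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(&pdev->dev, irq, foo_dma_isr, 0,
			       dev_name(&pdev->dev), fdev);
	if (ret)
		return ret;

	fdev->irq = irq;	/* the bookkeeping the patches above add */
	platform_set_drvdata(pdev, fdev);
	return 0;
}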
From 0422e30458d6cd9e8dd27913b6a3a72db47eadab Mon Sep 17 00:00:00 2001
From: Vinod Koul
Date: Mon, 4 Jul 2016 15:39:48 +0530
Subject: dmaengine: mmp_tdma: statify symbols

Sparse complains:
drivers/dma/mmp_tdma.c:407:22: warning: symbol 'mmp_tdma_alloc_descriptor' was not declared. Should it be static?
drivers/dma/mmp_tdma.c:595:17: warning: symbol 'mmp_tdma_xlate' was not declared. Should it be static?

Signed-off-by: Vinod Koul
Cc: Qiao Zhou

diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index ba7f412..b3441f5 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -404,7 +404,7 @@ static void mmp_tdma_free_chan_resources(struct dma_chan *chan)
 	return;
 }
 
-struct mmp_tdma_desc *mmp_tdma_alloc_descriptor(struct mmp_tdma_chan *tdmac)
+static struct mmp_tdma_desc *mmp_tdma_alloc_descriptor(struct mmp_tdma_chan *tdmac)
 {
 	struct gen_pool *gpool;
 	int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc);
@@ -592,7 +592,7 @@ static bool mmp_tdma_filter_fn(struct dma_chan *chan, void *fn_param)
 	return true;
 }
 
-struct dma_chan *mmp_tdma_xlate(struct of_phandle_args *dma_spec,
+static struct dma_chan *mmp_tdma_xlate(struct of_phandle_args *dma_spec,
 			 struct of_dma *ofdma)
 {
 	struct mmp_tdma_device *tdev = ofdma->of_dma_data;
-- cgit v0.10.2

From 144fa37f5b4f9b81dfab79c8d440b4aba8b07fde Mon Sep 17 00:00:00 2001
From: Vinod Koul
Date: Mon, 4 Jul 2016 15:41:25 +0530
Subject: dmaengine: moxart-dma: explicitly freeup irq

A dmaengine device should explicitly call devm_free_irq() when using
devm_request_irq(): the irq is still live when the device's remove
callback runs, and it should be quiesced before remove completes.

Signed-off-by: Vinod Koul
Cc: Jonas Jensen

diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
index b3a1d9a..a6e6427 100644
--- a/drivers/dma/moxart-dma.c
+++ b/drivers/dma/moxart-dma.c
@@ -148,6 +148,7 @@ struct moxart_chan {
 struct moxart_dmadev {
 	struct dma_device dma_slave;
 	struct moxart_chan slave_chans[APB_DMA_MAX_CHANNEL];
+	unsigned int irq;
 };
 
 struct moxart_filter_data {
@@ -615,6 +616,7 @@ static int moxart_probe(struct platform_device *pdev)
 		dev_err(dev, "devm_request_irq failed\n");
 		return ret;
 	}
+	mdc->irq = irq;
 
 	ret = dma_async_device_register(&mdc->dma_slave);
 	if (ret) {
@@ -638,6 +640,8 @@ static int moxart_remove(struct platform_device *pdev)
 {
 	struct moxart_dmadev *m = platform_get_drvdata(pdev);
 
+	devm_free_irq(&pdev->dev, m->irq, m);
+
 	dma_async_device_unregister(&m->dma_slave);
 
 	if (pdev->dev.of_node)
-- cgit v0.10.2
From 84c610ba5476b6c38ef8e6bc834993bb38cf1208 Mon Sep 17 00:00:00 2001
From: Vinod Koul
Date: Mon, 4 Jul 2016 16:01:18 +0530
Subject: dmaengine: nbpfaxi: explicitly freeup irq

A dmaengine device should explicitly call devm_free_irq() when using
devm_request_irq(): the irq is still live when the device's remove
callback runs, and it should be quiesced before remove completes.

Signed-off-by: Vinod Koul
Cc: Guennadi Liakhovetski

diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c
index 9f0e98b..f489f4e 100644
--- a/drivers/dma/nbpfaxi.c
+++ b/drivers/dma/nbpfaxi.c
@@ -227,6 +227,7 @@ struct nbpf_device {
 	void __iomem *base;
 	struct clk *clk;
 	const struct nbpf_config *config;
+	unsigned int eirq;
 	struct nbpf_channel chan[];
 };
 
@@ -1375,6 +1376,7 @@ static int nbpf_probe(struct platform_device *pdev)
 			       IRQF_SHARED, "dma error", nbpf);
 	if (ret < 0)
 		return ret;
+	nbpf->eirq = eirq;
 
 	INIT_LIST_HEAD(&dma_dev->channels);
 
@@ -1446,6 +1448,15 @@ e_clk_off:
 static int nbpf_remove(struct platform_device *pdev)
 {
 	struct nbpf_device *nbpf = platform_get_drvdata(pdev);
+	int i;
+
+	devm_free_irq(&pdev->dev, nbpf->eirq, nbpf);
+
+	for (i = 0; i < nbpf->config->num_channels; i++) {
+		struct nbpf_channel *chan = nbpf->chan + i;
+
+		devm_free_irq(&pdev->dev, chan->irq, chan);
+	}
 
 	of_dma_controller_free(pdev->dev.of_node);
 	dma_async_device_unregister(&nbpf->dma_dev);
-- cgit v0.10.2

From b63abf18796f2b5cab22a3b48b4f854dbee0faaa Mon Sep 17 00:00:00 2001
From: Vinod Koul
Date: Mon, 4 Jul 2016 16:06:04 +0530
Subject: dmaengine: nbpfaxi: kill the tasklets upon exit

Drivers should kill their tasklets upon exit so that they cannot run
after driver remove has executed.

Signed-off-by: Vinod Koul
Cc: Guennadi Liakhovetski

diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c
index f489f4e..08c45c1 100644
--- a/drivers/dma/nbpfaxi.c
+++ b/drivers/dma/nbpfaxi.c
@@ -1456,6 +1456,8 @@ static int nbpf_remove(struct platform_device *pdev)
 		struct nbpf_channel *chan = nbpf->chan + i;
 
 		devm_free_irq(&pdev->dev, chan->irq, chan);
+
+		tasklet_kill(&chan->tasklet);
 	}
 
 	of_dma_controller_free(pdev->dev.of_node);
-- cgit v0.10.2

From 085fedf7ee17a966d3e5b5d8523a18e3017242cf Mon Sep 17 00:00:00 2001
From: Vinod Koul
Date: Mon, 4 Jul 2016 16:13:09 +0530
Subject: dmaengine: mpc512x: kill the tasklets upon exit

Drivers should kill their tasklets upon exit so that they cannot run
after driver remove has executed.

Signed-off-by: Vinod Koul
Cc: Mario Six

diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index ccadafa..fa86592 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -1110,6 +1110,7 @@ static int mpc_dma_remove(struct platform_device *op)
 	}
 	free_irq(mdma->irq, mdma);
 	irq_dispose_mapping(mdma->irq);
+	tasklet_kill(&mdma->tasklet);
 
 	return 0;
 }
-- cgit v0.10.2
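[Editorial aside] The property these tasklet patches rely on: tasklet_kill() blocks until an already-running tasklet finishes and keeps it from being rescheduled, but it cannot stop the IRQ handler from scheduling a fresh one -- hence the free-the-IRQ-first ordering. The race in miniature, with illustrative foo_* names:

static irqreturn_t foo_dma_isr(int irq, void *data)
{
	struct foo_dma_dev *fdev = data;

	/* completion work is deferred to tasklet context */
	tasklet_schedule(&fdev->tasklet);
	return IRQ_HANDLED;
}

static void foo_dma_shutdown(struct foo_dma_dev *fdev)
{
	free_irq(fdev->irq, fdev);	/* no new tasklet_schedule() after this */
	tasklet_kill(&fdev->tasklet);	/* wait out one already scheduled */
}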
From 898dbbf65f1d041d5c1b29a0880286e26fac5076 Mon Sep 17 00:00:00 2001
From: Vinod Koul
Date: Tue, 5 Jul 2016 09:58:33 +0530
Subject: dmaengine: omap-dma: explicitly freeup irq

A dmaengine device should explicitly call devm_free_irq() when using
devm_request_irq(): the irq is still live when the device's remove
callback runs, and it should be quiesced before remove completes.

Signed-off-by: Vinod Koul
Acked-by: Peter Ujfalusi

diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index 1e984e1..5ff5299 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -1204,10 +1204,14 @@ static int omap_dma_probe(struct platform_device *pdev)
 static int omap_dma_remove(struct platform_device *pdev)
 {
 	struct omap_dmadev *od = platform_get_drvdata(pdev);
+	int irq;
 
 	if (pdev->dev.of_node)
 		of_dma_controller_free(pdev->dev.of_node);
 
+	irq = platform_get_irq(pdev, 1);
+	devm_free_irq(&pdev->dev, irq, od);
+
 	dma_async_device_unregister(&od->ddev);
 
 	if (!od->legacy) {
-- cgit v0.10.2

From 46cf94d6ab38420690d890d9922bfc61a7b3e2c5 Mon Sep 17 00:00:00 2001
From: Vinod Koul
Date: Tue, 5 Jul 2016 10:02:16 +0530
Subject: dmaengine: pl330: explicitly freeup irq

A dmaengine device should explicitly call devm_free_irq() when using
devm_request_irq(): the irq is still live when the device's remove
callback runs, and it should be quiesced before remove completes.

Signed-off-by: Vinod Koul
Cc: Jassi Brar
Cc: Linus Walleij

diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index c8767d3..4fc3ffb 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -3002,12 +3002,18 @@ static int pl330_remove(struct amba_device *adev)
 {
 	struct pl330_dmac *pl330 = amba_get_drvdata(adev);
 	struct dma_pl330_chan *pch, *_p;
+	int i, irq;
 
 	pm_runtime_get_noresume(pl330->ddma.dev);
 
 	if (adev->dev.of_node)
 		of_dma_controller_free(adev->dev.of_node);
 
+	for (i = 0; i < AMBA_NR_IRQS; i++) {
+		irq = adev->irq[i];
+		devm_free_irq(&adev->dev, irq, pl330);
+	}
+
 	dma_async_device_unregister(&pl330->ddma);
 
 	/* Idle the DMAC */
-- cgit v0.10.2

From 9200ebd8b23a32381e3647a4d326b07561e5d222 Mon Sep 17 00:00:00 2001
From: Vinod Koul
Date: Tue, 5 Jul 2016 11:41:23 +0530
Subject: dmaengine: s3c24xx: explicitly freeup irq

A dmaengine device should explicitly call devm_free_irq() when using
devm_request_irq(): the irq is still live when the device's remove
callback runs, and it should be quiesced before remove completes.

Signed-off-by: Vinod Koul
Reviewed-by: Krzysztof Kozlowski

diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c
index 0d2d187..163d95f 100644
--- a/drivers/dma/s3c24xx-dma.c
+++ b/drivers/dma/s3c24xx-dma.c
@@ -1359,6 +1359,18 @@ err_memcpy:
 	return ret;
 }
 
+static void s3c24xx_dma_free_irq(struct platform_device *pdev,
+				struct s3c24xx_dma_engine *s3cdma)
+{
+	int i;
+
+	for (i = 0; i < s3cdma->pdata->num_phy_channels; i++) {
+		struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i];
+
+		devm_free_irq(&pdev->dev, phy->irq, phy);
+	}
+}
+
 static int s3c24xx_dma_remove(struct platform_device *pdev)
 {
 	const struct s3c24xx_dma_platdata *pdata = dev_get_platdata(&pdev->dev);
@@ -1369,6 +1381,8 @@ static int s3c24xx_dma_remove(struct platform_device *pdev)
 	dma_async_device_unregister(&s3cdma->slave);
 	dma_async_device_unregister(&s3cdma->memcpy);
 
+	s3c24xx_dma_free_irq(pdev, s3cdma);
+
 	s3c24xx_dma_free_virtual_channels(&s3cdma->slave);
 	s3c24xx_dma_free_virtual_channels(&s3cdma->memcpy);
-- cgit v0.10.2
From 7e654bf7c10162c3cc7fed5c520a2adef9d6e8f7 Mon Sep 17 00:00:00 2001
From: Vinod Koul
Date: Tue, 5 Jul 2016 11:42:28 +0530
Subject: dmaengine: s3c24xx: kill the tasklets upon exit

Drivers should kill their tasklets upon exit so that they cannot run
after driver remove has executed. This driver uses vchan tasklets, so
those need to be killed.

Signed-off-by: Vinod Koul
Reviewed-by: Krzysztof Kozlowski

diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c
index 163d95f..ce67075 100644
--- a/drivers/dma/s3c24xx-dma.c
+++ b/drivers/dma/s3c24xx-dma.c
@@ -1136,8 +1136,10 @@ static void s3c24xx_dma_free_virtual_channels(struct dma_device *dmadev)
 	struct s3c24xx_dma_chan *next;
 
 	list_for_each_entry_safe(chan,
-				 next, &dmadev->channels, vc.chan.device_node)
+				 next, &dmadev->channels, vc.chan.device_node) {
 		list_del(&chan->vc.chan.device_node);
+		tasklet_kill(&chan->vc.task);
+	}
 }
 
 /* s3c2410, s3c2440 and s3c2442 have a 0x40 stride without separate clocks */
-- cgit v0.10.2

From 1f11e37729d7bedd5c9aba59550f694307b7efd9 Mon Sep 17 00:00:00 2001
From: Vinod Koul
Date: Tue, 5 Jul 2016 11:56:10 +0530
Subject: dmaengine: sirf-dma: kill the tasklets upon exit

Drivers should kill their tasklets upon exit so that they cannot run
after driver remove has executed.

Signed-off-by: Vinod Koul
Cc: Barry Song

diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
index 9068779..d8bc3f2 100644
--- a/drivers/dma/sirf-dma.c
+++ b/drivers/dma/sirf-dma.c
@@ -980,6 +980,7 @@ static int sirfsoc_dma_remove(struct platform_device *op)
 	of_dma_controller_free(op->dev.of_node);
 	dma_async_device_unregister(&sdma->dma);
 	free_irq(sdma->irq, sdma);
+	tasklet_kill(&sdma->tasklet);
 	irq_dispose_mapping(sdma->irq);
 	pm_runtime_disable(&op->dev);
 	if (!pm_runtime_status_suspended(&op->dev))
-- cgit v0.10.2

From debc4849007517be8f03a199ea29dc3f797c329e Mon Sep 17 00:00:00 2001
From: Vinod Koul
Date: Tue, 5 Jul 2016 14:52:21 +0530
Subject: dmaengine: txx9dmac: explicitly freeup irq

A dmaengine device should explicitly call devm_free_irq() when using
devm_request_irq(): the irq is still live when the device's remove
callback runs, and it should be quiesced before remove completes.

Signed-off-by: Vinod Koul

diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index 8849318..7632290 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -1165,9 +1165,12 @@ static int txx9dmac_chan_remove(struct platform_device *pdev)
 {
 	struct txx9dmac_chan *dc = platform_get_drvdata(pdev);
 
+
 	dma_async_device_unregister(&dc->dma);
-	if (dc->irq >= 0)
+	if (dc->irq >= 0) {
+		devm_free_irq(&pdev->dev, dc->irq, dc);
 		tasklet_kill(&dc->tasklet);
+	}
 	dc->ddev->chan[pdev->id % TXX9_DMA_MAX_NR_CHANNELS] = NULL;
 	return 0;
 }
@@ -1228,8 +1231,10 @@ static int txx9dmac_remove(struct platform_device *pdev)
 	struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);
 
 	txx9dmac_off(ddev);
-	if (ddev->irq >= 0)
+	if (ddev->irq >= 0) {
+		devm_free_irq(&pdev->dev, ddev->irq, ddev);
 		tasklet_kill(&ddev->tasklet);
+	}
 	return 0;
 }
-- cgit v0.10.2
From bd16934a5630f1e7294f33f1f72d89d4f6e6aeae Mon Sep 17 00:00:00 2001
From: Vinod Koul
Date: Tue, 5 Jul 2016 14:57:40 +0530
Subject: dmaengine: qcom_hidma: kill the tasklets upon exit

Drivers should kill their tasklets upon exit so that they cannot run
after driver remove has executed.

Signed-off-by: Vinod Koul
Acked-by: Sinan Kaya

diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
index 41b5c6d..b2374cd 100644
--- a/drivers/dma/qcom/hidma.c
+++ b/drivers/dma/qcom/hidma.c
@@ -708,6 +708,7 @@ static int hidma_remove(struct platform_device *pdev)
 	pm_runtime_get_sync(dmadev->ddev.dev);
 	dma_async_device_unregister(&dmadev->ddev);
 	devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev);
+	tasklet_kill(&dmadev->task);
 	hidma_debug_uninit(dmadev);
 	hidma_ll_uninit(dmadev->lldev);
 	hidma_free(dmadev);
-- cgit v0.10.2

From a19346eaeca780fd61b845a3ccb0362ecb2c6e23 Mon Sep 17 00:00:00 2001
From: Vinod Koul
Date: Tue, 5 Jul 2016 15:29:15 +0530
Subject: dmaengine: coh901318: remove owner assignment

The debugfs file operations owner is set by the core, so remove the
assignment.

Signed-off-by: Vinod Koul
Cc: Linus Walleij

diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index e17fc79..e4acd63 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -1365,7 +1365,6 @@ static int coh901318_debugfs_read(struct file *file, char __user *buf,
 }
 
 static const struct file_operations coh901318_debugfs_status_operations = {
-	.owner		= THIS_MODULE,
 	.open		= simple_open,
 	.read		= coh901318_debugfs_read,
 	.llseek		= default_llseek,
-- cgit v0.10.2

From 23a396611fb1c009fcec432eb4076379a66deb5a Mon Sep 17 00:00:00 2001
From: Vinod Koul
Date: Tue, 5 Jul 2016 15:32:25 +0530
Subject: dmaengine: fsl_raid: remove owner assignment

The platform driver operations owner is set by the core, so remove the
assignment.

Signed-off-by: Vinod Koul
Cc: Xuelin Shi

diff --git a/drivers/dma/fsl_raid.c b/drivers/dma/fsl_raid.c
index ec9c73b..aad167e 100644
--- a/drivers/dma/fsl_raid.c
+++ b/drivers/dma/fsl_raid.c
@@ -892,7 +892,6 @@ static struct of_device_id fsl_re_ids[] = {
 static struct platform_driver fsl_re_driver = {
 	.driver = {
 		.name = "fsl-raideng",
-		.owner = THIS_MODULE,
 		.of_match_table = fsl_re_ids,
 	},
 	.probe = fsl_re_probe,
-- cgit v0.10.2

From 376ab15fe2a77e27cd7e9cb198530b221906dbcf Mon Sep 17 00:00:00 2001
From: Vinod Koul
Date: Tue, 5 Jul 2016 15:33:44 +0530
Subject: dmaengine: pxa_dma: remove owner assignment

The debugfs file operations owner is set by the core, so remove the
assignment.

Signed-off-by: Vinod Koul
Acked-by: Robert Jarzmik

diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index e756a30c..1966c52 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -318,7 +318,6 @@ static int
 dbg_open_##name(struct inode *inode, struct file *file) \
 { \
 	return single_open(file, dbg_show_##name, inode->i_private); \
 } \
 static const struct file_operations dbg_fops_##name = { \
-	.owner		= THIS_MODULE, \
 	.open		= dbg_open_##name, \
 	.llseek		= seq_lseek, \
 	.read		= seq_read, \
-- cgit v0.10.2
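[Editorial aside] The .owner removals work because the debugfs and driver cores take the module reference themselves. For the single-open seq_file boilerplate seen in coh901318 and pxa_dma, newer kernels can generate the whole file_operations with a helper macro; a sketch, assuming a tree recent enough to provide DEFINE_SHOW_ATTRIBUTE:

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static int foo_status_show(struct seq_file *s, void *unused)
{
	seq_puts(s, "channel status goes here\n");	/* illustrative */
	return 0;
}

/* Expands to foo_status_open() plus a const struct file_operations,
 * with no .owner assignment needed. */
DEFINE_SHOW_ATTRIBUTE(foo_status);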
From 4cad91b2a9977c8d92f342c4a9b73cd9dbce8f21 Mon Sep 17 00:00:00 2001
From: Vinod Koul
Date: Wed, 6 Jul 2016 22:07:23 +0530
Subject: dmaengine: qcom_hidma_lli: kill the tasklets upon exit

Drivers should kill their tasklets upon exit so that they cannot run
after driver remove has executed.

Signed-off-by: Vinod Koul
Cc: Sinan Kaya

diff --git a/drivers/dma/qcom/hidma_ll.c b/drivers/dma/qcom/hidma_ll.c
index f392900..ad20dfb 100644
--- a/drivers/dma/qcom/hidma_ll.c
+++ b/drivers/dma/qcom/hidma_ll.c
@@ -831,6 +831,7 @@ int hidma_ll_uninit(struct hidma_lldev *lldev)
 	required_bytes = sizeof(struct hidma_tre) * lldev->nr_tres;
 	tasklet_kill(&lldev->task);
+	tasklet_kill(&lldev->rst_task);
 	memset(lldev->trepool, 0, required_bytes);
 	lldev->trepool = NULL;
 	lldev->pending_tre_count = 0;
-- cgit v0.10.2

From a03811045ee3c2be1d333fb2136d06f244302d46 Mon Sep 17 00:00:00 2001
From: Vinod Koul
Date: Fri, 8 Jul 2016 10:30:57 +0530
Subject: dmaengine: cppi: remove unused and bogus check

In cppi41_dma_prep_slave_sg() the variable num is initialized to zero
and never updated, yet a BUG_ON() checks whether it is greater than
zero, which is always false. Remove the bogus check and the variable.

Reported-by: David Binderman
Signed-off-by: Vinod Koul

diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
index ceedafb..4b23174 100644
--- a/drivers/dma/cppi41.c
+++ b/drivers/dma/cppi41.c
@@ -497,16 +497,13 @@ static struct dma_async_tx_descriptor *cppi41_dma_prep_slave_sg(
 	struct cppi41_desc *d;
 	struct scatterlist *sg;
 	unsigned int i;
-	unsigned int num;
 
-	num = 0;
 	d = c->desc;
 	for_each_sg(sgl, sg, sg_len, i) {
 		u32 addr;
 		u32 len;
 
 		/* We need to use more than one desc once musb supports sg */
-		BUG_ON(num > 0);
 		addr = lower_32_bits(sg_dma_address(sg));
 		len = sg_dma_len(sg);
-- cgit v0.10.2
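[Editorial aside] For context on the cppi41 change: the prep callback walks the mapped scatterlist once per entry, and the removed BUG_ON() was a leftover guard on a counter that never moved. The canonical iteration looks like this (a sketch, not the driver's exact code):

#include <linux/kernel.h>
#include <linux/scatterlist.h>

static void foo_walk_sg(struct scatterlist *sgl, unsigned int sg_len)
{
	struct scatterlist *sg;
	unsigned int i;

	for_each_sg(sgl, sg, sg_len, i) {
		/* addresses/lengths as set up by dma_map_sg() */
		u32 addr = lower_32_bits(sg_dma_address(sg));
		u32 len = sg_dma_len(sg);

		pr_debug("sg %u: addr %#x len %u\n", i, addr, len);
		/* ...program one hardware descriptor per entry... */
	}
}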
From 24a1b5a011b0d223ec0d97f6f69cd4d66101684b Mon Sep 17 00:00:00 2001
From: Vinod Koul
Date: Fri, 8 Jul 2016 10:39:26 +0530
Subject: dmaengine: imx-sdma: remove dummy assignment

David reported:
drivers/dma/imx-sdma.c:1003]: (style) Same expression on both sides of '|='

ORing a value with itself yields the same result, so remove the
assignment.

Reported-by: David Binderman
Cc: Sascha Hauer
Cc: Fabio Estevam
Signed-off-by: Vinod Koul

diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 18bcf55..584cce9 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -993,8 +993,6 @@ static int sdma_config_channel(struct dma_chan *chan)
 	} else
 		__set_bit(sdmac->event_id0, sdmac->event_mask);
 
-	/* Watermark Level */
-	sdmac->watermark_level |= sdmac->watermark_level;
 	/* Address */
 	sdmac->shp_addr = sdmac->per_address;
 	sdmac->per_addr = sdmac->per_address2;
-- cgit v0.10.2

From 0d605ba0b8ce7f5963124853774cc9bd84589a99 Mon Sep 17 00:00:00 2001
From: Vinod Koul
Date: Fri, 8 Jul 2016 10:43:27 +0530
Subject: dmaengine: imx-sdma: remove assignment never used

David reported:
[drivers/dma/imx-sdma.c:769]: (style) Variable 'emi_2_emi' is assigned a value that is never used

Since emi_2_emi is never used afterwards, remove it as well.

Reported-by: David Binderman
Cc: Sascha Hauer
Cc: Fabio Estevam
Signed-off-by: Vinod Koul

diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 584cce9..03ec76f 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -752,7 +752,7 @@ static void sdma_get_pc(struct sdma_channel *sdmac,
 	 * These are needed once we start to support transfers between
 	 * two peripherals or memory-to-memory transfers
 	 */
-	int per_2_per = 0, emi_2_emi = 0;
+	int per_2_per = 0;
 
 	sdmac->pc_from_device = 0;
 	sdmac->pc_to_device = 0;
@@ -760,7 +760,6 @@ static void sdma_get_pc(struct sdma_channel *sdmac,
 
 	switch (peripheral_type) {
 	case IMX_DMATYPE_MEMORY:
-		emi_2_emi = sdma->script_addrs->ap_2_ap_addr;
 		break;
 	case IMX_DMATYPE_DSP:
 		emi_2_per = sdma->script_addrs->bp_2_ap_addr;
-- cgit v0.10.2

From 7d604663255ac757ab4b0e17f533cba136486551 Mon Sep 17 00:00:00 2001
From: Robert Jarzmik
Date: Sun, 10 Jul 2016 23:50:49 +0200
Subject: dmaengine: pxa_dma: implement device_synchronize

Implement the function that waits until a dma channel is stopped,
providing a synchronization point. This also protects pxad_remove()
from races such as spurious interrupts while removing the driver,
because:
- as long as there is one dma channel requested, ie. dma_chan_get()
  but no dma_chan_put(), the try_module_get() of dma_chan_get()
  prevents the remove() routine from running
- when the last channel is released, ie. the last dma_chan_put() is
  called, if there is a running DMA, pxad_synchronize() is called
- pxad_synchronize() waits for the channel to stop, which in turn
  ensures on pxa architecture that the interrupt cannot be fired
  anymore

Reported-by: Vinod Koul
Signed-off-by: Robert Jarzmik
Signed-off-by: Vinod Koul

diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index 1966c52..dc7850a 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -21,6 +21,7 @@
 #include
 #include
 #include
+#include
 #include
 
 #include "dmaengine.h"
@@ -118,6 +119,8 @@ struct pxad_chan {
 	struct pxad_phy		*phy;
 	struct dma_pool		*desc_pool;	/* Descriptors pool */
 	dma_cookie_t		bus_error;
+
+	wait_queue_head_t	wq_state;
 };
 
 struct pxad_device {
@@ -571,6 +574,7 @@ static void pxad_launch_chan(struct pxad_chan *chan,
 	 */
 	phy_writel(chan->phy, desc->first, DDADR);
 	phy_enable(chan->phy, chan->misaligned);
+	wake_up(&chan->wq_state);
 }
 
 static void set_updater_desc(struct pxad_desc_sw *sw_desc,
@@ -716,6 +720,7 @@ static irqreturn_t pxad_chan_handler(int irq, void *dev_id)
 		}
 	}
 	spin_unlock_irqrestore(&chan->vc.lock, flags);
+	wake_up(&chan->wq_state);
 
 	return IRQ_HANDLED;
 }
@@ -1267,6 +1272,14 @@ static enum dma_status pxad_tx_status(struct dma_chan *dchan,
 	return ret;
 }
 
+static void pxad_synchronize(struct dma_chan *dchan)
+{
+	struct pxad_chan *chan = to_pxad_chan(dchan);
+
+	wait_event(chan->wq_state, !is_chan_running(chan));
+	vchan_synchronize(&chan->vc);
+}
+
 static void pxad_free_channels(struct dma_device *dmadev)
 {
 	struct pxad_chan *c, *cn;
@@ -1371,6 +1384,7 @@ static int pxad_init_dmadev(struct platform_device *op,
 	pdev->slave.device_tx_status = pxad_tx_status;
 	pdev->slave.device_issue_pending = pxad_issue_pending;
 	pdev->slave.device_config = pxad_config;
+	pdev->slave.device_synchronize = pxad_synchronize;
 	pdev->slave.device_terminate_all = pxad_terminate_all;
 
 	if (op->dev.coherent_dma_mask)
@@ -1388,6 +1402,7 @@ static int pxad_init_dmadev(struct platform_device *op,
 		return -ENOMEM;
 	c->vc.desc_free = pxad_free_desc;
 	vchan_init(&c->vc, &pdev->slave);
+	init_waitqueue_head(&c->wq_state);
 	}
 
 	return dma_async_device_register(&pdev->slave);
-- cgit v0.10.2

From 184ff2aa3c0ba7f1cd44ed7e8d766e12e43694e2 Mon Sep 17 00:00:00 2001
From: Vinod Koul
Date: Sat, 16 Jul 2016 19:56:21 +0530
Subject: dmaengine: ioat: statify symbol

Sparse warns:
drivers/dma/ioat/init.c:1215:6: warning: symbol 'ioat_resume' was not declared. Should it be static?

Signed-off-by: Vinod Koul
Acked-by: Dave Jiang

diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index d406056..7145f77 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -1212,7 +1212,7 @@ static void ioat_shutdown(struct pci_dev *pdev)
 	ioat_disable_interrupts(ioat_dma);
 }
 
-void ioat_resume(struct ioatdma_device *ioat_dma)
+static void ioat_resume(struct ioatdma_device *ioat_dma)
 {
 	struct ioatdma_chan *ioat_chan;
 	u32 chanerr;
-- cgit v0.10.2
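[Editorial aside] The pxa_dma synchronize patch above is the reference pattern for device_synchronize on vchan-based drivers: sleep until the hardware says the channel is idle, then let virt-dma flush descriptor callbacks. Reduced to its skeleton, with illustrative foo_* names and an assumed is-running predicate:

struct foo_dma_chan {
	struct virt_dma_chan vchan;
	wait_queue_head_t wq_state;	/* woken from the IRQ handler and
					 * wherever the channel is started */
};

static void foo_dma_synchronize(struct dma_chan *dchan)
{
	struct foo_dma_chan *c = to_foo_dma_chan(dchan);	/* assumed */

	/* Sleep until the hardware channel is verifiably idle... */
	wait_event(c->wq_state, !foo_chan_is_running(c));	/* assumed */

	/* ...then it is safe to flush completed-descriptor callbacks. */
	vchan_synchronize(&c->vchan);
}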
From ad52465b6c37a6a3c24b2455404f6f524a1ce14d Mon Sep 17 00:00:00 2001
From: Peter Ujfalusi
Date: Tue, 12 Jul 2016 14:21:14 +0300
Subject: dmaengine: omap-dma: Support for interleaved transfer

Initial support for interleaved transfer with sDMA. The implementation
only supports DMA_MEM_TO_MEM, and frame_size must be 1. sDMA needs to
be configured for double indexing when ICG is needed.

Signed-off-by: Peter Ujfalusi
Signed-off-by: Vinod Koul

diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index 1e984e1..2e0d49b 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -59,6 +59,8 @@ struct omap_sg {
 	dma_addr_t addr;
 	uint32_t en;		/* number of elements (24-bit) */
 	uint32_t fn;		/* number of frames (16-bit) */
+	int32_t fi;		/* for double indexing */
+	int16_t ei;		/* for double indexing */
 };
 
 struct omap_desc {
@@ -66,7 +68,8 @@ struct omap_desc {
 	enum dma_transfer_direction dir;
 	dma_addr_t dev_addr;
 
-	int16_t fi;		/* for OMAP_DMA_SYNC_PACKET */
+	int32_t fi;		/* for OMAP_DMA_SYNC_PACKET / double indexing */
+	int16_t ei;		/* for double indexing */
 	uint8_t es;		/* CSDP_DATA_TYPE_xxx */
 	uint32_t ccr;		/* CCR value */
 	uint16_t clnk_ctrl;	/* CLNK_CTRL value */
@@ -379,8 +382,8 @@ static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
 	}
 
 	omap_dma_chan_write(c, cxsa, sg->addr);
-	omap_dma_chan_write(c, cxei, 0);
-	omap_dma_chan_write(c, cxfi, 0);
+	omap_dma_chan_write(c, cxei, sg->ei);
+	omap_dma_chan_write(c, cxfi, sg->fi);
 	omap_dma_chan_write(c, CEN, sg->en);
 	omap_dma_chan_write(c, CFN, sg->fn);
 
@@ -425,7 +428,7 @@ static void omap_dma_start_desc(struct omap_chan *c)
 	}
 
 	omap_dma_chan_write(c, cxsa, d->dev_addr);
-	omap_dma_chan_write(c, cxei, 0);
+	omap_dma_chan_write(c, cxei, d->ei);
 	omap_dma_chan_write(c, cxfi, d->fi);
 	omap_dma_chan_write(c, CSDP, d->csdp);
 	omap_dma_chan_write(c, CLNK_CTRL, d->clnk_ctrl);
@@ -971,6 +974,89 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_memcpy(
 	return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
 }
 
+static struct dma_async_tx_descriptor *omap_dma_prep_dma_interleaved(
+	struct dma_chan *chan, struct dma_interleaved_template *xt,
+	unsigned long flags)
+{
+	struct omap_chan *c = to_omap_dma_chan(chan);
+	struct omap_desc *d;
+	struct omap_sg *sg;
+	uint8_t data_type;
+	size_t src_icg, dst_icg;
+
+	/* Slave mode is not supported */
+	if (is_slave_direction(xt->dir))
+		return NULL;
+
+	if (xt->frame_size != 1 || xt->numf == 0)
+		return NULL;
+
+	d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
+	if (!d)
+		return NULL;
+
+	data_type = __ffs((xt->src_start | xt->dst_start | xt->sgl[0].size));
+	if (data_type > CSDP_DATA_TYPE_32)
+		data_type = CSDP_DATA_TYPE_32;
+
+	sg = &d->sg[0];
+	d->dir = DMA_MEM_TO_MEM;
+	d->dev_addr = xt->src_start;
+	d->es = data_type;
+	sg->en = xt->sgl[0].size / BIT(data_type);
+	sg->fn = xt->numf;
+	sg->addr = xt->dst_start;
+	d->sglen = 1;
+	d->ccr = c->ccr;
+
+	src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);
+	dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]);
+	if (src_icg) {
+		d->ccr |= CCR_SRC_AMODE_DBLIDX;
+		d->ei = 1;
+		d->fi = src_icg;
+	} else if (xt->src_inc) {
+		d->ccr |= CCR_SRC_AMODE_POSTINC;
+		d->fi = 0;
+	} else {
+		dev_err(chan->device->dev,
+			"%s: SRC constant addressing is not supported\n",
+			__func__);
+		kfree(d);
+		return NULL;
+	}
+
+	if (dst_icg) {
+		d->ccr |= CCR_DST_AMODE_DBLIDX;
+		sg->ei = 1;
+		sg->fi = dst_icg;
+	} else if (xt->dst_inc) {
+		d->ccr |= CCR_DST_AMODE_POSTINC;
+		sg->fi = 0;
+	} else {
+		dev_err(chan->device->dev,
+			"%s: DST constant addressing is not supported\n",
+			__func__);
+		kfree(d);
+		return NULL;
+	}
+
+	d->cicr = CICR_DROP_IE | CICR_FRAME_IE;
+
+	d->csdp = data_type;
+
+	if (dma_omap1()) {
+		d->cicr |= CICR_TOUT_IE;
+		d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_EMIFF;
+	} else {
+		d->csdp |= CSDP_DST_PACKED | CSDP_SRC_PACKED;
+		d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;
+		d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64;
+	}
+
+	return vchan_tx_prep(&c->vc, &d->vd, flags);
+}
+
 static int omap_dma_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg)
 {
 	struct omap_chan *c = to_omap_dma_chan(chan);
@@ -1116,6 +1202,7 @@ static int omap_dma_probe(struct platform_device *pdev)
 	dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
 	dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
 	dma_cap_set(DMA_MEMCPY, od->ddev.cap_mask);
+	dma_cap_set(DMA_INTERLEAVE, od->ddev.cap_mask);
 	od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources;
 	od->ddev.device_free_chan_resources = omap_dma_free_chan_resources;
 	od->ddev.device_tx_status = omap_dma_tx_status;
@@ -1123,6 +1210,7 @@ static int omap_dma_probe(struct platform_device *pdev)
 	od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
 	od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
 	od->ddev.device_prep_dma_memcpy = omap_dma_prep_dma_memcpy;
+	od->ddev.device_prep_interleaved_dma = omap_dma_prep_dma_interleaved;
 	od->ddev.device_config = omap_dma_slave_config;
 	od->ddev.device_pause = omap_dma_pause;
 	od->ddev.device_resume = omap_dma_resume;
-- cgit v0.10.2
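[Editorial aside] From the client side, the new omap-dma capability is reached through the generic interleaved API. A minimal sketch of a mem-to-mem transfer of `rows` frames with a gap on the destination side, under the constraints the patch states (DMA_MEM_TO_MEM, frame_size == 1); it assumes, as omap-dma does, that the provider consumes the template during prep. The foo_* wrapper is illustrative:

#include <linux/dmaengine.h>
#include <linux/slab.h>

static int foo_copy_with_dst_gap(struct dma_chan *chan,
				 dma_addr_t src, dma_addr_t dst,
				 size_t row_bytes, size_t rows,
				 size_t dst_gap_bytes)
{
	struct dma_interleaved_template *xt;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	/* one data_chunk, since frame_size must be 1 for this driver */
	xt = kzalloc(sizeof(*xt) + sizeof(xt->sgl[0]), GFP_KERNEL);
	if (!xt)
		return -ENOMEM;

	xt->src_start = src;
	xt->dst_start = dst;
	xt->dir = DMA_MEM_TO_MEM;
	xt->src_inc = true;
	xt->dst_inc = true;
	xt->src_sgl = false;		/* source is contiguous */
	xt->dst_sgl = true;		/* ICG applies on the destination */
	xt->numf = rows;		/* number of frames */
	xt->frame_size = 1;
	xt->sgl[0].size = row_bytes;	/* bytes per frame */
	xt->sgl[0].icg = dst_gap_bytes;	/* gap between frames */

	tx = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
	kfree(xt);
	if (!tx)
		return -EINVAL;

	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);
	return 0;
}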
From caf5ee94be697f8df6d0292e19f3afa4d74745ce Mon Sep 17 00:00:00 2001
From: Kedareswara rao Appana
Date: Thu, 14 Jul 2016 19:00:55 +0530
Subject: dmaengine: zynqmp_dma: Fix static checker warning

This patch fixes the below static checker warning:
drivers/dma/xilinx/zynqmp_dma.c:973 zynqmp_dma_chan_probe()
warn: was && intended here instead of ||?

Reported-by: Dan Carpenter
Signed-off-by: Kedareswara rao Appana
Signed-off-by: Vinod Koul

diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
index f777a5b..b18d51f 100644
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -970,12 +970,17 @@ static int zynqmp_dma_chan_probe(struct zynqmp_dma_device *zdev,
 	chan->dst_burst_len = ZYNQMP_DMA_AWLEN_RST_VAL;
 	chan->src_burst_len = ZYNQMP_DMA_ARLEN_RST_VAL;
 	err = of_property_read_u32(node, "xlnx,bus-width", &chan->bus_width);
-	if ((err < 0) && ((chan->bus_width != ZYNQMP_DMA_BUS_WIDTH_64) ||
-	    (chan->bus_width != ZYNQMP_DMA_BUS_WIDTH_128))) {
-		dev_err(zdev->dev, "invalid bus-width value");
+	if (err < 0) {
+		dev_err(&pdev->dev, "missing xlnx,bus-width property\n");
 		return err;
 	}
 
+	if (chan->bus_width != ZYNQMP_DMA_BUS_WIDTH_64 &&
+	    chan->bus_width != ZYNQMP_DMA_BUS_WIDTH_128) {
+		dev_err(zdev->dev, "invalid bus-width value");
+		return -EINVAL;
+	}
+
 	chan->is_dmacoherent = of_property_read_bool(node, "dma-coherent");
 	zdev->chan = chan;
 	tasklet_init(&chan->tasklet, zynqmp_dma_do_tasklet, (ulong)chan);
-- cgit v0.10.2
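[Editorial aside] The zynqmp fix separates "property missing" from "property invalid". When a binding treats the property as optional, the usual alternative is to preload a default: of_property_read_u32() leaves the output untouched on failure. A sketch with an illustrative default value:

#include <linux/device.h>
#include <linux/of.h>

static int foo_get_bus_width(struct device *dev, struct device_node *node,
			     u32 *bus_width)
{
	/* Preload the default; a missing property leaves it in place. */
	*bus_width = 64;
	of_property_read_u32(node, "xlnx,bus-width", bus_width);

	if (*bus_width != 64 && *bus_width != 128) {
		dev_err(dev, "invalid xlnx,bus-width value %u\n", *bus_width);
		return -EINVAL;
	}
	return 0;
}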
From d8cc38dd965d6b2ea657c142d4fd0a0a3ba9dec4 Mon Sep 17 00:00:00 2001
From: Wei Yongjun
Date: Wed, 13 Jul 2016 12:55:06 +0000
Subject: dmaengine: qcom_hidma: use for_each_matching_node() macro

Use the for_each_matching_node() macro instead of open coding it.

Signed-off-by: Wei Yongjun
Acked-by: Sinan Kaya
Signed-off-by: Vinod Koul

diff --git a/drivers/dma/qcom/hidma_mgmt.c b/drivers/dma/qcom/hidma_mgmt.c
index 6416ded..82f36e4 100644
--- a/drivers/dma/qcom/hidma_mgmt.c
+++ b/drivers/dma/qcom/hidma_mgmt.c
@@ -392,8 +392,7 @@ static int __init hidma_mgmt_init(void)
 #if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
 	struct device_node *child;
 
-	for (child = of_find_matching_node(NULL, hidma_mgmt_match); child;
-	     child = of_find_matching_node(child, hidma_mgmt_match)) {
+	for_each_matching_node(child, hidma_mgmt_match) {
 		/* device tree based firmware here */
 		hidma_mgmt_of_populate_channels(child);
 		of_node_put(child);
-- cgit v0.10.2

From e94570a36bda79b9f87abd283d7277b6a92c508d Mon Sep 17 00:00:00 2001
From: Arnd Bergmann
Date: Tue, 19 Jul 2016 10:43:49 +0200
Subject: dmaengine: zynqmp_dma: add missing MODULE_LICENSE

We get a warning about the missing MODULE_LICENSE tag for this newly
added driver module:

WARNING: modpost: missing MODULE_LICENSE() in drivers/dma/xilinx/zynqmp_dma.o
see include/linux/module.h for more information

This adds a "GPL" license, matching the "version 2 or later" information
in the comment at the start of the file.

Signed-off-by: Arnd Bergmann
Acked-by: Kedareswara rao Appana
Signed-off-by: Vinod Koul

diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
index b18d51f..6d221e5 100644
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -1146,5 +1146,6 @@ static struct platform_driver zynqmp_dma_driver = {
 
 module_platform_driver(zynqmp_dma_driver);
 
+MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Xilinx, Inc.");
 MODULE_DESCRIPTION("Xilinx ZynqMP DMA driver");
-- cgit v0.10.2
From 89b90c09b5d505dcff1068054f1fc2d2704e909f Mon Sep 17 00:00:00 2001
From: Wei Yongjun
Date: Tue, 19 Jul 2016 11:29:41 +0000
Subject: dmaengine: k3dma: add missing clk_disable_unprepare() on error in k3_dma_probe()

Add the missing clk_disable_unprepare() before returning from
k3_dma_probe() in the error handling case.

Signed-off-by: Wei Yongjun
Signed-off-by: Vinod Koul

diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index 35961af..563affd 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -757,7 +757,7 @@ static int k3_dma_probe(struct platform_device *op)
 
 	ret = dma_async_device_register(&d->slave);
 	if (ret)
-		return ret;
+		goto dma_async_register_fail;
 
 	ret = of_dma_controller_register((&op->dev)->of_node,
 					k3_of_dma_simple_xlate, d);
@@ -774,6 +774,8 @@ static int k3_dma_probe(struct platform_device *op)
 
 of_dma_register_fail:
 	dma_async_device_unregister(&d->slave);
+dma_async_register_fail:
+	clk_disable_unprepare(d->clk);
 	return ret;
 }
-- cgit v0.10.2
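[Editorial aside] The k3dma patch is an instance of the standard probe unwind ladder: each failure path undoes exactly what succeeded before it, in reverse order, and registering with the framework is what makes a later failure need the extra label. The shape, condensed with illustrative foo_* names (struct foo_dev is assumed to carry the clk and the dma_device):

static int foo_probe(struct platform_device *op)
{
	struct foo_dev *d;
	int ret;

	d = devm_kzalloc(&op->dev, sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->clk = devm_clk_get(&op->dev, NULL);
	if (IS_ERR(d->clk))
		return PTR_ERR(d->clk);

	ret = clk_prepare_enable(d->clk);
	if (ret)
		return ret;

	ret = dma_async_device_register(&d->slave);
	if (ret)
		goto err_clk;		/* nothing else to unwind yet */

	ret = of_dma_controller_register(op->dev.of_node,
					 of_dma_simple_xlate, d);
	if (ret)
		goto err_unregister;

	return 0;

err_unregister:
	dma_async_device_unregister(&d->slave);
err_clk:
	clk_disable_unprepare(d->clk);	/* the step the patch above restores */
	return ret;
}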