From aebf5a67db8dbacbc624b9c652b81f5460b15eff Mon Sep 17 00:00:00 2001
From: Robert Jarzmik
Date: Mon, 21 Sep 2015 11:06:32 +0200
Subject: dmaengine: pxa_dma: fix initial list move

Since the commit to have an allocated list of virtual descriptors was
reverted, the pxa_dma driver is broken, as it assumes the descriptor is
placed on the allocated list upon allocation.

Fix the issue in pxa_dma by making an allocated virtual descriptor a
singleton.

Fixes: 8c8fe97b2b8a ("Revert "dmaengine: virt-dma: don't always free descriptor upon completion"")
Signed-off-by: Robert Jarzmik
Signed-off-by: Vinod Koul

diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index 5cb61ce..c6723ec 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -887,6 +887,7 @@ pxad_tx_prep(struct virt_dma_chan *vc, struct virt_dma_desc *vd,
         struct dma_async_tx_descriptor *tx;
         struct pxad_chan *chan = container_of(vc, struct pxad_chan, vc);
 
+        INIT_LIST_HEAD(&vd->node);
         tx = vchan_tx_prep(vc, vd, tx_flags);
         tx->tx_submit = pxad_tx_submit;
         dev_dbg(&chan->vc.chan.dev->device,
-- cgit v0.10.2

From c1492b4c541e3a382b60f1b5879cd3c4d246ad31 Mon Sep 17 00:00:00 2001
From: Andrzej Hajda
Date: Thu, 24 Sep 2015 16:00:17 +0200
Subject: dmaengine: xgene-dma: fix handling xgene_dma_get_ring_size result

The function can return a negative value. The problem has been detected
using the proposed semantic patch
scripts/coccinelle/tests/assign_signed_to_unsigned.cocci [1].

[1]: http://permalink.gmane.org/gmane.linux.kernel/2046107

Signed-off-by: Andrzej Hajda
Signed-off-by: Vinod Koul

diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c
index b23e8d5..4c5ddda 100644
--- a/drivers/dma/xgene-dma.c
+++ b/drivers/dma/xgene-dma.c
@@ -1421,15 +1421,18 @@ static int xgene_dma_create_ring_one(struct xgene_dma_chan *chan,
                                      struct xgene_dma_ring *ring,
                                      enum xgene_dma_ring_cfgsize cfgsize)
 {
+        int ret;
+
         /* Setup DMA ring descriptor variables */
         ring->pdma = chan->pdma;
         ring->cfgsize = cfgsize;
         ring->num = chan->pdma->ring_num++;
         ring->id = XGENE_DMA_RING_ID_GET(ring->owner, ring->buf_num);
 
-        ring->size = xgene_dma_get_ring_size(chan, cfgsize);
-        if (ring->size <= 0)
-                return ring->size;
+        ret = xgene_dma_get_ring_size(chan, cfgsize);
+        if (ret <= 0)
+                return ret;
+        ring->size = ret;
 
         /* Allocate memory for DMA ring descriptor */
         ring->desc_vaddr = dma_zalloc_coherent(chan->dev, ring->size,
-- cgit v0.10.2
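The bug class fixed above is worth spelling out: ring->size is an unsigned
field, so storing a negative error code in it and then testing "<= 0" can
never catch the failure; the error code silently becomes a huge ring size.
A minimal stand-alone sketch of the pitfall and of the fix pattern
(hypothetical names, not the driver's actual types):

  #include <stdio.h>

  /* stand-in for xgene_dma_get_ring_size(): negative errno on error */
  static int get_ring_size(int cfg)
  {
          return cfg ? 4096 : -22;
  }

  int main(void)
  {
          unsigned int size;      /* unsigned, like the ring->size field */
          int ret;

          /* Broken: -22 wraps to 4294967274, so the check never fires */
          size = get_ring_size(0);
          if (size <= 0)          /* only true for size == 0, never for errors */
                  return 1;
          printf("bogus ring size used: %u\n", size);

          /* Fixed: keep the result in a signed local until it is validated */
          ret = get_ring_size(0);
          if (ret <= 0)
                  return 1;
          size = ret;
          return 0;
  }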
From 0b23a1ece9be2c3e04c3b8d3594a1ada1fa1ae50 Mon Sep 17 00:00:00 2001
From: Andy Shevchenko
Date: Mon, 14 Sep 2015 11:55:36 +0300
Subject: dmaengine: idma64: improve residue estimation

The residue calculation may provide a wrong estimation when the
transfer has been started. There are several scenarios we have to
separate:

1) the transfer is not started yet;
   the residue is equal to the total length;

2) the transfer has just started (the first chunk is ongoing);
   the residue is equal to the total length minus the bytes already
   transferred;

3) the transfer is ongoing and a few chunks of data have already been
   sent;
   the residue is equal to the total length minus the fully transferred
   chunks and the bytes already sent.

Mistakenly, the calculation in cases 2) and 3) was done in the same
way, and the result was equal to minus the bytes that had been
transferred, i.e. a huge number, since the size_t type can't hold
negative values.

Rewrite the calculation algorithm to be one-pass and produce a correct
result.

Besides the above, when the user asks for the status of the active DMA
descriptor without pausing the ongoing transfer, the residue is
estimated from the register values, though this is still racy: since
the transfer is active, the values are continuously changing. Here we
have to read two registers at a time, so to minimize the error those
reads are done close to each other.

Signed-off-by: Andy Shevchenko
Signed-off-by: Vinod Koul

diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c
index 18c14e1..48d6d9e 100644
--- a/drivers/dma/idma64.c
+++ b/drivers/dma/idma64.c
@@ -355,23 +355,23 @@ static size_t idma64_active_desc_size(struct idma64_chan *idma64c)
         struct idma64_desc *desc = idma64c->desc;
         struct idma64_hw_desc *hw;
         size_t bytes = desc->length;
-        u64 llp;
-        u32 ctlhi;
+        u64 llp = channel_readq(idma64c, LLP);
+        u32 ctlhi = channel_readl(idma64c, CTL_HI);
         unsigned int i = 0;
 
-        llp = channel_readq(idma64c, LLP);
         do {
                 hw = &desc->hw[i];
-        } while ((hw->llp != llp) && (++i < desc->ndesc));
+                if (hw->llp == llp)
+                        break;
+                bytes -= hw->len;
+        } while (++i < desc->ndesc);
 
         if (!i)
                 return bytes;
 
-        do {
-                bytes -= desc->hw[--i].len;
-        } while (i);
+        /* The current chunk is not fully transfered yet */
+        bytes += desc->hw[--i].len;
 
-        ctlhi = channel_readl(idma64c, CTL_HI);
         return bytes - IDMA64C_CTLH_BLOCK_TS(ctlhi);
 }
-- cgit v0.10.2

From 40482e64b0b84388561b00b880eeca7000f72d38 Mon Sep 17 00:00:00 2001
From: Emilio López
Date: Sun, 13 Sep 2015 17:15:53 -0300
Subject: dmaengine: sun4i: fix unsafe list iteration
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Currently, sun4i_dma_free_contract iterates over lists and frees memory
as it goes through them, causing reads from recently freed memory.

Fix this by using the safe version of the iterator, so freed memory is
not referenced at all.

Reported-by: Dan Carpenter
Signed-off-by: Emilio López
Acked-by: Maxime Ripard
Signed-off-by: Vinod Koul

diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c
index a1a500d..1661d518 100644
--- a/drivers/dma/sun4i-dma.c
+++ b/drivers/dma/sun4i-dma.c
@@ -599,13 +599,13 @@ get_next_cyclic_promise(struct sun4i_dma_contract *contract)
 static void sun4i_dma_free_contract(struct virt_dma_desc *vd)
 {
         struct sun4i_dma_contract *contract = to_sun4i_dma_contract(vd);
-        struct sun4i_dma_promise *promise;
+        struct sun4i_dma_promise *promise, *tmp;
 
         /* Free all the demands and completed demands */
-        list_for_each_entry(promise, &contract->demands, list)
+        list_for_each_entry_safe(promise, tmp, &contract->demands, list)
                 kfree(promise);
 
-        list_for_each_entry(promise, &contract->completed_demands, list)
+        list_for_each_entry_safe(promise, tmp, &contract->completed_demands, list)
                 kfree(promise);
 
         kfree(contract);
-- cgit v0.10.2
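The sun4i change above is the general rule for destructive list walks:
freeing the current entry inside a plain list_for_each_entry() means the
iterator then reads the next pointer out of just-freed memory. A minimal
sketch of the safe pattern, assuming the usual <linux/list.h> and
<linux/slab.h> helpers and a hypothetical structure:

  struct promise {
          struct list_head list;
          /* ... payload ... */
  };

  static void free_all_promises(struct list_head *head)
  {
          struct promise *p, *tmp;

          /*
           * list_for_each_entry_safe() fetches the next entry into 'tmp'
           * before the body runs, so kfree(p) cannot poison the walk.
           */
          list_for_each_entry_safe(p, tmp, head, list)
                  kfree(p);

          /* the head now points at freed nodes; re-init before any reuse */
          INIT_LIST_HEAD(head);
  }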
From 214fc4e423ff38b41b60db2209cf49b4e9a7209b Mon Sep 17 00:00:00 2001
From: Peter Ujfalusi
Date: Thu, 24 Sep 2015 12:03:35 +0300
Subject: dmaengine: fix balance of privatecnt

dma_release_channel() decrements the privatecnt counter, and almost all
dma_get* functions increment it, with the exception of
dma_get_slave_channel(). In most cases this does not cause an issue,
since normally the channel is not requested and then released, but if a
driver requests a DMA channel via dma_get_slave_channel() and releases
the channel, privatecnt becomes unbalanced, which will prevent, for
example, getting a channel for memcpy.

Signed-off-by: Peter Ujfalusi
Signed-off-by: Vinod Koul

diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 3ff284c..09479d4 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -554,10 +554,18 @@ struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
         mutex_lock(&dma_list_mutex);
 
         if (chan->client_count == 0) {
+                struct dma_device *device = chan->device;
+
+                dma_cap_set(DMA_PRIVATE, device->cap_mask);
+                device->privatecnt++;
                 err = dma_chan_get(chan);
-                if (err)
+                if (err) {
                         pr_debug("%s: failed to get %s: (%d)\n",
                                 __func__, dma_chan_name(chan), err);
+                        chan = NULL;
+                        if (--device->privatecnt == 0)
+                                dma_cap_clear(DMA_PRIVATE, device->cap_mask);
+                }
         } else
                 chan = NULL;
 
-- cgit v0.10.2
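The invariant restored above: every path that hands out a channel for
exclusive use must increment privatecnt, because dma_release_channel()
unconditionally decrements it and clears DMA_PRIVATE when it reaches zero.
From a client driver's point of view the pairing looks roughly like this
(a hedged sketch with a hypothetical wrapper, not a real in-tree user):

  static int example_use_slave_channel(struct dma_chan *candidate)
  {
          struct dma_chan *chan;

          /* with the fix, this also bumps chan->device->privatecnt */
          chan = dma_get_slave_channel(candidate);
          if (!chan)
                  return -EBUSY;

          /* ... prepare and issue transfers on chan ... */

          /*
           * Decrements privatecnt again; once it drops to zero,
           * DMA_PRIVATE is cleared and the device can serve public
           * requests (e.g. memcpy channels) once more.
           */
          dma_release_channel(chan);
          return 0;
  }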
From ee08b59d47d859ed0a11ab331a3fbc5ab3b56100 Mon Sep 17 00:00:00 2001
From: Rameshwar Prasad Sahu
Date: Wed, 16 Sep 2015 13:33:23 +0530
Subject: dmaengine: xgene-dma: Fix overwriting DMA tx ring

This patch fixes an overflow issue with the TX ring descriptor. Each
descriptor is 32B in size, and an operation requires 2 of these
descriptors.

Signed-off-by: Rameshwar Prasad Sahu
Signed-off-by: Vinod Koul

diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c
index 4c5ddda..8d57b1b 100644
--- a/drivers/dma/xgene-dma.c
+++ b/drivers/dma/xgene-dma.c
@@ -59,7 +59,6 @@
 #define XGENE_DMA_RING_MEM_RAM_SHUTDOWN         0xD070
 #define XGENE_DMA_RING_BLK_MEM_RDY              0xD074
 #define XGENE_DMA_RING_BLK_MEM_RDY_VAL          0xFFFFFFFF
-#define XGENE_DMA_RING_DESC_CNT(v)              (((v) & 0x0001FFFE) >> 1)
 #define XGENE_DMA_RING_ID_GET(owner, num)       (((owner) << 6) | (num))
 #define XGENE_DMA_RING_DST_ID(v)                ((1 << 10) | (v))
 #define XGENE_DMA_RING_CMD_OFFSET               0x2C
@@ -379,14 +378,6 @@ static u8 xgene_dma_encode_xor_flyby(u32 src_cnt)
         return flyby_type[src_cnt];
 }
 
-static u32 xgene_dma_ring_desc_cnt(struct xgene_dma_ring *ring)
-{
-        u32 __iomem *cmd_base = ring->cmd_base;
-        u32 ring_state = ioread32(&cmd_base[1]);
-
-        return XGENE_DMA_RING_DESC_CNT(ring_state);
-}
-
 static void xgene_dma_set_src_buffer(__le64 *ext8, size_t *len,
                                      dma_addr_t *paddr)
 {
@@ -659,15 +650,12 @@ static void xgene_dma_clean_running_descriptor(struct xgene_dma_chan *chan,
         dma_pool_free(chan->desc_pool, desc, desc->tx.phys);
 }
 
-static int xgene_chan_xfer_request(struct xgene_dma_ring *ring,
-                                   struct xgene_dma_desc_sw *desc_sw)
+static void xgene_chan_xfer_request(struct xgene_dma_chan *chan,
+                                    struct xgene_dma_desc_sw *desc_sw)
 {
+        struct xgene_dma_ring *ring = &chan->tx_ring;
         struct xgene_dma_desc_hw *desc_hw;
 
-        /* Check if can push more descriptor to hw for execution */
-        if (xgene_dma_ring_desc_cnt(ring) > (ring->slots - 2))
-                return -EBUSY;
-
         /* Get hw descriptor from DMA tx ring */
         desc_hw = &ring->desc_hw[ring->head];
 
@@ -694,11 +682,13 @@ static int xgene_chan_xfer_request(struct xgene_dma_ring *ring,
                 memcpy(desc_hw, &desc_sw->desc2, sizeof(*desc_hw));
         }
 
+        /* Increment the pending transaction count */
+        chan->pending += ((desc_sw->flags &
+                        XGENE_DMA_FLAG_64B_DESC) ? 2 : 1);
+
         /* Notify the hw that we have descriptor ready for execution */
         iowrite32((desc_sw->flags & XGENE_DMA_FLAG_64B_DESC) ?
                   2 : 1, ring->cmd);
-
-        return 0;
 }
 
 /**
@@ -710,7 +700,6 @@ static int xgene_chan_xfer_request(struct xgene_dma_ring *ring,
 static void xgene_chan_xfer_ld_pending(struct xgene_dma_chan *chan)
 {
         struct xgene_dma_desc_sw *desc_sw, *_desc_sw;
-        int ret;
 
         /*
          * If the list of pending descriptors is empty, then we
@@ -735,18 +724,13 @@ static void xgene_chan_xfer_ld_pending(struct xgene_dma_chan *chan)
                 if (chan->pending >= chan->max_outstanding)
                         return;
 
-                ret = xgene_chan_xfer_request(&chan->tx_ring, desc_sw);
-                if (ret)
-                        return;
+                xgene_chan_xfer_request(chan, desc_sw);
 
                 /*
                  * Delete this element from ld pending queue and append it to
                  * ld running queue
                  */
                 list_move_tail(&desc_sw->node, &chan->ld_running);
-
-                /* Increment the pending transaction count */
-                chan->pending++;
         }
 }
 
@@ -821,7 +805,8 @@ static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan)
                  * Decrement the pending transaction count
                  * as we have processed one
                  */
-                chan->pending--;
+                chan->pending -= ((desc_sw->flags &
+                                XGENE_DMA_FLAG_64B_DESC) ? 2 : 1);
 
                 /*
                  * Delete this node from ld running queue and append it to
@@ -1485,7 +1470,7 @@ static int xgene_dma_create_chan_rings(struct xgene_dma_chan *chan)
                  tx_ring->id, tx_ring->num, tx_ring->desc_vaddr);
 
         /* Set the max outstanding request possible to this channel */
-        chan->max_outstanding = rx_ring->slots;
+        chan->max_outstanding = tx_ring->slots;
 
         return ret;
 }
-- cgit v0.10.2

From 6bea0f6d1c47b07be88dfd93f013ae05fcb3d8bf Mon Sep 17 00:00:00 2001
From: Andy Shevchenko
Date: Mon, 28 Sep 2015 18:57:03 +0300
Subject: dmaengine: dw: properly read DWC_PARAMS register

When we have fewer than the maximum allowed number of channels (8) and
autoconfiguration is enabled, the DWC_PARAMS read is wrong because it
uses arithmetic different from what is needed for the channel priority
setup. Redo the calculations properly. This now works well on an AVR32
board.

Fixes: fed2574b3c9f (dw_dmac: introduce software emulation of LLP transfers)
Cc: yitian.bu@tangramtek.com
Signed-off-by: Andy Shevchenko
Signed-off-by: Vinod Koul

diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index cf1c87f..bedce03 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -1591,7 +1591,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
         INIT_LIST_HEAD(&dw->dma.channels);
         for (i = 0; i < nr_channels; i++) {
                 struct dw_dma_chan *dwc = &dw->chan[i];
-                int r = nr_channels - i - 1;
 
                 dwc->chan.device = &dw->dma;
                 dma_cookie_init(&dwc->chan);
@@ -1603,7 +1602,7 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
 
                 /* 7 is highest priority & 0 is lowest. */
                 if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
-                        dwc->priority = r;
+                        dwc->priority = nr_channels - i - 1;
                 else
                         dwc->priority = i;
 
@@ -1622,6 +1621,7 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
                 /* Hardware configuration */
                 if (autocfg) {
                         unsigned int dwc_params;
+                        unsigned int r = DW_DMA_MAX_NR_CHANNELS - i - 1;
                         void __iomem *addr = chip->regs + r * sizeof(u32);
 
                         dwc_params = dma_read_byaddr(addr, DWC_PARAMS);
-- cgit v0.10.2
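The subtle point above is that two different "reverse index" computations
were sharing one variable: the channel priority counts down from the number
of channels actually present, while the per-channel DWC_PARAMS words sit at
offsets counted down from the hardware maximum. A small worked example of
the arithmetic (assuming a maximum of 8 channels and, say, 4 channels
configured):

  #include <stdio.h>

  #define DW_DMA_MAX_NR_CHANNELS 8

  int main(void)
  {
          unsigned int nr_channels = 4;   /* fewer than the maximum */
          unsigned int i;

          for (i = 0; i < nr_channels; i++) {
                  unsigned int priority = nr_channels - i - 1;            /* 3, 2, 1, 0 */
                  unsigned int reg_idx = DW_DMA_MAX_NR_CHANNELS - i - 1;  /* 7, 6, 5, 4 */

                  /* the driver derives the DWC_PARAMS address from reg_idx,
                   * never from the priority index */
                  printf("chan %u: priority %u, DWC_PARAMS word %u\n",
                         i, priority, reg_idx);
          }
          return 0;
  }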
From a1cf09031e641d3cceaca4a4dd20ef6a785bc9b3 Mon Sep 17 00:00:00 2001
From: Maxime Ripard
Date: Tue, 15 Sep 2015 15:36:00 +0200
Subject: dmaengine: at_xdmac: change block increment addressing mode

The addressing mode we were using was not only incrementing the address
at each microblock, but also at each data boundary, which was severely
slowing the transfer, without any benefit since we were not using the
data stride.

Switch to the microblock increment only in order to get back to an
acceptable performance level.

Signed-off-by: Maxime Ripard
Signed-off-by: Ludovic Desroches
Fixes: 6007ccb57744 ("dmaengine: xdmac: Add interleaved transfer support")
Cc: stable@vger.kernel.org #4.2
Signed-off-by: Vinod Koul

diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index a165b4b..ffea602 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -875,14 +875,14 @@ at_xdmac_interleaved_queue_desc(struct dma_chan *chan,
         if (xt->src_inc) {
                 if (xt->src_sgl)
-                        chan_cc |= AT_XDMAC_CC_SAM_UBS_DS_AM;
+                        chan_cc |= AT_XDMAC_CC_SAM_UBS_AM;
                 else
                         chan_cc |= AT_XDMAC_CC_SAM_INCREMENTED_AM;
         }
 
         if (xt->dst_inc) {
                 if (xt->dst_sgl)
-                        chan_cc |= AT_XDMAC_CC_DAM_UBS_DS_AM;
+                        chan_cc |= AT_XDMAC_CC_DAM_UBS_AM;
                 else
                         chan_cc |= AT_XDMAC_CC_DAM_INCREMENTED_AM;
         }
-- cgit v0.10.2

From 0be2136b67067617b36c70e525d7534108361e36 Mon Sep 17 00:00:00 2001
From: Ludovic Desroches
Date: Tue, 15 Sep 2015 15:39:11 +0200
Subject: dmaengine: at_xdmac: clean used descriptor

When putting a descriptor back on the free descs list, some fields are
not set to 0; this can cause bugs if someone uses the descriptor
without keeping this in mind. Descriptors are not put back one by one,
so it is easier to clean descriptors when we request them.

Signed-off-by: Ludovic Desroches
Cc: stable@vger.kernel.org #4.2
Signed-off-by: Vinod Koul

diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index ffea602..dd24375 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -455,6 +455,15 @@ static struct at_xdmac_desc *at_xdmac_alloc_desc(struct dma_chan *chan,
         return desc;
 }
 
+void at_xdmac_init_used_desc(struct at_xdmac_desc *desc)
+{
+        memset(&desc->lld, 0, sizeof(desc->lld));
+        INIT_LIST_HEAD(&desc->descs_list);
+        desc->direction = DMA_TRANS_NONE;
+        desc->xfer_size = 0;
+        desc->active_xfer = false;
+}
+
 /* Call must be protected by lock. */
 static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan)
 {
@@ -466,7 +475,7 @@ static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan)
                 desc = list_first_entry(&atchan->free_descs_list,
                                         struct at_xdmac_desc, desc_node);
                 list_del(&desc->desc_node);
-                desc->active_xfer = false;
+                at_xdmac_init_used_desc(desc);
         }
 
         return desc;
-- cgit v0.10.2

From aa3ee5f569fda51e54c224c0df60e187e9c5e582 Mon Sep 17 00:00:00 2001
From: Axel Lin
Date: Sat, 26 Sep 2015 17:15:47 +0800
Subject: dmaengine: zxdma: Fix off-by-one for testing valid pchan request

The valid pchan range is 0 ~ d->dma_requests - 1.

Signed-off-by: Axel Lin
Reviewed-by: Jun Nie
Signed-off-by: Vinod Koul

diff --git a/drivers/dma/zx296702_dma.c b/drivers/dma/zx296702_dma.c
index 39915a6..c017fcd 100644
--- a/drivers/dma/zx296702_dma.c
+++ b/drivers/dma/zx296702_dma.c
@@ -739,7 +739,7 @@ static struct dma_chan *zx_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
         struct dma_chan *chan;
         struct zx_dma_chan *c;
 
-        if (request > d->dma_requests)
+        if (request >= d->dma_requests)
                 return NULL;
 
         chan = dma_get_any_slave_channel(&d->slave);
-- cgit v0.10.2
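The off-by-one above is the standard bounds-check rule: with
d->dma_requests requestors the valid indices are 0 through
d->dma_requests - 1, so the guard must use ">=", otherwise
request == d->dma_requests slips through and indexes one slot past the
end. A short illustration with a hypothetical table:

  /* valid indices: 0 .. NR_REQUESTS - 1 */
  if (request >= NR_REQUESTS)     /* '>' alone would accept request == NR_REQUESTS */
          return NULL;
  return &request_table[request];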
From e87ffbdf06971a80ad2a11217200bdd936195af1 Mon Sep 17 00:00:00 2001
From: Robert Jarzmik
Date: Wed, 30 Sep 2015 19:42:14 +0200
Subject: dmaengine: pxa_dma: fix the no-requestor case

A very small number of devices don't use the flow control offered by
requestor lines. In these specific cases, the pxa dma driver should be
aware of that and not try to use a requestor line.

Signed-off-by: Robert Jarzmik
Signed-off-by: Vinod Koul

diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index c6723ec..064a826 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -473,8 +473,10 @@ static void pxad_free_phy(struct pxad_chan *chan)
                 return;
 
         /* clear the channel mapping in DRCMR */
-        reg = pxad_drcmr(chan->drcmr);
-        writel_relaxed(0, chan->phy->base + reg);
+        if (chan->drcmr <= DRCMR_CHLNUM) {
+                reg = pxad_drcmr(chan->drcmr);
+                writel_relaxed(0, chan->phy->base + reg);
+        }
 
         spin_lock_irqsave(&pdev->phy_lock, flags);
         for (i = 0; i < 32; i++)
@@ -516,8 +518,10 @@ static void phy_enable(struct pxad_phy *phy, bool misaligned)
                 "%s(); phy=%p(%d) misaligned=%d\n", __func__,
                 phy, phy->idx, misaligned);
 
-        reg = pxad_drcmr(phy->vchan->drcmr);
-        writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg);
+        if (phy->vchan->drcmr <= DRCMR_CHLNUM) {
+                reg = pxad_drcmr(phy->vchan->drcmr);
+                writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg);
+        }
 
         dalgn = phy_readl_relaxed(phy, DALGN);
         if (misaligned)
@@ -911,14 +915,18 @@ static void pxad_get_config(struct pxad_chan *chan,
                 width = chan->cfg.src_addr_width;
                 dev_addr = chan->cfg.src_addr;
                 *dev_src = dev_addr;
-                *dcmd |= PXA_DCMD_INCTRGADDR | PXA_DCMD_FLOWSRC;
+                *dcmd |= PXA_DCMD_INCTRGADDR;
+                if (chan->drcmr <= DRCMR_CHLNUM)
+                        *dcmd |= PXA_DCMD_FLOWSRC;
         }
         if (dir == DMA_MEM_TO_DEV) {
                 maxburst = chan->cfg.dst_maxburst;
                 width = chan->cfg.dst_addr_width;
                 dev_addr = chan->cfg.dst_addr;
                 *dev_dst = dev_addr;
-                *dcmd |= PXA_DCMD_INCSRCADDR | PXA_DCMD_FLOWTRG;
+                *dcmd |= PXA_DCMD_INCSRCADDR;
+                if (chan->drcmr <= DRCMR_CHLNUM)
+                        *dcmd |= PXA_DCMD_FLOWTRG;
         }
         if (dir == DMA_MEM_TO_MEM)
                 *dcmd |= PXA_DCMD_BURST32 | PXA_DCMD_INCTRGADDR |
-- cgit v0.10.2

From 7b09a1bba4091a9d208481d7831682a1f3061ab9 Mon Sep 17 00:00:00 2001
From: Robert Jarzmik
Date: Wed, 30 Sep 2015 19:42:15 +0200
Subject: dmaengine: pxa_dma: fix residue corner case

A very tiny temporal window exists in the residue calculation where:
 - upon entering the residue calculation, the transfer is ongoing
 - when reading the current transfer pointer, it has just changed to
   the "finisher/linker" descriptor

In this case, the residue returned is the whole transfer length instead
of 0. Fix it.

This appears almost exclusively in one extreme case, where the driver
is used by older clients which inquire about the residue in interrupt
context, such as the smsc91x ethernet driver, in a tight loop:

  interrupt_handler()
    dmaengine_submit()
    do {
      dmaengine_tx_status()
    } while (residue > 0 || status != DMA_ERROR)

Signed-off-by: Robert Jarzmik
Signed-off-by: Vinod Koul

diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index 064a826..fc4156a 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -1186,6 +1186,16 @@ static unsigned int pxad_residue(struct pxad_chan *chan,
         else
                 curr = phy_readl_relaxed(chan->phy, DTADR);
 
+        /*
+         * curr has to be actually read before checking descriptor
+         * completion, so that a curr inside a status updater
+         * descriptor implies the following test returns true, and
+         * preventing reordering of curr load and the test.
+         */
+        rmb();
+        if (is_desc_completed(vd))
+                goto out;
+
         for (i = 0; i < sw_desc->nb_desc - 1; i++) {
                 hw_desc = sw_desc->hw_desc[i];
                 if (sw_desc->hw_desc[0]->dcmd & PXA_DCMD_INCSRCADDR)
-- cgit v0.10.2
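The comment added in that last hunk describes a reusable ordering pattern:
the current transfer pointer is read before the completion check, with a
read barrier in between, so that a pointer already inside the
status-updater descriptor guarantees the completion test that follows sees
the transfer as done, and the residue is reported as 0 rather than the full
length. A condensed pseudo-C sketch of the pattern, with hypothetical
helpers standing in for the driver's register and descriptor accessors:

  u32 curr;

  curr = read_transfer_pointer(chan);     /* may already point into the finisher */
  rmb();                                  /* order the read above before the test below */
  if (descriptor_completed(chan))
          return 0;                       /* completed: no residue left */

  return bytes_remaining(chan, curr);     /* otherwise derive residue from curr */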