Diffstat (limited to 'crypto/async_tx/async_xor.c')
-rw-r--r--   crypto/async_tx/async_xor.c   123
1 file changed, 57 insertions, 66 deletions
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index 3c562f5..8ade0a0 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -33,31 +33,48 @@
 
 /* do_async_xor - dma map the pages and perform the xor with an engine */
 static __async_inline struct dma_async_tx_descriptor *
-do_async_xor(struct dma_chan *chan, struct dmaengine_unmap_data *unmap,
+do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
+             unsigned int offset, int src_cnt, size_t len, dma_addr_t *dma_src,
              struct async_submit_ctl *submit)
 {
         struct dma_device *dma = chan->device;
         struct dma_async_tx_descriptor *tx = NULL;
+        int src_off = 0;
+        int i;
         dma_async_tx_callback cb_fn_orig = submit->cb_fn;
         void *cb_param_orig = submit->cb_param;
         enum async_tx_flags flags_orig = submit->flags;
-        enum dma_ctrl_flags dma_flags = 0;
-        int src_cnt = unmap->to_cnt;
-        int xor_src_cnt;
-        dma_addr_t dma_dest = unmap->addr[unmap->to_cnt];
-        dma_addr_t *src_list = unmap->addr;
+        enum dma_ctrl_flags dma_flags;
+        int xor_src_cnt = 0;
+        dma_addr_t dma_dest;
+
+        /* map the dest bidrectional in case it is re-used as a source */
+        dma_dest = dma_map_page(dma->dev, dest, offset, len, DMA_BIDIRECTIONAL);
+        for (i = 0; i < src_cnt; i++) {
+                /* only map the dest once */
+                if (!src_list[i])
+                        continue;
+                if (unlikely(src_list[i] == dest)) {
+                        dma_src[xor_src_cnt++] = dma_dest;
+                        continue;
+                }
+                dma_src[xor_src_cnt++] = dma_map_page(dma->dev, src_list[i], offset,
+                                                      len, DMA_TO_DEVICE);
+        }
+        src_cnt = xor_src_cnt;
 
         while (src_cnt) {
-                dma_addr_t tmp;
-
                 submit->flags = flags_orig;
+                dma_flags = 0;
                 xor_src_cnt = min(src_cnt, (int)dma->max_xor);
-                /* if we are submitting additional xors, leave the chain open
-                 * and clear the callback parameters
+                /* if we are submitting additional xors, leave the chain open,
+                 * clear the callback parameters, and leave the destination
+                 * buffer mapped
                  */
                 if (src_cnt > xor_src_cnt) {
                         submit->flags &= ~ASYNC_TX_ACK;
                         submit->flags |= ASYNC_TX_FENCE;
+                        dma_flags = DMA_COMPL_SKIP_DEST_UNMAP;
                         submit->cb_fn = NULL;
                         submit->cb_param = NULL;
                 } else {
@@ -68,18 +85,12 @@ do_async_xor(struct dma_chan *chan, struct dmaengine_unmap_data *unmap,
                         dma_flags |= DMA_PREP_INTERRUPT;
                 if (submit->flags & ASYNC_TX_FENCE)
                         dma_flags |= DMA_PREP_FENCE;
-
-                /* Drivers force forward progress in case they can not provide a
-                 * descriptor
+                /* Since we have clobbered the src_list we are committed
+                 * to doing this asynchronously.  Drivers force forward progress
+                 * in case they can not provide a descriptor
                  */
-                tmp = src_list[0];
-                if (src_list > unmap->addr)
-                        src_list[0] = dma_dest;
-                tx = dma->device_prep_dma_xor(chan, dma_dest, src_list,
-                                              xor_src_cnt, unmap->len,
-                                              dma_flags);
-                src_list[0] = tmp;
-
+                tx = dma->device_prep_dma_xor(chan, dma_dest, &dma_src[src_off],
+                                              xor_src_cnt, len, dma_flags);
                 if (unlikely(!tx))
                         async_tx_quiesce(&submit->depend_tx);
 
@@ -88,21 +99,22 @@
                 while (unlikely(!tx)) {
                         dma_async_issue_pending(chan);
                         tx = dma->device_prep_dma_xor(chan, dma_dest,
-                                                      src_list,
-                                                      xor_src_cnt, unmap->len,
+                                                      &dma_src[src_off],
+                                                      xor_src_cnt, len,
                                                       dma_flags);
                 }
 
-                dma_set_unmap(tx, unmap);
                 async_tx_submit(chan, tx, submit);
                 submit->depend_tx = tx;
 
                 if (src_cnt > xor_src_cnt) {
                         /* drop completed sources */
                         src_cnt -= xor_src_cnt;
+                        src_off += xor_src_cnt;
+                        /* use the intermediate result a source */
+                        dma_src[--src_off] = dma_dest;
                         src_cnt++;
-                        src_list += xor_src_cnt - 1;
                 } else
                         break;
         }
 
@@ -177,40 +189,22 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
         struct dma_chan *chan = async_tx_find_channel(submit, DMA_XOR,
                                                       &dest, 1, src_list,
                                                       src_cnt, len);
-        struct dma_device *device = chan ? chan->device : NULL;
-        struct dmaengine_unmap_data *unmap = NULL;
+        dma_addr_t *dma_src = NULL;
 
         BUG_ON(src_cnt <= 1);
 
-        if (device)
-                unmap = dmaengine_get_unmap_data(device->dev, src_cnt+1, GFP_NOIO);
-
-        if (unmap && is_dma_xor_aligned(device, offset, 0, len)) {
-                struct dma_async_tx_descriptor *tx;
-                int i, j;
+        if (submit->scribble)
+                dma_src = submit->scribble;
+        else if (sizeof(dma_addr_t) <= sizeof(struct page *))
+                dma_src = (dma_addr_t *) src_list;
 
+        if (dma_src && chan && is_dma_xor_aligned(chan->device, offset, 0, len)) {
                 /* run the xor asynchronously */
                 pr_debug("%s (async): len: %zu\n", __func__, len);
 
-                unmap->len = len;
-                for (i = 0, j = 0; i < src_cnt; i++) {
-                        if (!src_list[i])
-                                continue;
-                        unmap->to_cnt++;
-                        unmap->addr[j++] = dma_map_page(device->dev, src_list[i],
-                                                        offset, len, DMA_TO_DEVICE);
-                }
-
-                /* map it bidirectional as it may be re-used as a source */
-                unmap->addr[j] = dma_map_page(device->dev, dest, offset, len,
-                                              DMA_BIDIRECTIONAL);
-                unmap->bidi_cnt = 1;
-
-                tx = do_async_xor(chan, unmap, submit);
-                dmaengine_unmap_put(unmap);
-                return tx;
+                return do_async_xor(chan, dest, src_list, offset, src_cnt, len,
+                                    dma_src, submit);
         } else {
-                dmaengine_unmap_put(unmap);
                 /* run the xor synchronously */
                 pr_debug("%s (sync): len: %zu\n", __func__, len);
                 WARN_ONCE(chan, "%s: no space for dma address conversion\n",
@@ -274,14 +268,16 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
         struct dma_chan *chan = xor_val_chan(submit, dest, src_list, src_cnt, len);
         struct dma_device *device = chan ? chan->device : NULL;
         struct dma_async_tx_descriptor *tx = NULL;
-        struct dmaengine_unmap_data *unmap = NULL;
+        dma_addr_t *dma_src = NULL;
 
         BUG_ON(src_cnt <= 1);
 
-        if (device)
-                unmap = dmaengine_get_unmap_data(device->dev, src_cnt, GFP_NOIO);
+        if (submit->scribble)
+                dma_src = submit->scribble;
+        else if (sizeof(dma_addr_t) <= sizeof(struct page *))
+                dma_src = (dma_addr_t *) src_list;
 
-        if (unmap && src_cnt <= device->max_xor &&
+        if (dma_src && device && src_cnt <= device->max_xor &&
             is_dma_xor_aligned(device, offset, 0, len)) {
                 unsigned long dma_prep_flags = 0;
                 int i;
@@ -292,15 +288,11 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
                         dma_prep_flags |= DMA_PREP_INTERRUPT;
                 if (submit->flags & ASYNC_TX_FENCE)
                         dma_prep_flags |= DMA_PREP_FENCE;
+                for (i = 0; i < src_cnt; i++)
+                        dma_src[i] = dma_map_page(device->dev, src_list[i],
+                                                  offset, len, DMA_TO_DEVICE);
 
-                for (i = 0; i < src_cnt; i++) {
-                        unmap->addr[i] = dma_map_page(device->dev, src_list[i],
-                                                      offset, len, DMA_TO_DEVICE);
-                        unmap->to_cnt++;
-                }
-                unmap->len = len;
-
-                tx = device->device_prep_dma_xor_val(chan, unmap->addr, src_cnt,
+                tx = device->device_prep_dma_xor_val(chan, dma_src, src_cnt,
                                                      len, result, dma_prep_flags);
 
                 if (unlikely(!tx)) {
@@ -309,11 +301,11 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
                         while (!tx) {
                                 dma_async_issue_pending(chan);
                                 tx = device->device_prep_dma_xor_val(chan,
-                                        unmap->addr, src_cnt, len, result,
+                                        dma_src, src_cnt, len, result,
                                         dma_prep_flags);
                         }
                 }
-                dma_set_unmap(tx, unmap);
+
                 async_tx_submit(chan, tx, submit);
         } else {
                 enum async_tx_flags flags_orig = submit->flags;
@@ -335,7 +327,6 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
                 async_tx_sync_epilog(submit);
                 submit->flags = flags_orig;
         }
-        dmaengine_unmap_put(unmap);
 
         return tx;
 }
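
Note: the batching loop in do_async_xor() is the same on both sides of this change: take at most dma->max_xor sources per descriptor, then fold the partial result back in as a source for the next batch. The sketch below is a plain user-space illustration of that loop; memxor() and MAX_XOR are illustrative stand-ins for the engine's device_prep_dma_xor() and dma->max_xor, not kernel API.

/* Sketch of the do_async_xor() batching loop, outside the DMA machinery.
 * memxor() and MAX_XOR are user-space stand-ins for device_prep_dma_xor()
 * and dma->max_xor; nothing here is kernel API. */
#include <stdio.h>
#include <string.h>

#define LEN     16
#define MAX_XOR 3       /* pretend the "engine" accepts only 3 sources per op */

/* one engine-sized operation: dest = src[0] ^ src[1] ^ ... ^ src[cnt-1] */
static void memxor(unsigned char *dest, unsigned char **src, int cnt, size_t len)
{
        size_t i;
        int j;

        for (i = 0; i < len; i++) {
                unsigned char v = 0;

                for (j = 0; j < cnt; j++)
                        v ^= src[j][i];
                dest[i] = v;    /* safe even if dest is also listed in src[] */
        }
}

int main(void)
{
        unsigned char bufs[5][LEN];
        unsigned char *src[5];          /* the folded-in dest reuses a consumed slot */
        unsigned char dest[LEN];
        int src_cnt = 5, src_off = 0, i;

        for (i = 0; i < 5; i++) {
                memset(bufs[i], 0x11 * (i + 1), LEN);
                src[i] = bufs[i];
        }

        /* Same shape as the while (src_cnt) loop in do_async_xor(): xor up to
         * MAX_XOR sources into dest, drop the completed sources, then splice
         * dest back in as a source for the next batch. */
        while (src_cnt) {
                int xor_src_cnt = src_cnt < MAX_XOR ? src_cnt : MAX_XOR;

                memxor(dest, &src[src_off], xor_src_cnt, LEN);

                if (src_cnt > xor_src_cnt) {
                        /* drop completed sources */
                        src_cnt -= xor_src_cnt;
                        src_off += xor_src_cnt;

                        /* use the intermediate result as a source */
                        src[--src_off] = dest;
                        src_cnt++;
                } else {
                        break;
                }
        }

        printf("xor of 5 buffers: %#04x (expect %#04x)\n",
               dest[0], 0x11 ^ 0x22 ^ 0x33 ^ 0x44 ^ 0x55);
        return 0;
}

Because the folded-in destination reuses a slot whose sources have already been consumed, the loop needs no extra scratch space and an N-source xor takes roughly ceil((N-1)/(max_xor-1)) descriptors.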