From 10c95ed9aa2970e05fedb4ac8b3ce1b934dab17b Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Fri, 7 Aug 2015 18:15:11 +0200
Subject: scatterlist: allow limited chaining without ARCH_HAS_SG_CHAIN

There are a couple of uses of struct scatterlist that never go to the
dma_map_sg() helper and thus don't care about ARCH_HAS_SG_CHAIN, which
indicates that we can map chained S/G lists.

The most important one is the crypto code, which currently has to open
code a few helpers to always allow chaining. This patch removes a few
#ifdef ARCH_HAS_SG_CHAIN statements so that we can switch the crypto
code to these common helpers.

Signed-off-by: Christoph Hellwig
Signed-off-by: Jens Axboe

diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index 9b1ef0c..698e906 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -161,10 +161,6 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
 static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
                             struct scatterlist *sgl)
 {
-#ifndef CONFIG_ARCH_HAS_SG_CHAIN
-       BUG();
-#endif
-
        /*
         * offset and length are unused for chain entry. Clear them.
         */
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index d105a9f..bafa993 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -105,16 +105,12 @@ EXPORT_SYMBOL(sg_nents_for_len);
  **/
 struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
 {
-#ifndef CONFIG_ARCH_HAS_SG_CHAIN
-       struct scatterlist *ret = &sgl[nents - 1];
-#else
        struct scatterlist *sg, *ret = NULL;
        unsigned int i;

        for_each_sg(sgl, sg, nents, i)
                ret = sg;

-#endif
 #ifdef CONFIG_DEBUG_SG
        BUG_ON(sgl[0].sg_magic != SG_MAGIC);
        BUG_ON(!sg_is_last(ret));
--
cgit v0.10.2


From 02c4de53add1b799f5663d5e9c8e9a5d9366a507 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Fri, 7 Aug 2015 18:15:12 +0200
Subject: target/rd: always chain S/G list

The rd sg lists are never passed to hardware, so use S/G chaining
unconditionally.

Signed-off-by: Christoph Hellwig
Signed-off-by: Jens Axboe

diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 4703f40..badd927 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -138,16 +138,12 @@ static int rd_allocate_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *
                sg_per_table = (total_sg_needed > max_sg_per_table) ?
                                max_sg_per_table : total_sg_needed;

-#ifdef CONFIG_ARCH_HAS_SG_CHAIN
-
                /*
                 * Reserve extra element for chain entry
                 */
                if (sg_per_table < total_sg_needed)
                        chain_entry = 1;

-#endif /* CONFIG_ARCH_HAS_SG_CHAIN */
-
                sg = kcalloc(sg_per_table + chain_entry, sizeof(*sg),
                                GFP_KERNEL);
                if (!sg) {
@@ -158,15 +154,11 @@ static int rd_allocate_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *

                sg_init_table(sg, sg_per_table + chain_entry);

-#ifdef CONFIG_ARCH_HAS_SG_CHAIN
-
                if (i > 0) {
                        sg_chain(sg_table[i - 1].sg_table,
                                 max_sg_per_table + 1, sg);
                }

-#endif /* CONFIG_ARCH_HAS_SG_CHAIN */
-
                sg_table[i].sg_table = sg;
                sg_table[i].rd_sg_count = sg_per_table;
                sg_table[i].page_start_offset = page_offset;
@@ -429,42 +421,6 @@ static sense_reason_t rd_do_prot_rw(struct se_cmd *cmd, bool is_read)
        prot_sg = &prot_table->sg_table[prot_page -
                                        prot_table->page_start_offset];

-#ifndef CONFIG_ARCH_HAS_SG_CHAIN
-
-       prot_npages = DIV_ROUND_UP(prot_offset + sectors * se_dev->prot_length,
-                                  PAGE_SIZE);
-
-       /*
-        * Allocate temporaly contiguous scatterlist entries if prot pages
-        * straddles multiple scatterlist tables.
-        */
-       if (prot_table->page_end_offset < prot_page + prot_npages - 1) {
-               int i;
-
-               prot_sg = kcalloc(prot_npages, sizeof(*prot_sg), GFP_KERNEL);
-               if (!prot_sg)
-                       return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-
-               need_to_release = true;
-               sg_init_table(prot_sg, prot_npages);
-
-               for (i = 0; i < prot_npages; i++) {
-                       if (prot_page + i > prot_table->page_end_offset) {
-                               prot_table = rd_get_prot_table(dev,
-                                                               prot_page + i);
-                               if (!prot_table) {
-                                       kfree(prot_sg);
-                                       return rc;
-                               }
-                               sg_unmark_end(&prot_sg[i - 1]);
-                       }
-                       prot_sg[i] = prot_table->sg_table[prot_page + i -
-                                               prot_table->page_start_offset];
-               }
-       }
-
-#endif /* !CONFIG_ARCH_HAS_SG_CHAIN */
-
        if (is_read)
                rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors, 0,
                                    prot_sg, prot_offset);
--
cgit v0.10.2


From c56f6d1270b9fc31144b59106bc8590c2acf4a8e Mon Sep 17 00:00:00 2001
From: Dan Williams
Date: Fri, 7 Aug 2015 18:15:13 +0200
Subject: crypto: replace scatterwalk_sg_chain with sg_chain

Signed-off-by: Dan Williams
[hch: split from a larger patch by Dan]
Signed-off-by: Christoph Hellwig
Acked-by: Herbert Xu
Signed-off-by: Jens Axboe

diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index 9450752..af31a0e 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -145,7 +145,7 @@ static int skcipher_alloc_sgl(struct sock *sk)
                sgl->cur = 0;

                if (sg)
-                       scatterwalk_sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);
+                       sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);

                list_add_tail(&sgl->list, &ctx->tsgl);
        }
diff --git a/crypto/ccm.c b/crypto/ccm.c
index a4d1a5e..b3f52f5 100644
--- a/crypto/ccm.c
+++ b/crypto/ccm.c
@@ -329,13 +329,13 @@ static int crypto_ccm_encrypt(struct aead_request *req)

        sg_init_table(pctx->src, 2);
        sg_set_buf(pctx->src, odata, 16);
-       scatterwalk_sg_chain(pctx->src, 2, req->src);
+       sg_chain(pctx->src, 2, req->src);
        dst = pctx->src;

        if (req->src != req->dst) {
                sg_init_table(pctx->dst, 2);
                sg_set_buf(pctx->dst, odata, 16);
-               scatterwalk_sg_chain(pctx->dst, 2, req->dst);
+               sg_chain(pctx->dst, 2, req->dst);
                dst = pctx->dst;
        }
@@ -400,13 +400,13 @@ static int crypto_ccm_decrypt(struct aead_request *req)

        sg_init_table(pctx->src, 2);
        sg_set_buf(pctx->src, authtag, 16);
-       scatterwalk_sg_chain(pctx->src, 2, req->src);
+       sg_chain(pctx->src, 2, req->src);
        dst = pctx->src;

        if (req->src != req->dst) {
                sg_init_table(pctx->dst, 2);
                sg_set_buf(pctx->dst, authtag, 16);
-               scatterwalk_sg_chain(pctx->dst, 2, req->dst);
+               sg_chain(pctx->dst, 2, req->dst);
                dst = pctx->dst;
        }
diff --git a/crypto/gcm.c b/crypto/gcm.c
index 7d32d47..ab0b2f9 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -200,14 +200,14 @@ static void crypto_gcm_init_common(struct aead_request *req)
        sg_set_buf(pctx->src, pctx->auth_tag, sizeof(pctx->auth_tag));
        sg = scatterwalk_ffwd(pctx->src + 1, req->src, req->assoclen);
        if (sg != pctx->src + 1)
-               scatterwalk_sg_chain(pctx->src, 2, sg);
+               sg_chain(pctx->src, 2, sg);

        if (req->src != req->dst) {
                sg_init_table(pctx->dst, 3);
                sg_set_buf(pctx->dst, pctx->auth_tag, sizeof(pctx->auth_tag));
                sg = scatterwalk_ffwd(pctx->dst + 1, req->dst, req->assoclen);
                if (sg != pctx->dst + 1)
-                       scatterwalk_sg_chain(pctx->dst, 2, sg);
+                       sg_chain(pctx->dst, 2, sg);
        }
 }
diff --git a/drivers/crypto/bfin_crc.c b/drivers/crypto/bfin_crc.c
index d9af940..2f0b333 100644
--- a/drivers/crypto/bfin_crc.c
+++ b/drivers/crypto/bfin_crc.c
@@ -370,8 +370,7 @@ static int bfin_crypto_crc_handle_queue(struct bfin_crypto_crc *crc,
                sg_init_table(ctx->bufsl, nsg);
                sg_set_buf(ctx->bufsl, ctx->buflast, ctx->buflast_len);
                if (nsg > 1)
-                       scatterwalk_sg_chain(ctx->bufsl, nsg,
-                                               req->src);
+                       sg_chain(ctx->bufsl, nsg, req->src);
                ctx->sg = ctx->bufsl;
        } else
                ctx->sg = req->src;
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
index 5c5df1d..be2f504 100644
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
@@ -296,7 +296,7 @@ static int qce_ahash_update(struct ahash_request *req)
        if (rctx->buflen) {
                sg_init_table(rctx->sg, 2);
                sg_set_buf(rctx->sg, rctx->tmpbuf, rctx->buflen);
-               scatterwalk_sg_chain(rctx->sg, 2, req->src);
+               sg_chain(rctx->sg, 2, req->src);
                req->src = rctx->sg;
        }
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 397a500..a75cf66 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -999,7 +999,7 @@ static int sahara_sha_prepare_request(struct ahash_request *req)
                sg_init_table(rctx->in_sg_chain, 2);
                sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt);
-               scatterwalk_sg_chain(rctx->in_sg_chain, 2, req->src);
+               sg_chain(rctx->in_sg_chain, 2, req->src);

                rctx->total = req->nbytes + rctx->buf_cnt;
                rctx->in_sg = rctx->in_sg_chain;
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 83aca95..6a7f024 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -1986,7 +1986,7 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
                sg_init_table(req_ctx->bufsl, nsg);
                sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf);
                if (nsg > 1)
-                       scatterwalk_sg_chain(req_ctx->bufsl, 2, areq->src);
+                       sg_chain(req_ctx->bufsl, 2, areq->src);
                req_ctx->psrc = req_ctx->bufsl;
        } else
                req_ctx->psrc = areq->src;
diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h
index 96670e7..35f99b6 100644
--- a/include/crypto/scatterwalk.h
+++ b/include/crypto/scatterwalk.h
@@ -25,14 +25,6 @@
 #include
 #include

-static inline void scatterwalk_sg_chain(struct scatterlist *sg1, int num,
-                                       struct scatterlist *sg2)
-{
-       sg_set_page(&sg1[num - 1], (void *)sg2, 0, 0);
-       sg1[num - 1].page_link &= ~0x02;
-       sg1[num - 1].page_link |= 0x01;
-}
-
 static inline void scatterwalk_crypto_chain(struct scatterlist *head,
                                             struct scatterlist *sg,
                                             int chain, int num)
@@ -43,7 +35,7 @@ static inline void scatterwalk_crypto_chain(struct scatterlist *head,
        }

        if (sg)
-               scatterwalk_sg_chain(head, num, sg);
+               sg_chain(head, num, sg);
        else
                sg_mark_end(head);
 }
--
cgit v0.10.2


From da81ed16bdb3bc1bf92e6cb3340915a3204d5ace Mon Sep 17 00:00:00 2001
From: Dan Williams
Date: Fri, 7 Aug 2015 18:15:14 +0200
Subject: scatterlist: remove open coded sg_unmark_end instances

Signed-off-by: Dan Williams
[hch: split from a larger patch by Dan]
Signed-off-by: Christoph Hellwig
Signed-off-by: Jens Axboe

diff --git a/block/blk-merge.c b/block/blk-merge.c
index 0027def..25ebac8 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -394,7 +394,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
                if (rq->cmd_flags & REQ_WRITE)
                        memset(q->dma_drain_buffer, 0, q->dma_drain_size);

-               sg->page_link &= ~0x02;
+               sg_unmark_end(sg);
                sg = sg_next(sg);
                sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
                            q->dma_drain_size,
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 5daf302..6f4323c 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -467,7 +467,7 @@ static unsigned int mmc_queue_packed_map_sg(struct mmc_queue *mq,
                        sg_set_buf(__sg, buf + offset, len);
                        offset += len;
                        remain -= len;
-                       (__sg++)->page_link &= ~0x02;
+                       sg_unmark_end(__sg++);
                        sg_len++;
                } while (remain);
        }
@@ -475,7 +475,7 @@ static unsigned int mmc_queue_packed_map_sg(struct mmc_queue *mq,
        list_for_each_entry(req, &packed->list, queuelist) {
                sg_len += blk_rq_map_sg(mq->queue, req, __sg);
                __sg = sg + (sg_len - 1);
-               (__sg++)->page_link &= ~0x02;
+               sg_unmark_end(__sg++);
        }
        sg_mark_end(sg + (sg_len - 1));
        return sg_len;
--
cgit v0.10.2


From 89e2a8404e4415da1edbac6ca4f7332b4a74fae2 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Fri, 7 Aug 2015 18:15:15 +0200
Subject: crypto/omap-sham: remove an open coded access to ->page_link

Signed-off-by: Dan Williams
[hch: split from a larger patch by Dan]
Signed-off-by: Christoph Hellwig
Acked-by: Herbert Xu
Signed-off-by: Jens Axboe

diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index b2024c95..48adb2a 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -588,7 +588,7 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
         * the dmaengine may try to DMA the incorrect amount of data.
         */
        sg_init_table(&ctx->sgl, 1);
-       ctx->sgl.page_link = ctx->sg->page_link;
+       sg_assign_page(&ctx->sgl, sg_page(ctx->sg));
        ctx->sgl.offset = ctx->sg->offset;
        sg_dma_len(&ctx->sgl) = len32;
        sg_dma_address(&ctx->sgl) = sg_dma_address(ctx->sg);
--
cgit v0.10.2


From db0fa0cb015794dd19f664933d49c6ce902ec1e1 Mon Sep 17 00:00:00 2001
From: Dan Williams
Date: Mon, 17 Aug 2015 08:13:26 -0600
Subject: scatterlist: use sg_phys()

Coccinelle cleanup to replace open coded sg to physical address
translations. This is in preparation for introducing scatterlists that
reference __pfn_t.

// sg_phys.cocci: convert usage page_to_phys(sg_page(sg)) to sg_phys(sg)
// usage: make coccicheck COCCI=sg_phys.cocci MODE=patch

virtual patch

@@
struct scatterlist *sg;
@@

- page_to_phys(sg_page(sg)) + sg->offset
+ sg_phys(sg)

@@
struct scatterlist *sg;
@@

- page_to_phys(sg_page(sg))
+ sg_phys(sg) & PAGE_MASK

Signed-off-by: Dan Williams
Signed-off-by: Jens Axboe

diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 1ced8a0..4efaefd 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1520,7 +1520,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
                return -ENOMEM;

        for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
-               phys_addr_t phys = page_to_phys(sg_page(s));
+               phys_addr_t phys = sg_phys(s) & PAGE_MASK;
                unsigned int len = PAGE_ALIGN(s->offset + s->length);

                if (!is_coherent &&
diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c
index bf4dec2..c89da63 100644
--- a/arch/microblaze/kernel/dma.c
+++ b/arch/microblaze/kernel/dma.c
@@ -61,8 +61,7 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
        /* FIXME this part of code is untested */
        for_each_sg(sgl, sg, nents, i) {
                sg->dma_address = sg_phys(sg);
-               __dma_sync(page_to_phys(sg_page(sg)) + sg->offset,
-                          sg->length, direction);
+               __dma_sync(sg_phys(sg), sg->length, direction);
        }

        return nents;
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index a98a7b2..b261850 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2094,7 +2094,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
                        sg_res = aligned_nrpages(sg->offset, sg->length);
                        sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
                        sg->dma_length = sg->length;
-                       pteval = page_to_phys(sg_page(sg)) | prot;
+                       pteval = (sg_phys(sg) & PAGE_MASK) | prot;
                        phys_pfn = pteval >> VTD_PAGE_SHIFT;
                }
@@ -3620,7 +3620,7 @@ static int intel_nontranslate_map_sg(struct device *hddev,
        for_each_sg(sglist, sg, nelems, i) {
                BUG_ON(!sg_page(sg));
-               sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
+               sg->dma_address = sg_phys(sg);
                sg->dma_length = sg->length;
        }
        return nelems;
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index f286090..049df49 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1408,7 +1408,7 @@ size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
        min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);

        for_each_sg(sg, s, nents, i) {
-               phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;
+               phys_addr_t phys = sg_phys(s);

                /*
                 * We are mapping on IOMMU page boundaries, so offset within
diff --git a/drivers/staging/android/ion/ion_chunk_heap.c b/drivers/staging/android/ion/ion_chunk_heap.c
index 5474615..f7b6ef9 100644
--- a/drivers/staging/android/ion/ion_chunk_heap.c
+++ b/drivers/staging/android/ion/ion_chunk_heap.c
@@ -81,7 +81,7 @@ static int ion_chunk_heap_allocate(struct ion_heap *heap,
 err:
        sg = table->sgl;
        for (i -= 1; i >= 0; i--) {
-               gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
+               gen_pool_free(chunk_heap->pool, sg_phys(sg) & PAGE_MASK,
                              sg->length);
                sg = sg_next(sg);
        }
@@ -109,7 +109,7 @@ static void ion_chunk_heap_free(struct ion_buffer *buffer)
                                                        DMA_BIDIRECTIONAL);

        for_each_sg(table->sgl, sg, table->nents, i) {
-               gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
+               gen_pool_free(chunk_heap->pool, sg_phys(sg) & PAGE_MASK,
                              sg->length);
        }
        chunk_heap->allocated -= allocated_size;
--
cgit v0.10.2


From f8bcbe62acd0e1ce9004b83e98a4af87ae385dcf Mon Sep 17 00:00:00 2001
From: Robert Jarzmik
Date: Sat, 8 Aug 2015 10:44:10 +0200
Subject: lib: scatterlist: add sg splitting function

Sometimes a scatter-gather list has to be split into several chunks, or
sub scatter lists. This happens for example if a scatter list will be
handled by multiple DMA channels, each one filling a part of it.

A concrete example comes with the media V4L2 API, where the scatter list
is allocated from userspace to hold an image, regardless of how many
DMAs will fill it:
 - in a simple RGB565 case, one DMA will pump data from the camera ISP
   to memory
 - in the trickier YUV422 case, 3 DMAs will pump data from the camera
   ISP pipes, one for pipe Y, one for pipe U and one for pipe V

For these cases, it is necessary to split the original scatter list into
multiple scatter lists, which is the purpose of this patch.

The guarantees that are required for this patch are:
 - the intersection of the spans of any two resulting scatter lists is
   empty.
 - the union of the spans of all resulting scatter lists is a subrange
   of the span of the original scatter list.
 - streaming DMA API operations (mapping, unmapping) should not happen
   on both the resulting and the original scatter lists. It's either the
   former or the latter.
 - the caller is responsible for calling kfree() on the resulting
   scatterlists.
Signed-off-by: Robert Jarzmik
Signed-off-by: Jens Axboe

diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index 698e906..556ec1e 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -247,6 +247,11 @@ struct scatterlist *sg_next(struct scatterlist *);
 struct scatterlist *sg_last(struct scatterlist *s, unsigned int);
 void sg_init_table(struct scatterlist *, unsigned int);
 void sg_init_one(struct scatterlist *, const void *, unsigned int);
+int sg_split(struct scatterlist *in, const int in_mapped_nents,
+            const off_t skip, const int nb_splits,
+            const size_t *split_sizes,
+            struct scatterlist **out, int *out_mapped_nents,
+            gfp_t gfp_mask);

 typedef struct scatterlist *(sg_alloc_fn)(unsigned int, gfp_t);
 typedef void (sg_free_fn)(struct scatterlist *, unsigned int);
diff --git a/lib/Kconfig b/lib/Kconfig
index 3a2ef67..dc51616 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -521,6 +521,13 @@ config UCS2_STRING

 source "lib/fonts/Kconfig"

+config SG_SPLIT
+       def_bool n
+       help
+         Provides a helper to split scatterlists into chunks, each chunk being
+         a scatterlist. This should be selected by a driver or an API which
+         wishes to split a scatterlist amongst multiple DMA channels.
+
 #
 # sg chaining option
 #
diff --git a/lib/Makefile b/lib/Makefile
index 6897b52..2ee6ea2 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -160,6 +160,7 @@ obj-$(CONFIG_GENERIC_STRNLEN_USER) += strnlen_user.o

 obj-$(CONFIG_GENERIC_NET_UTILS) += net_utils.o

+obj-$(CONFIG_SG_SPLIT) += sg_split.o
 obj-$(CONFIG_STMP_DEVICE) += stmp_device.o

 libfdt_files = fdt.o fdt_ro.o fdt_wip.o fdt_rw.o fdt_sw.o fdt_strerror.o \
diff --git a/lib/sg_split.c b/lib/sg_split.c
new file mode 100644
index 0000000..b063410
--- /dev/null
+++ b/lib/sg_split.c
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2015 Robert Jarzmik
+ *
+ * Scatterlist splitting helpers.
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+
+#include
+#include
+
+struct sg_splitter {
+       struct scatterlist *in_sg0;
+       int nents;
+       off_t skip_sg0;
+       unsigned int length_last_sg;
+
+       struct scatterlist *out_sg;
+};
+
+static int sg_calculate_split(struct scatterlist *in, int nents, int nb_splits,
+                             off_t skip, const size_t *sizes,
+                             struct sg_splitter *splitters, bool mapped)
+{
+       int i;
+       unsigned int sglen;
+       size_t size = sizes[0], len;
+       struct sg_splitter *curr = splitters;
+       struct scatterlist *sg;
+
+       for (i = 0; i < nb_splits; i++) {
+               splitters[i].in_sg0 = NULL;
+               splitters[i].nents = 0;
+       }
+
+       for_each_sg(in, sg, nents, i) {
+               sglen = mapped ? sg_dma_len(sg) : sg->length;
+               if (skip > sglen) {
+                       skip -= sglen;
+                       continue;
+               }
+
+               len = min_t(size_t, size, sglen - skip);
+               if (!curr->in_sg0) {
+                       curr->in_sg0 = sg;
+                       curr->skip_sg0 = skip;
+               }
+               size -= len;
+               curr->nents++;
+               curr->length_last_sg = len;
+
+               while (!size && (skip + len < sglen) && (--nb_splits > 0)) {
+                       curr++;
+                       size = *(++sizes);
+                       skip += len;
+                       len = min_t(size_t, size, sglen - skip);
+
+                       curr->in_sg0 = sg;
+                       curr->skip_sg0 = skip;
+                       curr->nents = 1;
+                       curr->length_last_sg = len;
+                       size -= len;
+               }
+               skip = 0;
+
+               if (!size && --nb_splits > 0) {
+                       curr++;
+                       size = *(++sizes);
+               }
+
+               if (!nb_splits)
+                       break;
+       }
+
+       return (size || !splitters[0].in_sg0) ? -EINVAL : 0;
+}
+
+static void sg_split_phys(struct sg_splitter *splitters, const int nb_splits)
+{
+       int i, j;
+       struct scatterlist *in_sg, *out_sg;
+       struct sg_splitter *split;
+
+       for (i = 0, split = splitters; i < nb_splits; i++, split++) {
+               in_sg = split->in_sg0;
+               out_sg = split->out_sg;
+               for (j = 0; j < split->nents; j++, out_sg++) {
+                       *out_sg = *in_sg;
+                       if (!j) {
+                               out_sg->offset += split->skip_sg0;
+                               out_sg->length -= split->skip_sg0;
+                       } else {
+                               out_sg->offset = 0;
+                       }
+                       sg_dma_address(out_sg) = 0;
+                       sg_dma_len(out_sg) = 0;
+                       in_sg = sg_next(in_sg);
+               }
+               out_sg[-1].length = split->length_last_sg;
+               sg_mark_end(out_sg - 1);
+       }
+}
+
+static void sg_split_mapped(struct sg_splitter *splitters, const int nb_splits)
+{
+       int i, j;
+       struct scatterlist *in_sg, *out_sg;
+       struct sg_splitter *split;
+
+       for (i = 0, split = splitters; i < nb_splits; i++, split++) {
+               in_sg = split->in_sg0;
+               out_sg = split->out_sg;
+               for (j = 0; j < split->nents; j++, out_sg++) {
+                       sg_dma_address(out_sg) = sg_dma_address(in_sg);
+                       sg_dma_len(out_sg) = sg_dma_len(in_sg);
+                       if (!j) {
+                               sg_dma_address(out_sg) += split->skip_sg0;
+                               sg_dma_len(out_sg) -= split->skip_sg0;
+                       }
+                       in_sg = sg_next(in_sg);
+               }
+               sg_dma_len(--out_sg) = split->length_last_sg;
+       }
+}
+
+/**
+ * sg_split - split a scatterlist into several scatterlists
+ * @in: the input sg list
+ * @in_mapped_nents: the result of a dma_map_sg(in, ...), or 0 if not mapped.
+ * @skip: the number of bytes to skip in the input sg list
+ * @nb_splits: the number of desired sg outputs
+ * @split_sizes: the respective size of each output sg list in bytes
+ * @out: an array where to store the allocated output sg lists
+ * @out_mapped_nents: the resulting sg lists mapped number of sg entries. Might
+ *                    be NULL if sglist not already mapped (in_mapped_nents = 0)
+ * @gfp_mask: the allocation flag
+ *
+ * This function splits the input sg list into nb_splits sg lists, which are
+ * allocated and stored into out.
+ * The @in is split into :
+ * - @out[0], which covers bytes [@skip .. @skip + @split_sizes[0] - 1] of @in
+ * - @out[1], which covers bytes [@skip + split_sizes[0] ..
+ *                                @skip + @split_sizes[0] + @split_sizes[1] - 1]
+ * etc ...
+ * It will be the caller's duty to kfree() out array members.
+ *
+ * Returns 0 upon success, or error code
+ */
+int sg_split(struct scatterlist *in, const int in_mapped_nents,
+            const off_t skip, const int nb_splits,
+            const size_t *split_sizes,
+            struct scatterlist **out, int *out_mapped_nents,
+            gfp_t gfp_mask)
+{
+       int i, ret;
+       struct sg_splitter *splitters;
+
+       splitters = kcalloc(nb_splits, sizeof(*splitters), gfp_mask);
+       if (!splitters)
+               return -ENOMEM;
+
+       ret = sg_calculate_split(in, sg_nents(in), nb_splits, skip, split_sizes,
+                                splitters, false);
+       if (ret < 0)
+               goto err;
+
+       ret = -ENOMEM;
+       for (i = 0; i < nb_splits; i++) {
+               splitters[i].out_sg = kmalloc_array(splitters[i].nents,
+                                                   sizeof(struct scatterlist),
+                                                   gfp_mask);
+               if (!splitters[i].out_sg)
+                       goto err;
+       }
+
+       /*
+        * The order of these 3 calls is important and should be kept.
+        */
+       sg_split_phys(splitters, nb_splits);
+       ret = sg_calculate_split(in, in_mapped_nents, nb_splits, skip,
+                                split_sizes, splitters, true);
+       if (ret < 0)
+               goto err;
+       sg_split_mapped(splitters, nb_splits);
+
+       for (i = 0; i < nb_splits; i++) {
+               out[i] = splitters[i].out_sg;
+               if (out_mapped_nents)
+                       out_mapped_nents[i] = splitters[i].nents;
+       }
+
+       kfree(splitters);
+       return 0;
+
+err:
+       for (i = 0; i < nb_splits; i++)
+               kfree(splitters[i].out_sg);
+       kfree(splitters);
+       return ret;
+}
+EXPORT_SYMBOL(sg_split);
--
cgit v0.10.2
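
A minimal usage sketch of the new sg_split() interface added by the last patch above. This example is not part of the series: the device, the pre-built sg_table, the DMA direction and the two 64 KiB split sizes are hypothetical, and error handling is abbreviated. The input list is DMA-mapped first, so sg_split() fills in both the page-level and the DMA-mapped view of each output list, e.g. for handing one half each to two DMA channels:

/*
 * Hypothetical example, not from the patch series: split one mapped
 * scatterlist into two 64 KiB halves, e.g. to feed two DMA channels.
 * Assumes the mapped list covers at least 128 KiB.
 */
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int example_split(struct device *dev, struct sg_table *sgt)
{
        const size_t split_sizes[2] = { 64 * 1024, 64 * 1024 };
        struct scatterlist *out[2];
        int out_mapped_nents[2];
        int mapped, ret, i;

        mapped = dma_map_sg(dev, sgt->sgl, sgt->orig_nents, DMA_FROM_DEVICE);
        if (!mapped)
                return -EIO;

        /* Skip no bytes, produce two output lists of 64 KiB each. */
        ret = sg_split(sgt->sgl, mapped, 0, 2, split_sizes,
                       out, out_mapped_nents, GFP_KERNEL);
        if (ret)
                goto unmap;

        /*
         * out[0] and out[1] now describe disjoint byte ranges of the
         * original list; out_mapped_nents[i] is the entry count a DMA
         * channel would be given.  The split lists are only descriptors:
         * the original list remains the one to dma_unmap_sg() later, and
         * each out[i] must be kfree()'d by the caller.
         */
        for (i = 0; i < 2; i++)
                kfree(out[i]);
unmap:
        dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, DMA_FROM_DEVICE);
        return ret;
}

This mirrors the guarantees listed in the commit message: streaming DMA mapping and unmapping stay on the original list, while the split lists are handed to the consumers and released with kfree().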