author		Linus Torvalds <torvalds@linux-foundation.org>	2015-09-02 16:22:38 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-09-02 16:22:38 -0400
commit		d975f309a8b250e67b66eabeb56be6989c783629 (patch)
tree		846cae85891024f7c45416d085df90d98ec2e51b
parent		52b084d31cbc8e90cb6fc1ac4061d9a24375c89d (diff)
parent		f8bcbe62acd0e1ce9004b83e98a4af87ae385dcf (diff)
Merge branch 'for-4.3/sg' of git://git.kernel.dk/linux-block
Pull SG updates from Jens Axboe:
 "This contains a set of scatter-gather related changes/fixes for 4.3:

   - Add support for limited chaining of sg tables even for
     architectures that do not set ARCH_HAS_SG_CHAIN. From Christoph.

   - Add sg chain support to target_rd. From Christoph.

   - Fixup open coded sg->page_link in crypto/omap-sham. From Christoph.

   - Fixup open coded crypto ->page_link manipulation. From Dan.

   - Also from Dan, automated fixup of manual sg_unmark_end()
     manipulations.

   - Also from Dan, automated fixup of open coded sg_phys()
     implementations.

   - From Robert Jarzmik, addition of an sg table splitting helper that
     drivers can use"

* 'for-4.3/sg' of git://git.kernel.dk/linux-block:
  lib: scatterlist: add sg splitting function
  scatterlist: use sg_phys()
  crypto/omap-sham: remove an open coded access to ->page_link
  scatterlist: remove open coded sg_unmark_end instances
  crypto: replace scatterwalk_sg_chain with sg_chain
  target/rd: always chain S/G list
  scatterlist: allow limited chaining without ARCH_HAS_SG_CHAIN
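[Editor's note] The conversions below share one pattern: open-coded scatterlist
internals are replaced by the equivalent accessors. A minimal sketch of the
before/after idioms (illustrative only, not part of the commit; the demo_*
names are hypothetical):

	#include <linux/scatterlist.h>

	/* Before: poking at scatterlist internals directly. */
	static phys_addr_t demo_before(struct scatterlist *sg)
	{
		sg->page_link &= ~0x02;	/* clear the end mark by hand */
		return page_to_phys(sg_page(sg)) + sg->offset;
	}

	/* After: the helpers this series converts callers to. */
	static phys_addr_t demo_after(struct scatterlist *sg)
	{
		sg_unmark_end(sg);	/* same bit, through the API */
		return sg_phys(sg);	/* == page_to_phys(sg_page(sg)) + sg->offset */
	}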
Diffstat
 -rw-r--r--  arch/arm/mm/dma-mapping.c                     |   2
 -rw-r--r--  arch/microblaze/kernel/dma.c                  |   3
 -rw-r--r--  block/blk-merge.c                             |   2
 -rw-r--r--  crypto/algif_skcipher.c                       |   2
 -rw-r--r--  crypto/gcm.c                                  |   4
 -rw-r--r--  drivers/crypto/bfin_crc.c                     |   3
 -rw-r--r--  drivers/crypto/omap-sham.c                    |   2
 -rw-r--r--  drivers/crypto/qce/sha.c                      |   2
 -rw-r--r--  drivers/crypto/sahara.c                       |   2
 -rw-r--r--  drivers/crypto/talitos.c                      |   2
 -rw-r--r--  drivers/iommu/intel-iommu.c                   |   4
 -rw-r--r--  drivers/iommu/iommu.c                         |   2
 -rw-r--r--  drivers/mmc/card/queue.c                      |   4
 -rw-r--r--  drivers/staging/android/ion/ion_chunk_heap.c  |   4
 -rw-r--r--  drivers/target/target_core_rd.c               |  44
 -rw-r--r--  include/crypto/scatterwalk.h                  |  10
 -rw-r--r--  include/linux/scatterlist.h                   |   9
 -rw-r--r--  lib/Kconfig                                   |   7
 -rw-r--r--  lib/Makefile                                  |   1
 -rw-r--r--  lib/scatterlist.c                             |   4
 -rw-r--r--  lib/sg_split.c                                | 202
 21 files changed, 234 insertions(+), 81 deletions(-)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index cba12f34ff77..3d3d6aa60c87 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1520,7 +1520,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
 		return -ENOMEM;
 
 	for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
-		phys_addr_t phys = page_to_phys(sg_page(s));
+		phys_addr_t phys = sg_phys(s) & PAGE_MASK;
 		unsigned int len = PAGE_ALIGN(s->offset + s->length);
 
 		if (!is_coherent &&
diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c
index bf4dec229437..c89da6312954 100644
--- a/arch/microblaze/kernel/dma.c
+++ b/arch/microblaze/kernel/dma.c
@@ -61,8 +61,7 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
 	/* FIXME this part of code is untested */
 	for_each_sg(sgl, sg, nents, i) {
 		sg->dma_address = sg_phys(sg);
-		__dma_sync(page_to_phys(sg_page(sg)) + sg->offset,
-							sg->length, direction);
+		__dma_sync(sg_phys(sg), sg->length, direction);
 	}
 
 	return nents;
diff --git a/block/blk-merge.c b/block/blk-merge.c
index b2625271a572..d088cffb8105 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -393,7 +393,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 		if (rq->cmd_flags & REQ_WRITE)
 			memset(q->dma_drain_buffer, 0, q->dma_drain_size);
 
-		sg->page_link &= ~0x02;
+		sg_unmark_end(sg);
 		sg = sg_next(sg);
 		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
 			    q->dma_drain_size,
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index 945075292bc9..af31a0ee4057 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -145,7 +145,7 @@ static int skcipher_alloc_sgl(struct sock *sk)
 		sgl->cur = 0;
 
 		if (sg)
-			scatterwalk_sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);
+			sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);
 
 		list_add_tail(&sgl->list, &ctx->tsgl);
 	}
diff --git a/crypto/gcm.c b/crypto/gcm.c
index ddb4f29b2fe6..bec329b3de8d 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -206,14 +206,14 @@ static void crypto_gcm_init_common(struct aead_request *req)
 	sg_set_buf(pctx->src, pctx->auth_tag, sizeof(pctx->auth_tag));
 	sg = scatterwalk_ffwd(pctx->src + 1, req->src, req->assoclen);
 	if (sg != pctx->src + 1)
-		scatterwalk_sg_chain(pctx->src, 2, sg);
+		sg_chain(pctx->src, 2, sg);
 
 	if (req->src != req->dst) {
 		sg_init_table(pctx->dst, 3);
 		sg_set_buf(pctx->dst, pctx->auth_tag, sizeof(pctx->auth_tag));
 		sg = scatterwalk_ffwd(pctx->dst + 1, req->dst, req->assoclen);
 		if (sg != pctx->dst + 1)
-			scatterwalk_sg_chain(pctx->dst, 2, sg);
+			sg_chain(pctx->dst, 2, sg);
 	}
 }
 
diff --git a/drivers/crypto/bfin_crc.c b/drivers/crypto/bfin_crc.c
index d9af9403ab6c..2f0b3337505d 100644
--- a/drivers/crypto/bfin_crc.c
+++ b/drivers/crypto/bfin_crc.c
@@ -370,8 +370,7 @@ static int bfin_crypto_crc_handle_queue(struct bfin_crypto_crc *crc,
 		sg_init_table(ctx->bufsl, nsg);
 		sg_set_buf(ctx->bufsl, ctx->buflast, ctx->buflast_len);
 		if (nsg > 1)
-			scatterwalk_sg_chain(ctx->bufsl, nsg,
-					     req->src);
+			sg_chain(ctx->bufsl, nsg, req->src);
 		ctx->sg = ctx->bufsl;
 	} else
 		ctx->sg = req->src;
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index b2024c95a3cf..48adb2a0903e 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -588,7 +588,7 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
 	 * the dmaengine may try to DMA the incorrect amount of data.
 	 */
 	sg_init_table(&ctx->sgl, 1);
-	ctx->sgl.page_link = ctx->sg->page_link;
+	sg_assign_page(&ctx->sgl, sg_page(ctx->sg));
 	ctx->sgl.offset = ctx->sg->offset;
 	sg_dma_len(&ctx->sgl) = len32;
 	sg_dma_address(&ctx->sgl) = sg_dma_address(ctx->sg);
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
index 5c5df1d17f90..be2f5049256a 100644
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
@@ -296,7 +296,7 @@ static int qce_ahash_update(struct ahash_request *req)
 	if (rctx->buflen) {
 		sg_init_table(rctx->sg, 2);
 		sg_set_buf(rctx->sg, rctx->tmpbuf, rctx->buflen);
-		scatterwalk_sg_chain(rctx->sg, 2, req->src);
+		sg_chain(rctx->sg, 2, req->src);
 		req->src = rctx->sg;
 	}
 
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 1c19e44c3146..820dc3acb28c 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -999,7 +999,7 @@ static int sahara_sha_prepare_request(struct ahash_request *req)
 		sg_init_table(rctx->in_sg_chain, 2);
 		sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt);
 
-		scatterwalk_sg_chain(rctx->in_sg_chain, 2, req->src);
+		sg_chain(rctx->in_sg_chain, 2, req->src);
 
 		rctx->total = req->nbytes + rctx->buf_cnt;
 		rctx->in_sg = rctx->in_sg_chain;
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index cd774534d987..3b20a1bce703 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -1929,7 +1929,7 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
 		sg_init_table(req_ctx->bufsl, nsg);
 		sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf);
 		if (nsg > 1)
-			scatterwalk_sg_chain(req_ctx->bufsl, 2, areq->src);
+			sg_chain(req_ctx->bufsl, 2, areq->src);
 		req_ctx->psrc = req_ctx->bufsl;
 	} else
 		req_ctx->psrc = areq->src;
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 697291aceea7..c82ebee6c7e5 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2103,7 +2103,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 			sg_res = aligned_nrpages(sg->offset, sg->length);
 			sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
 			sg->dma_length = sg->length;
-			pteval = page_to_phys(sg_page(sg)) | prot;
+			pteval = (sg_phys(sg) & PAGE_MASK) | prot;
 			phys_pfn = pteval >> VTD_PAGE_SHIFT;
 		}
 
@@ -3631,7 +3631,7 @@ static int intel_nontranslate_map_sg(struct device *hddev,
 
 	for_each_sg(sglist, sg, nelems, i) {
 		BUG_ON(!sg_page(sg));
-		sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
+		sg->dma_address = sg_phys(sg);
 		sg->dma_length = sg->length;
 	}
 	return nelems;
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index f286090931cc..049df495c274 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1408,7 +1408,7 @@ size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
 	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
 
 	for_each_sg(sg, s, nents, i) {
-		phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;
+		phys_addr_t phys = sg_phys(s);
 
 		/*
 		 * We are mapping on IOMMU page boundaries, so offset within
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 5daf302835b1..6f4323c6d653 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -467,7 +467,7 @@ static unsigned int mmc_queue_packed_map_sg(struct mmc_queue *mq,
 			sg_set_buf(__sg, buf + offset, len);
 			offset += len;
 			remain -= len;
-			(__sg++)->page_link &= ~0x02;
+			sg_unmark_end(__sg++);
 			sg_len++;
 		} while (remain);
 	}
@@ -475,7 +475,7 @@ static unsigned int mmc_queue_packed_map_sg(struct mmc_queue *mq,
 	list_for_each_entry(req, &packed->list, queuelist) {
 		sg_len += blk_rq_map_sg(mq->queue, req, __sg);
 		__sg = sg + (sg_len - 1);
-		(__sg++)->page_link &= ~0x02;
+		sg_unmark_end(__sg++);
 	}
 	sg_mark_end(sg + (sg_len - 1));
 	return sg_len;
diff --git a/drivers/staging/android/ion/ion_chunk_heap.c b/drivers/staging/android/ion/ion_chunk_heap.c
index 0813163f962f..195c41d7bd53 100644
--- a/drivers/staging/android/ion/ion_chunk_heap.c
+++ b/drivers/staging/android/ion/ion_chunk_heap.c
@@ -81,7 +81,7 @@ static int ion_chunk_heap_allocate(struct ion_heap *heap,
 err:
 	sg = table->sgl;
 	for (i -= 1; i >= 0; i--) {
-		gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
+		gen_pool_free(chunk_heap->pool, sg_phys(sg) & PAGE_MASK,
 			      sg->length);
 		sg = sg_next(sg);
 	}
@@ -109,7 +109,7 @@ static void ion_chunk_heap_free(struct ion_buffer *buffer)
 							DMA_BIDIRECTIONAL);
 
 	for_each_sg(table->sgl, sg, table->nents, i) {
-		gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
+		gen_pool_free(chunk_heap->pool, sg_phys(sg) & PAGE_MASK,
 			      sg->length);
 	}
 	chunk_heap->allocated -= allocated_size;
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 384cf8894411..47a833f3a145 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -138,16 +138,12 @@ static int rd_allocate_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *
 		sg_per_table = (total_sg_needed > max_sg_per_table) ?
 				max_sg_per_table : total_sg_needed;
 
-#ifdef CONFIG_ARCH_HAS_SG_CHAIN
-
 		/*
 		 * Reserve extra element for chain entry
 		 */
 		if (sg_per_table < total_sg_needed)
 			chain_entry = 1;
 
-#endif /* CONFIG_ARCH_HAS_SG_CHAIN */
-
 		sg = kcalloc(sg_per_table + chain_entry, sizeof(*sg),
 				GFP_KERNEL);
 		if (!sg) {
@@ -158,15 +154,11 @@ static int rd_allocate_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *
 
 		sg_init_table(sg, sg_per_table + chain_entry);
 
-#ifdef CONFIG_ARCH_HAS_SG_CHAIN
-
 		if (i > 0) {
 			sg_chain(sg_table[i - 1].sg_table,
 				 max_sg_per_table + 1, sg);
 		}
 
-#endif /* CONFIG_ARCH_HAS_SG_CHAIN */
-
 		sg_table[i].sg_table = sg;
 		sg_table[i].rd_sg_count = sg_per_table;
 		sg_table[i].page_start_offset = page_offset;
@@ -430,42 +422,6 @@ static sense_reason_t rd_do_prot_rw(struct se_cmd *cmd, bool is_read)
 	prot_sg = &prot_table->sg_table[prot_page -
 					prot_table->page_start_offset];
 
-#ifndef CONFIG_ARCH_HAS_SG_CHAIN
-
-	prot_npages = DIV_ROUND_UP(prot_offset + sectors * se_dev->prot_length,
-				   PAGE_SIZE);
-
-	/*
-	 * Allocate temporaly contiguous scatterlist entries if prot pages
-	 * straddles multiple scatterlist tables.
-	 */
-	if (prot_table->page_end_offset < prot_page + prot_npages - 1) {
-		int i;
-
-		prot_sg = kcalloc(prot_npages, sizeof(*prot_sg), GFP_KERNEL);
-		if (!prot_sg)
-			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-
-		need_to_release = true;
-		sg_init_table(prot_sg, prot_npages);
-
-		for (i = 0; i < prot_npages; i++) {
-			if (prot_page + i > prot_table->page_end_offset) {
-				prot_table = rd_get_prot_table(dev,
-						prot_page + i);
-				if (!prot_table) {
-					kfree(prot_sg);
-					return rc;
-				}
-				sg_unmark_end(&prot_sg[i - 1]);
-			}
-			prot_sg[i] = prot_table->sg_table[prot_page + i -
-						prot_table->page_start_offset];
-		}
-	}
-
-#endif /* !CONFIG_ARCH_HAS_SG_CHAIN */
-
 	if (is_read)
 		rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors, 0,
 				    prot_sg, prot_offset);
diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h
index 96670e7e7c14..35f99b68d037 100644
--- a/include/crypto/scatterwalk.h
+++ b/include/crypto/scatterwalk.h
@@ -25,14 +25,6 @@
 #include <linux/scatterlist.h>
 #include <linux/sched.h>
 
-static inline void scatterwalk_sg_chain(struct scatterlist *sg1, int num,
-					struct scatterlist *sg2)
-{
-	sg_set_page(&sg1[num - 1], (void *)sg2, 0, 0);
-	sg1[num - 1].page_link &= ~0x02;
-	sg1[num - 1].page_link |= 0x01;
-}
-
 static inline void scatterwalk_crypto_chain(struct scatterlist *head,
 					    struct scatterlist *sg,
 					    int chain, int num)
@@ -43,7 +35,7 @@ static inline void scatterwalk_crypto_chain(struct scatterlist *head,
 	}
 
 	if (sg)
-		scatterwalk_sg_chain(head, num, sg);
+		sg_chain(head, num, sg);
 	else
 		sg_mark_end(head);
 }
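[Editor's note] The removed scatterwalk_sg_chain() open-coded exactly what the
generic sg_chain() does, which is why the replacement is mechanical. A minimal
sketch of the chaining pattern the converted crypto drivers share (illustrative
only; demo_prepend and its parameters are hypothetical):

	#include <linux/scatterlist.h>

	/*
	 * Prepend a local buffer to an existing sg list: entry 0 carries
	 * the buffer, entry 1 becomes a chain link to the caller's list.
	 */
	static void demo_prepend(struct scatterlist head[2], void *buf,
				 unsigned int buflen, struct scatterlist *src)
	{
		sg_init_table(head, 2);
		sg_set_buf(head, buf, buflen);
		sg_chain(head, 2, src);
	}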
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index 9b1ef0c820a7..556ec1ea2574 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -161,10 +161,6 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
 static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
 			    struct scatterlist *sgl)
 {
-#ifndef CONFIG_ARCH_HAS_SG_CHAIN
-	BUG();
-#endif
-
 	/*
 	 * offset and length are unused for chain entry. Clear them.
 	 */
@@ -251,6 +247,11 @@ struct scatterlist *sg_next(struct scatterlist *);
 struct scatterlist *sg_last(struct scatterlist *s, unsigned int);
 void sg_init_table(struct scatterlist *, unsigned int);
 void sg_init_one(struct scatterlist *, const void *, unsigned int);
+int sg_split(struct scatterlist *in, const int in_mapped_nents,
+	     const off_t skip, const int nb_splits,
+	     const size_t *split_sizes,
+	     struct scatterlist **out, int *out_mapped_nents,
+	     gfp_t gfp_mask);
 
 typedef struct scatterlist *(sg_alloc_fn)(unsigned int, gfp_t);
 typedef void (sg_free_fn)(struct scatterlist *, unsigned int);
diff --git a/lib/Kconfig b/lib/Kconfig
index 3a2ef67db6c7..dc516164415a 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -521,6 +521,13 @@ config UCS2_STRING
 
 source "lib/fonts/Kconfig"
 
+config SG_SPLIT
+	def_bool n
+	help
+	 Provides a helper to split scatterlists into chunks, each chunk being
+	 a scatterlist. This should be selected by a driver or an API which
+	 wishes to split a scatterlist amongst multiple DMA channels.
+
 #
 # sg chaining option
 #
diff --git a/lib/Makefile b/lib/Makefile
index 6897b527581a..2ee6ea2e9b08 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -160,6 +160,7 @@ obj-$(CONFIG_GENERIC_STRNLEN_USER) += strnlen_user.o
 
 obj-$(CONFIG_GENERIC_NET_UTILS) += net_utils.o
 
+obj-$(CONFIG_SG_SPLIT) += sg_split.o
 obj-$(CONFIG_STMP_DEVICE) += stmp_device.o
 
 libfdt_files = fdt.o fdt_ro.o fdt_wip.o fdt_rw.o fdt_sw.o fdt_strerror.o \
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index d105a9f56878..bafa9933fa76 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -105,16 +105,12 @@ EXPORT_SYMBOL(sg_nents_for_len);
 **/
 struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
 {
-#ifndef CONFIG_ARCH_HAS_SG_CHAIN
-	struct scatterlist *ret = &sgl[nents - 1];
-#else
 	struct scatterlist *sg, *ret = NULL;
 	unsigned int i;
 
 	for_each_sg(sgl, sg, nents, i)
 		ret = sg;
 
-#endif
 #ifdef CONFIG_DEBUG_SG
 	BUG_ON(sgl[0].sg_magic != SG_MAGIC);
 	BUG_ON(!sg_is_last(ret));
diff --git a/lib/sg_split.c b/lib/sg_split.c
new file mode 100644
index 000000000000..b063410c3593
--- /dev/null
+++ b/lib/sg_split.c
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2015 Robert Jarzmik <robert.jarzmik@free.fr>
+ *
+ * Scatterlist splitting helpers.
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+
+struct sg_splitter {
+	struct scatterlist *in_sg0;
+	int nents;
+	off_t skip_sg0;
+	unsigned int length_last_sg;
+
+	struct scatterlist *out_sg;
+};
+
+static int sg_calculate_split(struct scatterlist *in, int nents, int nb_splits,
+			      off_t skip, const size_t *sizes,
+			      struct sg_splitter *splitters, bool mapped)
+{
+	int i;
+	unsigned int sglen;
+	size_t size = sizes[0], len;
+	struct sg_splitter *curr = splitters;
+	struct scatterlist *sg;
+
+	for (i = 0; i < nb_splits; i++) {
+		splitters[i].in_sg0 = NULL;
+		splitters[i].nents = 0;
+	}
+
+	for_each_sg(in, sg, nents, i) {
+		sglen = mapped ? sg_dma_len(sg) : sg->length;
+		if (skip > sglen) {
+			skip -= sglen;
+			continue;
+		}
+
+		len = min_t(size_t, size, sglen - skip);
+		if (!curr->in_sg0) {
+			curr->in_sg0 = sg;
+			curr->skip_sg0 = skip;
+		}
+		size -= len;
+		curr->nents++;
+		curr->length_last_sg = len;
+
+		while (!size && (skip + len < sglen) && (--nb_splits > 0)) {
+			curr++;
+			size = *(++sizes);
+			skip += len;
+			len = min_t(size_t, size, sglen - skip);
+
+			curr->in_sg0 = sg;
+			curr->skip_sg0 = skip;
+			curr->nents = 1;
+			curr->length_last_sg = len;
+			size -= len;
+		}
+		skip = 0;
+
+		if (!size && --nb_splits > 0) {
+			curr++;
+			size = *(++sizes);
+		}
+
+		if (!nb_splits)
+			break;
+	}
+
+	return (size || !splitters[0].in_sg0) ? -EINVAL : 0;
+}
+
+static void sg_split_phys(struct sg_splitter *splitters, const int nb_splits)
+{
+	int i, j;
+	struct scatterlist *in_sg, *out_sg;
+	struct sg_splitter *split;
+
+	for (i = 0, split = splitters; i < nb_splits; i++, split++) {
+		in_sg = split->in_sg0;
+		out_sg = split->out_sg;
+		for (j = 0; j < split->nents; j++, out_sg++) {
+			*out_sg = *in_sg;
+			if (!j) {
+				out_sg->offset += split->skip_sg0;
+				out_sg->length -= split->skip_sg0;
+			} else {
+				out_sg->offset = 0;
+			}
+			sg_dma_address(out_sg) = 0;
+			sg_dma_len(out_sg) = 0;
+			in_sg = sg_next(in_sg);
+		}
+		out_sg[-1].length = split->length_last_sg;
+		sg_mark_end(out_sg - 1);
+	}
+}
+
+static void sg_split_mapped(struct sg_splitter *splitters, const int nb_splits)
+{
+	int i, j;
+	struct scatterlist *in_sg, *out_sg;
+	struct sg_splitter *split;
+
+	for (i = 0, split = splitters; i < nb_splits; i++, split++) {
+		in_sg = split->in_sg0;
+		out_sg = split->out_sg;
+		for (j = 0; j < split->nents; j++, out_sg++) {
+			sg_dma_address(out_sg) = sg_dma_address(in_sg);
+			sg_dma_len(out_sg) = sg_dma_len(in_sg);
+			if (!j) {
+				sg_dma_address(out_sg) += split->skip_sg0;
+				sg_dma_len(out_sg) -= split->skip_sg0;
+			}
+			in_sg = sg_next(in_sg);
+		}
+		sg_dma_len(--out_sg) = split->length_last_sg;
+	}
+}
+
+/**
+ * sg_split - split a scatterlist into several scatterlists
+ * @in: the input sg list
+ * @in_mapped_nents: the result of a dma_map_sg(in, ...), or 0 if not mapped.
+ * @skip: the number of bytes to skip in the input sg list
+ * @nb_splits: the number of desired sg outputs
+ * @split_sizes: the respective size of each output sg list in bytes
+ * @out: an array in which to store the allocated output sg lists
+ * @out_mapped_nents: the resulting sg lists mapped number of sg entries. Might
+ *		      be NULL if sglist not already mapped (in_mapped_nents = 0)
+ * @gfp_mask: the allocation flag
+ *
+ * This function splits the input sg list into nb_splits sg lists, which are
+ * allocated and stored into out.
+ * The @in is split into:
+ * - @out[0], which covers bytes [@skip .. @skip + @split_sizes[0] - 1] of @in
+ * - @out[1], which covers bytes [@skip + split_sizes[0] ..
+ *			       @skip + @split_sizes[0] + @split_sizes[1] - 1]
+ * etc ...
+ * It will be the caller's duty to kfree() out array members.
+ *
+ * Returns 0 upon success, or error code
+ */
+int sg_split(struct scatterlist *in, const int in_mapped_nents,
+	     const off_t skip, const int nb_splits,
+	     const size_t *split_sizes,
+	     struct scatterlist **out, int *out_mapped_nents,
+	     gfp_t gfp_mask)
+{
+	int i, ret;
+	struct sg_splitter *splitters;
+
+	splitters = kcalloc(nb_splits, sizeof(*splitters), gfp_mask);
+	if (!splitters)
+		return -ENOMEM;
+
+	ret = sg_calculate_split(in, sg_nents(in), nb_splits, skip, split_sizes,
+				 splitters, false);
+	if (ret < 0)
+		goto err;
+
+	ret = -ENOMEM;
+	for (i = 0; i < nb_splits; i++) {
+		splitters[i].out_sg = kmalloc_array(splitters[i].nents,
+						    sizeof(struct scatterlist),
+						    gfp_mask);
+		if (!splitters[i].out_sg)
+			goto err;
+	}
+
+	/*
+	 * The order of these 3 calls is important and should be kept.
+	 */
+	sg_split_phys(splitters, nb_splits);
+	ret = sg_calculate_split(in, in_mapped_nents, nb_splits, skip,
+				 split_sizes, splitters, true);
+	if (ret < 0)
+		goto err;
+	sg_split_mapped(splitters, nb_splits);
+
+	for (i = 0; i < nb_splits; i++) {
+		out[i] = splitters[i].out_sg;
+		if (out_mapped_nents)
+			out_mapped_nents[i] = splitters[i].nents;
+	}
+
+	kfree(splitters);
+	return 0;
+
+err:
+	for (i = 0; i < nb_splits; i++)
+		kfree(splitters[i].out_sg);
+	kfree(splitters);
+	return ret;
+}
+EXPORT_SYMBOL(sg_split);
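[Editor's note] A usage sketch for the new helper, following the kernel-doc
above (illustrative only; demo_split and the split sizes are hypothetical):

	#include <linux/scatterlist.h>
	#include <linux/slab.h>

	/* Split one DMA-mapped sg list into a 4 KiB chunk and an 8 KiB chunk. */
	static int demo_split(struct scatterlist *sgl, int mapped_nents)
	{
		const size_t sizes[2] = { 4096, 8192 };
		struct scatterlist *out[2];
		int out_nents[2];
		int ret;

		ret = sg_split(sgl, mapped_nents, 0 /* skip */, 2, sizes,
			       out, out_nents, GFP_KERNEL);
		if (ret)
			return ret;

		/* ... hand out[0] and out[1] to two DMA channels ... */

		kfree(out[0]);	/* the caller owns the output arrays */
		kfree(out[1]);
		return 0;
	}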