author	LABBE Corentin <clabbe.montjoie@gmail.com>	2015-09-23 07:55:27 -0400
committer	Herbert Xu <herbert@gondor.apana.org.au>	2015-10-01 09:56:56 -0400
commit	13fb8fd7a81923f7a64b4e688fe0bdaf1ea26adf (patch)
tree	87b60d1c96dddfa7c70476cdbef649ab6e7ade2d
parent	b8a011d41c988aeb9aa199eb80ccd5179b5940c1 (diff)
crypto: caam - dma_map_sg can handle chained SG
The caam driver uses two dma_map_sg paths depending on whether the scatterlist is chained or not. Since dma_map_sg can handle both cases, clean up the code by removing all references to SG chaining, and drop the dma_map_sg_chained, dma_unmap_sg_chained and __sg_count helpers.

Signed-off-by: LABBE Corentin <clabbe.montjoie@gmail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
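As context for the change (not part of the patch itself), here is a minimal sketch of the mapping pattern the driver is left with. The helper names map_src_for_device/unmap_src_for_device and the error handling are illustrative assumptions; only the dma_map_sg()/dma_unmap_sg() calls and the "? : 1" fallback mirror the driver code in the diff below.

	#include <linux/dma-mapping.h>
	#include <linux/scatterlist.h>

	/*
	 * Illustrative sketch: dma_map_sg() already walks chained
	 * scatterlists via sg_next(), so no "chained" flag or per-entry
	 * mapping loop is needed; one call maps the whole list.
	 */
	static int map_src_for_device(struct device *jrdev,
				      struct scatterlist *src, int src_nents)
	{
		/* sg_count() returns 0 for a single entry, hence "? : 1" */
		if (!dma_map_sg(jrdev, src, src_nents ? : 1, DMA_TO_DEVICE))
			return -ENOMEM;

		return 0;
	}

	static void unmap_src_for_device(struct device *jrdev,
					 struct scatterlist *src, int src_nents)
	{
		dma_unmap_sg(jrdev, src, src_nents ? : 1, DMA_TO_DEVICE);
	}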
-rw-r--r--	drivers/crypto/caam/caamalg.c	| 94
-rw-r--r--	drivers/crypto/caam/caamhash.c	| 55
-rw-r--r--	drivers/crypto/caam/sg_sw_sec4.h	| 72
3 files changed, 53 insertions, 168 deletions
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index ba79d638f782..ad0d1ec51dd5 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -1708,11 +1708,8 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
 /*
  * aead_edesc - s/w-extended aead descriptor
  * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
- * @assoc_chained: if source is chained
  * @src_nents: number of segments in input scatterlist
- * @src_chained: if source is chained
  * @dst_nents: number of segments in output scatterlist
- * @dst_chained: if destination is chained
  * @iv_dma: dma address of iv for checking continuity and link table
  * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
  * @sec4_sg_bytes: length of dma mapped sec4_sg space
@@ -1721,11 +1718,8 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
  */
 struct aead_edesc {
 	int assoc_nents;
-	bool assoc_chained;
 	int src_nents;
-	bool src_chained;
 	int dst_nents;
-	bool dst_chained;
 	dma_addr_t iv_dma;
 	int sec4_sg_bytes;
 	dma_addr_t sec4_sg_dma;
@@ -1736,9 +1730,7 @@ struct aead_edesc {
 /*
  * ablkcipher_edesc - s/w-extended ablkcipher descriptor
  * @src_nents: number of segments in input scatterlist
- * @src_chained: if source is chained
  * @dst_nents: number of segments in output scatterlist
- * @dst_chained: if destination is chained
  * @iv_dma: dma address of iv for checking continuity and link table
  * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
  * @sec4_sg_bytes: length of dma mapped sec4_sg space
@@ -1747,9 +1739,7 @@ struct aead_edesc {
  */
 struct ablkcipher_edesc {
 	int src_nents;
-	bool src_chained;
 	int dst_nents;
-	bool dst_chained;
 	dma_addr_t iv_dma;
 	int sec4_sg_bytes;
 	dma_addr_t sec4_sg_dma;
@@ -1759,18 +1749,15 @@ struct ablkcipher_edesc {
 
 static void caam_unmap(struct device *dev, struct scatterlist *src,
 		       struct scatterlist *dst, int src_nents,
-		       bool src_chained, int dst_nents, bool dst_chained,
+		       int dst_nents,
 		       dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
 		       int sec4_sg_bytes)
 {
 	if (dst != src) {
-		dma_unmap_sg_chained(dev, src, src_nents ? : 1, DMA_TO_DEVICE,
-				     src_chained);
-		dma_unmap_sg_chained(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE,
-				     dst_chained);
+		dma_unmap_sg(dev, src, src_nents ? : 1, DMA_TO_DEVICE);
+		dma_unmap_sg(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE);
 	} else {
-		dma_unmap_sg_chained(dev, src, src_nents ? : 1,
-				     DMA_BIDIRECTIONAL, src_chained);
+		dma_unmap_sg(dev, src, src_nents ? : 1, DMA_BIDIRECTIONAL);
 	}
 
 	if (iv_dma)
@@ -1785,8 +1772,7 @@ static void aead_unmap(struct device *dev,
 		       struct aead_request *req)
 {
 	caam_unmap(dev, req->src, req->dst,
-		   edesc->src_nents, edesc->src_chained, edesc->dst_nents,
-		   edesc->dst_chained, 0, 0,
+		   edesc->src_nents, edesc->dst_nents, 0, 0,
 		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
 }
 
@@ -1798,8 +1784,8 @@ static void ablkcipher_unmap(struct device *dev,
 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
 
 	caam_unmap(dev, req->src, req->dst,
-		   edesc->src_nents, edesc->src_chained, edesc->dst_nents,
-		   edesc->dst_chained, edesc->iv_dma, ivsize,
+		   edesc->src_nents, edesc->dst_nents,
+		   edesc->iv_dma, ivsize,
 		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
 }
 
@@ -2169,22 +2155,18 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 	struct aead_edesc *edesc;
 	int sgc;
 	bool all_contig = true;
-	bool src_chained = false, dst_chained = false;
 	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
 	unsigned int authsize = ctx->authsize;
 
 	if (unlikely(req->dst != req->src)) {
-		src_nents = sg_count(req->src, req->assoclen + req->cryptlen,
-				     &src_chained);
+		src_nents = sg_count(req->src, req->assoclen + req->cryptlen);
 		dst_nents = sg_count(req->dst,
 				     req->assoclen + req->cryptlen +
-				     (encrypt ? authsize : (-authsize)),
-				     &dst_chained);
+				     (encrypt ? authsize : (-authsize)));
 	} else {
 		src_nents = sg_count(req->src,
 				     req->assoclen + req->cryptlen +
-				     (encrypt ? authsize : 0),
-				     &src_chained);
+				     (encrypt ? authsize : 0));
 	}
 
 	/* Check if data are contiguous. */
@@ -2207,37 +2189,35 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 	}
 
 	if (likely(req->src == req->dst)) {
-		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
-					 DMA_BIDIRECTIONAL, src_chained);
+		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
+				 DMA_BIDIRECTIONAL);
 		if (unlikely(!sgc)) {
 			dev_err(jrdev, "unable to map source\n");
 			kfree(edesc);
 			return ERR_PTR(-ENOMEM);
 		}
 	} else {
-		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
-					 DMA_TO_DEVICE, src_chained);
+		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
+				 DMA_TO_DEVICE);
 		if (unlikely(!sgc)) {
 			dev_err(jrdev, "unable to map source\n");
 			kfree(edesc);
 			return ERR_PTR(-ENOMEM);
 		}
 
-		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
-					 DMA_FROM_DEVICE, dst_chained);
+		sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
+				 DMA_FROM_DEVICE);
 		if (unlikely(!sgc)) {
 			dev_err(jrdev, "unable to map destination\n");
-			dma_unmap_sg_chained(jrdev, req->src, src_nents ? : 1,
-					     DMA_TO_DEVICE, src_chained);
+			dma_unmap_sg(jrdev, req->src, src_nents ? : 1,
+				     DMA_TO_DEVICE);
 			kfree(edesc);
 			return ERR_PTR(-ENOMEM);
 		}
 	}
 
 	edesc->src_nents = src_nents;
-	edesc->src_chained = src_chained;
 	edesc->dst_nents = dst_nents;
-	edesc->dst_chained = dst_chained;
 	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
 			 desc_bytes;
 	*all_contig_ptr = all_contig;
@@ -2467,22 +2447,21 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
 	bool iv_contig = false;
 	int sgc;
 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
-	bool src_chained = false, dst_chained = false;
 	int sec4_sg_index;
 
-	src_nents = sg_count(req->src, req->nbytes, &src_chained);
+	src_nents = sg_count(req->src, req->nbytes);
 
 	if (req->dst != req->src)
-		dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);
+		dst_nents = sg_count(req->dst, req->nbytes);
 
 	if (likely(req->src == req->dst)) {
-		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
-					 DMA_BIDIRECTIONAL, src_chained);
+		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
+				 DMA_BIDIRECTIONAL);
 	} else {
-		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
-					 DMA_TO_DEVICE, src_chained);
-		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
-					 DMA_FROM_DEVICE, dst_chained);
+		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
+				 DMA_TO_DEVICE);
+		sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
+				 DMA_FROM_DEVICE);
 	}
 
 	iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
@@ -2511,9 +2490,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
 	}
 
 	edesc->src_nents = src_nents;
-	edesc->src_chained = src_chained;
 	edesc->dst_nents = dst_nents;
-	edesc->dst_chained = dst_chained;
 	edesc->sec4_sg_bytes = sec4_sg_bytes;
 	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
 			 desc_bytes;
@@ -2646,22 +2623,21 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
 	bool iv_contig = false;
 	int sgc;
 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
-	bool src_chained = false, dst_chained = false;
 	int sec4_sg_index;
 
-	src_nents = sg_count(req->src, req->nbytes, &src_chained);
+	src_nents = sg_count(req->src, req->nbytes);
 
 	if (unlikely(req->dst != req->src))
-		dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);
+		dst_nents = sg_count(req->dst, req->nbytes);
 
 	if (likely(req->src == req->dst)) {
-		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
-					 DMA_BIDIRECTIONAL, src_chained);
+		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
+				 DMA_BIDIRECTIONAL);
 	} else {
-		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
-					 DMA_TO_DEVICE, src_chained);
-		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
-					 DMA_FROM_DEVICE, dst_chained);
+		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
+				 DMA_TO_DEVICE);
+		sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
+				 DMA_FROM_DEVICE);
 	}
 
 	/*
@@ -2690,9 +2666,7 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
 	}
 
 	edesc->src_nents = src_nents;
-	edesc->src_chained = src_chained;
 	edesc->dst_nents = dst_nents;
-	edesc->dst_chained = dst_chained;
 	edesc->sec4_sg_bytes = sec4_sg_bytes;
 	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
 			 desc_bytes;
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 94433b9fc200..9609f6634329 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -181,10 +181,9 @@ static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
 /* Map req->src and put it in link table */
 static inline void src_map_to_sec4_sg(struct device *jrdev,
 				      struct scatterlist *src, int src_nents,
-				      struct sec4_sg_entry *sec4_sg,
-				      bool chained)
+				      struct sec4_sg_entry *sec4_sg)
 {
-	dma_map_sg_chained(jrdev, src, src_nents, DMA_TO_DEVICE, chained);
+	dma_map_sg(jrdev, src, src_nents, DMA_TO_DEVICE);
 	sg_to_sec4_sg_last(src, src_nents, sec4_sg, 0);
 }
 
@@ -585,7 +584,6 @@ badkey:
  * ahash_edesc - s/w-extended ahash descriptor
  * @dst_dma: physical mapped address of req->result
  * @sec4_sg_dma: physical mapped address of h/w link table
- * @chained: if source is chained
  * @src_nents: number of segments in input scatterlist
  * @sec4_sg_bytes: length of dma mapped sec4_sg space
  * @sec4_sg: pointer to h/w link table
@@ -594,7 +592,6 @@ badkey:
 struct ahash_edesc {
 	dma_addr_t dst_dma;
 	dma_addr_t sec4_sg_dma;
-	bool chained;
 	int src_nents;
 	int sec4_sg_bytes;
 	struct sec4_sg_entry *sec4_sg;
@@ -606,8 +603,7 @@ static inline void ahash_unmap(struct device *dev,
 			struct ahash_request *req, int dst_len)
 {
 	if (edesc->src_nents)
-		dma_unmap_sg_chained(dev, req->src, edesc->src_nents,
-				     DMA_TO_DEVICE, edesc->chained);
+		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
 	if (edesc->dst_dma)
 		dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
 
@@ -788,7 +784,6 @@ static int ahash_update_ctx(struct ahash_request *req)
 	dma_addr_t ptr = ctx->sh_desc_update_dma;
 	int src_nents, sec4_sg_bytes, sec4_sg_src_index;
 	struct ahash_edesc *edesc;
-	bool chained = false;
 	int ret = 0;
 	int sh_len;
 
@@ -797,8 +792,8 @@ static int ahash_update_ctx(struct ahash_request *req)
 	to_hash = in_len - *next_buflen;
 
 	if (to_hash) {
-		src_nents = __sg_count(req->src, req->nbytes - (*next_buflen),
-				       &chained);
+		src_nents = sg_nents_for_len(req->src,
+					     req->nbytes - (*next_buflen));
 		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
 		sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
 				 sizeof(struct sec4_sg_entry);
@@ -816,7 +811,6 @@ static int ahash_update_ctx(struct ahash_request *req)
 		}
 
 		edesc->src_nents = src_nents;
-		edesc->chained = chained;
 		edesc->sec4_sg_bytes = sec4_sg_bytes;
 		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
 				 DESC_JOB_IO_LEN;
@@ -833,8 +827,7 @@ static int ahash_update_ctx(struct ahash_request *req)
 
 		if (src_nents) {
 			src_map_to_sec4_sg(jrdev, req->src, src_nents,
-					   edesc->sec4_sg + sec4_sg_src_index,
-					   chained);
+					   edesc->sec4_sg + sec4_sg_src_index);
 			if (*next_buflen)
 				scatterwalk_map_and_copy(next_buf, req->src,
 							 to_hash - *buflen,
@@ -996,11 +989,10 @@ static int ahash_finup_ctx(struct ahash_request *req)
 	int src_nents;
 	int digestsize = crypto_ahash_digestsize(ahash);
 	struct ahash_edesc *edesc;
-	bool chained = false;
 	int ret = 0;
 	int sh_len;
 
-	src_nents = __sg_count(req->src, req->nbytes, &chained);
+	src_nents = sg_nents_for_len(req->src, req->nbytes);
 	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
 	sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
 			 sizeof(struct sec4_sg_entry);
@@ -1018,7 +1010,6 @@ static int ahash_finup_ctx(struct ahash_request *req)
 	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
 
 	edesc->src_nents = src_nents;
-	edesc->chained = chained;
 	edesc->sec4_sg_bytes = sec4_sg_bytes;
 	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
 			 DESC_JOB_IO_LEN;
@@ -1033,7 +1024,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
 						last_buflen);
 
 	src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg +
-			   sec4_sg_src_index, chained);
+			   sec4_sg_src_index);
 
 	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
 					    sec4_sg_bytes, DMA_TO_DEVICE);
@@ -1081,14 +1072,12 @@ static int ahash_digest(struct ahash_request *req)
 	int src_nents, sec4_sg_bytes;
 	dma_addr_t src_dma;
 	struct ahash_edesc *edesc;
-	bool chained = false;
 	int ret = 0;
 	u32 options;
 	int sh_len;
 
-	src_nents = sg_count(req->src, req->nbytes, &chained);
-	dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE,
-			   chained);
+	src_nents = sg_count(req->src, req->nbytes);
+	dma_map_sg(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE);
 	sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);
 
 	/* allocate space for base edesc and hw desc commands, link tables */
@@ -1102,7 +1091,6 @@ static int ahash_digest(struct ahash_request *req)
 			 DESC_JOB_IO_LEN;
 	edesc->sec4_sg_bytes = sec4_sg_bytes;
 	edesc->src_nents = src_nents;
-	edesc->chained = chained;
 
 	sh_len = desc_len(sh_desc);
 	desc = edesc->hw_desc;
@@ -1228,7 +1216,6 @@ static int ahash_update_no_ctx(struct ahash_request *req)
 	struct ahash_edesc *edesc;
 	u32 *desc, *sh_desc = ctx->sh_desc_update_first;
 	dma_addr_t ptr = ctx->sh_desc_update_first_dma;
-	bool chained = false;
 	int ret = 0;
 	int sh_len;
 
@@ -1236,8 +1223,8 @@ static int ahash_update_no_ctx(struct ahash_request *req)
 	to_hash = in_len - *next_buflen;
 
 	if (to_hash) {
-		src_nents = __sg_count(req->src, req->nbytes - (*next_buflen),
-				       &chained);
+		src_nents = sg_nents_for_len(req->src,
+					     req->nbytes - (*next_buflen));
 		sec4_sg_bytes = (1 + src_nents) *
 				sizeof(struct sec4_sg_entry);
 
@@ -1254,7 +1241,6 @@ static int ahash_update_no_ctx(struct ahash_request *req)
 		}
 
 		edesc->src_nents = src_nents;
-		edesc->chained = chained;
 		edesc->sec4_sg_bytes = sec4_sg_bytes;
 		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
 				 DESC_JOB_IO_LEN;
@@ -1263,7 +1249,7 @@ static int ahash_update_no_ctx(struct ahash_request *req)
 		state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
 						    buf, *buflen);
 		src_map_to_sec4_sg(jrdev, req->src, src_nents,
-				   edesc->sec4_sg + 1, chained);
+				   edesc->sec4_sg + 1);
 		if (*next_buflen) {
 			scatterwalk_map_and_copy(next_buf, req->src,
 						 to_hash - *buflen,
@@ -1343,11 +1329,10 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
 	int sec4_sg_bytes, sec4_sg_src_index, src_nents;
 	int digestsize = crypto_ahash_digestsize(ahash);
 	struct ahash_edesc *edesc;
-	bool chained = false;
 	int sh_len;
 	int ret = 0;
 
-	src_nents = __sg_count(req->src, req->nbytes, &chained);
+	src_nents = sg_nents_for_len(req->src, req->nbytes);
 	sec4_sg_src_index = 2;
 	sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
 			 sizeof(struct sec4_sg_entry);
@@ -1365,7 +1350,6 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
 	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
 
 	edesc->src_nents = src_nents;
-	edesc->chained = chained;
 	edesc->sec4_sg_bytes = sec4_sg_bytes;
 	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
 			 DESC_JOB_IO_LEN;
@@ -1374,8 +1358,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
 						state->buf_dma, buflen,
 						last_buflen);
 
-	src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1,
-			   chained);
+	src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1);
 
 	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
 					    sec4_sg_bytes, DMA_TO_DEVICE);
@@ -1429,7 +1412,6 @@ static int ahash_update_first(struct ahash_request *req)
 	dma_addr_t src_dma;
 	u32 options;
 	struct ahash_edesc *edesc;
-	bool chained = false;
 	int ret = 0;
 	int sh_len;
 
@@ -1438,10 +1420,8 @@ static int ahash_update_first(struct ahash_request *req)
 	to_hash = req->nbytes - *next_buflen;
 
 	if (to_hash) {
-		src_nents = sg_count(req->src, req->nbytes - (*next_buflen),
-				     &chained);
-		dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
-				   DMA_TO_DEVICE, chained);
+		src_nents = sg_count(req->src, req->nbytes - (*next_buflen));
+		dma_map_sg(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE);
 		sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);
 
 		/*
@@ -1457,7 +1437,6 @@ static int ahash_update_first(struct ahash_request *req)
 		}
 
 		edesc->src_nents = src_nents;
-		edesc->chained = chained;
 		edesc->sec4_sg_bytes = sec4_sg_bytes;
 		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
 				 DESC_JOB_IO_LEN;
diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h
index 18cd6d1f5870..12ec6616e89d 100644
--- a/drivers/crypto/caam/sg_sw_sec4.h
+++ b/drivers/crypto/caam/sg_sw_sec4.h
@@ -69,81 +69,13 @@ static inline struct sec4_sg_entry *sg_to_sec4_sg_len(
 	return sec4_sg_ptr - 1;
 }
 
-/* count number of elements in scatterlist */
-static inline int __sg_count(struct scatterlist *sg_list, int nbytes,
-			     bool *chained)
-{
-	struct scatterlist *sg = sg_list;
-	int sg_nents = 0;
-
-	while (nbytes > 0) {
-		sg_nents++;
-		nbytes -= sg->length;
-		if (!sg_is_last(sg) && (sg + 1)->length == 0)
-			*chained = true;
-		sg = sg_next(sg);
-	}
-
-	return sg_nents;
-}
-
 /* derive number of elements in scatterlist, but return 0 for 1 */
-static inline int sg_count(struct scatterlist *sg_list, int nbytes,
-			   bool *chained)
+static inline int sg_count(struct scatterlist *sg_list, int nbytes)
 {
-	int sg_nents = __sg_count(sg_list, nbytes, chained);
+	int sg_nents = sg_nents_for_len(sg_list, nbytes);
 
 	if (likely(sg_nents == 1))
 		return 0;
 
 	return sg_nents;
 }
-
-static inline void dma_unmap_sg_chained(
-	struct device *dev, struct scatterlist *sg, unsigned int nents,
-	enum dma_data_direction dir, bool chained)
-{
-	if (unlikely(chained)) {
-		int i;
-		struct scatterlist *tsg = sg;
-
-		/*
-		 * Use a local copy of the sg pointer to avoid moving the
-		 * head of the list pointed to by sg as we walk the list.
-		 */
-		for (i = 0; i < nents; i++) {
-			dma_unmap_sg(dev, tsg, 1, dir);
-			tsg = sg_next(tsg);
-		}
-	} else if (nents) {
-		dma_unmap_sg(dev, sg, nents, dir);
-	}
-}
-
-static inline int dma_map_sg_chained(
-	struct device *dev, struct scatterlist *sg, unsigned int nents,
-	enum dma_data_direction dir, bool chained)
-{
-	if (unlikely(chained)) {
-		int i;
-		struct scatterlist *tsg = sg;
-
-		/*
-		 * Use a local copy of the sg pointer to avoid moving the
-		 * head of the list pointed to by sg as we walk the list.
-		 */
-		for (i = 0; i < nents; i++) {
-			if (!dma_map_sg(dev, tsg, 1, dir)) {
-				dma_unmap_sg_chained(dev, sg, i, dir,
-						     chained);
-				nents = 0;
-				break;
-			}
-
-			tsg = sg_next(tsg);
-		}
-	} else
-		nents = dma_map_sg(dev, sg, nents, dir);
-
-	return nents;
-}
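For reference (not part of the patch): a minimal sketch of how a caller consumes the simplified sg_count() above, mirroring the single-entry versus link-table split the driver already uses; the src_dma/options/edesc names and LDST_SGF come from the driver, and the fragment is abridged rather than complete.

	/*
	 * Illustrative fragment: sg_count() returns 0 for the common
	 * single-entry case so the hardware can be pointed at the buffer
	 * directly; otherwise a sec4 link table is programmed instead.
	 */
	src_nents = sg_count(req->src, req->nbytes);
	dma_map_sg(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE);

	if (src_nents) {
		src_dma = edesc->sec4_sg_dma;		/* link table */
		options = LDST_SGF;
	} else {
		src_dma = sg_dma_address(req->src);	/* single segment */
		options = 0;
	}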