author		Lee Nipper <lee.nipper@gmail.com>	2010-06-16 01:29:15 -0400
committer	Herbert Xu <herbert@gondor.apana.org.au>	2010-06-16 01:29:15 -0400
commit		5e833bc4166ea524c00a95e777e4344844ed189f (patch)
tree		5b53201c04a434464fc68379ac2989cf8fd8027a /drivers/crypto/talitos.c
parent		5b04fc170382195d7d33fd08e3ccc2ad8e50e782 (diff)
crypto: talitos - fix ahash for multiple of blocksize
Correct ahash_process_req() to properly handle cases where the total hash amount is a multiple of the blocksize. The SEC must have some data to hash during the very last descriptor operation, so up to one whole blocksize of data is buffered until the final hash.

Signed-off-by: Lee Nipper <lee.nipper@gmail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
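The rule the patch implements is easiest to see in isolation: each request's bytes are split into an amount hashed now and an amount held back, and when the running total is an exact multiple of the blocksize on a non-final request, one whole block is held back so the last descriptor still has data to hash. Below is a minimal standalone sketch of that split; split_hash_request() and its parameter names are hypothetical and not part of talitos.c.

/*
 * Hypothetical illustration of the buffering rule introduced by this
 * patch; not a function in the driver.
 * 'nbuf' is the number of bytes already buffered from earlier updates,
 * 'nbytes' is the new data, and 'blocksize' is a power of two.
 */
static void split_hash_request(unsigned int nbuf, unsigned int nbytes,
			       unsigned int blocksize, int last,
			       unsigned int *nbytes_to_hash,
			       unsigned int *to_hash_later)
{
	unsigned int total = nbuf + nbytes;
	unsigned int tail = total & (blocksize - 1);

	if (last) {
		/* Final request: hash everything that is left. */
		*nbytes_to_hash = total;
		*to_hash_later = 0;
	} else if (tail) {
		/* Trailing partial block: hash only the full blocks now. */
		*nbytes_to_hash = total - tail;
		*to_hash_later = tail;
	} else {
		/* Exact multiple of blocksize: keep one whole block back
		 * so the final descriptor always has data to hash.
		 */
		*nbytes_to_hash = total - blocksize;
		*to_hash_later = blocksize;
	}
}

For example, with a 64-byte blocksize and 128 bytes fed through update(), a non-final request yields nbytes_to_hash = 64 and to_hash_later = 64; the old code hashed all 128 bytes and left nothing for the final descriptor operation, which is the case this patch corrects.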
Diffstat (limited to 'drivers/crypto/talitos.c')
-rw-r--r--	drivers/crypto/talitos.c	| 77
1 file changed, 40 insertions(+), 37 deletions(-)
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 637c105f53d2..0f2483e221ad 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -720,7 +720,6 @@ struct talitos_ctx {
 #define TALITOS_MDEU_MAX_CONTEXT_SIZE TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
 
 struct talitos_ahash_req_ctx {
-	u64 count;
 	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
 	unsigned int hw_context_size;
 	u8 buf[HASH_MAX_BLOCK_SIZE];
@@ -729,6 +728,7 @@ struct talitos_ahash_req_ctx {
 	unsigned int first;
 	unsigned int last;
 	unsigned int to_hash_later;
+	u64 nbuf;
 	struct scatterlist bufsl[2];
 	struct scatterlist *psrc;
 };
@@ -1609,6 +1609,7 @@ static void ahash_done(struct device *dev,
 	if (!req_ctx->last && req_ctx->to_hash_later) {
 		/* Position any partial block for next update/final/finup */
 		memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later);
+		req_ctx->nbuf = req_ctx->to_hash_later;
 	}
 	common_nonsnoop_hash_unmap(dev, edesc, areq);
 
@@ -1724,7 +1725,7 @@ static int ahash_init(struct ahash_request *areq)
 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
 
 	/* Initialize the context */
-	req_ctx->count = 0;
+	req_ctx->nbuf = 0;
 	req_ctx->first = 1; /* first indicates h/w must init its context */
 	req_ctx->swinit = 0; /* assume h/w init of context */
 	req_ctx->hw_context_size =
@@ -1772,52 +1773,54 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
 		crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
 	unsigned int nbytes_to_hash;
 	unsigned int to_hash_later;
-	unsigned int index;
+	unsigned int nsg;
 	int chained;
 
-	index = req_ctx->count & (blocksize - 1);
-	req_ctx->count += nbytes;
-
-	if (!req_ctx->last && (index + nbytes) < blocksize) {
-		/* Buffer the partial block */
+	if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
+		/* Buffer up to one whole block */
 		sg_copy_to_buffer(areq->src,
 				  sg_count(areq->src, nbytes, &chained),
-				  req_ctx->buf + index, nbytes);
+				  req_ctx->buf + req_ctx->nbuf, nbytes);
+		req_ctx->nbuf += nbytes;
 		return 0;
 	}
 
-	if (index) {
-		/* partial block from previous update; chain it in. */
-		sg_init_table(req_ctx->bufsl, (nbytes) ? 2 : 1);
-		sg_set_buf(req_ctx->bufsl, req_ctx->buf, index);
-		if (nbytes)
-			scatterwalk_sg_chain(req_ctx->bufsl, 2,
-					     areq->src);
+	/* At least (blocksize + 1) bytes are available to hash */
+	nbytes_to_hash = nbytes + req_ctx->nbuf;
+	to_hash_later = nbytes_to_hash & (blocksize - 1);
+
+	if (req_ctx->last)
+		to_hash_later = 0;
+	else if (to_hash_later)
+		/* There is a partial block. Hash the full block(s) now */
+		nbytes_to_hash -= to_hash_later;
+	else {
+		/* Keep one block buffered */
+		nbytes_to_hash -= blocksize;
+		to_hash_later = blocksize;
+	}
+
+	/* Chain in any previously buffered data */
+	if (req_ctx->nbuf) {
+		nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
+		sg_init_table(req_ctx->bufsl, nsg);
+		sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf);
+		if (nsg > 1)
+			scatterwalk_sg_chain(req_ctx->bufsl, 2, areq->src);
 		req_ctx->psrc = req_ctx->bufsl;
-	} else {
+	} else
 		req_ctx->psrc = areq->src;
+
+	if (to_hash_later) {
+		int nents = sg_count(areq->src, nbytes, &chained);
+		sg_copy_end_to_buffer(areq->src, nents,
+				      req_ctx->bufnext,
+				      to_hash_later,
+				      nbytes - to_hash_later);
 	}
-	nbytes_to_hash = index + nbytes;
-	if (!req_ctx->last) {
-		to_hash_later = (nbytes_to_hash & (blocksize - 1));
-		if (to_hash_later) {
-			int nents;
-			/* Must copy to_hash_later bytes from the end
-			 * to bufnext (a partial block) for later.
-			 */
-			nents = sg_count(areq->src, nbytes, &chained);
-			sg_copy_end_to_buffer(areq->src, nents,
-					      req_ctx->bufnext,
-					      to_hash_later,
-					      nbytes - to_hash_later);
-
-			/* Adjust count for what will be hashed now */
-			nbytes_to_hash -= to_hash_later;
-		}
-		req_ctx->to_hash_later = to_hash_later;
-	}
+	req_ctx->to_hash_later = to_hash_later;
 
-	/* allocate extended descriptor */
+	/* Allocate extended descriptor */
 	edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
 	if (IS_ERR(edesc))
 		return PTR_ERR(edesc);