-rw-r--r--   crypto/shash.c                            |   3
-rw-r--r--   drivers/char/hw_random/timeriomem-rng.c   |  39
-rw-r--r--   drivers/crypto/ixp4xx_crypto.c            | 182
-rw-r--r--   include/linux/timeriomem-rng.h            |   2
4 files changed, 102 insertions, 124 deletions
diff --git a/crypto/shash.c b/crypto/shash.c
index 7a659733f94a..2ccc8b0076ce 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -77,6 +77,9 @@ static int shash_update_unaligned(struct shash_desc *desc, const u8 *data,
 	u8 buf[shash_align_buffer_size(unaligned_len, alignmask)]
 		__attribute__ ((aligned));
 
+	if (unaligned_len > len)
+		unaligned_len = len;
+
 	memcpy(buf, data, unaligned_len);
 
 	return shash->update(desc, buf, unaligned_len) ?:
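
Note on the hunk above: shash_update_unaligned() derives unaligned_len from the distance between the data pointer and the next alignment boundary (roughly alignmask + 1 - (data & alignmask), computed just before this hunk), so for a short update it could exceed len and the memcpy() would read past the caller's buffer. A minimal userspace sketch of the arithmetic and the clamp, with invented values:

	/* Userspace sketch of the clamp; pointer value, mask and length
	 * are invented for illustration. */
	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uintptr_t data = 0x1001;	/* misaligned source address */
		unsigned int alignmask = 7;	/* walk wants 8-byte alignment */
		unsigned int len = 2;		/* caller only passed 2 bytes */

		/* distance from 'data' to the next aligned boundary */
		unsigned int unaligned_len = alignmask + 1 - (data & alignmask);

		if (unaligned_len > len)	/* the fix: never copy more  */
			unaligned_len = len;	/* than the caller provided */

		printf("copy %u byte(s), not %u\n", unaligned_len,
		       alignmask + 1 - (unsigned int)(data & alignmask));
		return 0;
	}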
diff --git a/drivers/char/hw_random/timeriomem-rng.c b/drivers/char/hw_random/timeriomem-rng.c
index 10ad41be5897..dcd352ad0e7f 100644
--- a/drivers/char/hw_random/timeriomem-rng.c
+++ b/drivers/char/hw_random/timeriomem-rng.c
@@ -90,10 +90,30 @@ static struct hwrng timeriomem_rng_ops = {
 
 static int __init timeriomem_rng_probe(struct platform_device *pdev)
 {
+	struct resource *res, *mem;
 	int ret;
 
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+	if (!res)
+		return -ENOENT;
+
+	mem = request_mem_region(res->start, res->end - res->start + 1,
+				 pdev->name);
+	if (mem == NULL)
+		return -EBUSY;
+
+	dev_set_drvdata(&pdev->dev, mem);
+
 	timeriomem_rng_data = pdev->dev.platform_data;
 
+	timeriomem_rng_data->address = ioremap(res->start,
+					res->end - res->start + 1);
+	if (!timeriomem_rng_data->address) {
+		ret = -ENOMEM;
+		goto err_ioremap;
+	}
+
 	if (timeriomem_rng_data->period != 0
 		&& usecs_to_jiffies(timeriomem_rng_data->period) > 0) {
 		timeriomem_rng_timer.expires = jiffies;
@@ -104,23 +124,34 @@ static int __init timeriomem_rng_probe(struct platform_device *pdev)
 	timeriomem_rng_data->present = 1;
 
 	ret = hwrng_register(&timeriomem_rng_ops);
-	if (ret) {
-		dev_err(&pdev->dev, "problem registering\n");
-		return ret;
-	}
+	if (ret)
+		goto err_register;
 
 	dev_info(&pdev->dev, "32bits from 0x%p @ %dus\n",
 			timeriomem_rng_data->address,
 			timeriomem_rng_data->period);
 
 	return 0;
+
+err_register:
+	dev_err(&pdev->dev, "problem registering\n");
+	iounmap(timeriomem_rng_data->address);
+err_ioremap:
+	release_resource(mem);
+
+	return ret;
 }
 
 static int __devexit timeriomem_rng_remove(struct platform_device *pdev)
 {
+	struct resource *mem = dev_get_drvdata(&pdev->dev);
+
 	del_timer_sync(&timeriomem_rng_timer);
 	hwrng_unregister(&timeriomem_rng_ops);
 
+	iounmap(timeriomem_rng_data->address);
+	release_resource(mem);
+
 	return 0;
 }
 
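Note: probe() now claims the MMIO window itself, request_mem_region() before ioremap(), and on failure unwinds in reverse order of acquisition, each label named after the step whose failure jumps to it; remove() releases the same resources. A self-contained sketch of that LIFO goto-unwind idiom, with malloc()/free() standing in for the kernel calls:

	/* Userspace sketch of the unwind order; malloc()/free() stand in
	 * for request_mem_region()/ioremap() and their release calls. */
	#include <stdio.h>
	#include <stdlib.h>

	static void *region, *mapping;

	static int fake_probe(int fail_register)
	{
		int ret;

		region = malloc(64);		/* request_mem_region() */
		if (!region)
			return -1;

		mapping = malloc(64);		/* ioremap() */
		if (!mapping) {
			ret = -2;
			goto err_ioremap;
		}

		if (fail_register) {		/* hwrng_register() failed */
			ret = -3;
			goto err_register;
		}
		return 0;		/* success: resources stay held */

	err_register:
		free(mapping);			/* iounmap() */
	err_ioremap:
		free(region);			/* release_resource() */
		return ret;
	}

	int main(void)
	{
		printf("probe: %d\n", fake_probe(1));	/* exercises the unwind */
		return 0;
	}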
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index d9e751be8c5f..af9761ccf9f1 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -101,6 +101,7 @@ struct buffer_desc {
 	u32 phys_addr;
 	u32 __reserved[4];
 	struct buffer_desc *next;
+	enum dma_data_direction dir;
 };
 
 struct crypt_ctl {
@@ -132,14 +133,10 @@ struct crypt_ctl {
 struct ablk_ctx {
 	struct buffer_desc *src;
 	struct buffer_desc *dst;
-	unsigned src_nents;
-	unsigned dst_nents;
 };
 
 struct aead_ctx {
 	struct buffer_desc *buffer;
-	unsigned short assoc_nents;
-	unsigned short src_nents;
 	struct scatterlist ivlist;
 	/* used when the hmac is not on one sg entry */
 	u8 *hmac_virt;
@@ -312,7 +309,7 @@ static struct crypt_ctl *get_crypt_desc_emerg(void)
 	}
 }
 
-static void free_buf_chain(struct buffer_desc *buf, u32 phys)
+static void free_buf_chain(struct device *dev, struct buffer_desc *buf,u32 phys)
 {
 	while (buf) {
 		struct buffer_desc *buf1;
@@ -320,6 +317,7 @@ static void free_buf_chain(struct buffer_desc *buf, u32 phys)
 
 		buf1 = buf->next;
 		phys1 = buf->phys_next;
+		dma_unmap_single(dev, buf->phys_next, buf->buf_len, buf->dir);
 		dma_pool_free(buffer_pool, buf, phys);
 		buf = buf1;
 		phys = phys1;
@@ -348,7 +346,6 @@ static void one_packet(dma_addr_t phys)
 	struct crypt_ctl *crypt;
 	struct ixp_ctx *ctx;
 	int failed;
-	enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
 
 	failed = phys & 0x1 ? -EBADMSG : 0;
 	phys &= ~0x3;
@@ -358,13 +355,8 @@ static void one_packet(dma_addr_t phys)
 	case CTL_FLAG_PERFORM_AEAD: {
 		struct aead_request *req = crypt->data.aead_req;
 		struct aead_ctx *req_ctx = aead_request_ctx(req);
-		dma_unmap_sg(dev, req->assoc, req_ctx->assoc_nents,
-				DMA_TO_DEVICE);
-		dma_unmap_sg(dev, &req_ctx->ivlist, 1, DMA_BIDIRECTIONAL);
-		dma_unmap_sg(dev, req->src, req_ctx->src_nents,
-				DMA_BIDIRECTIONAL);
 
-		free_buf_chain(req_ctx->buffer, crypt->src_buf);
+		free_buf_chain(dev, req_ctx->buffer, crypt->src_buf);
 		if (req_ctx->hmac_virt) {
 			finish_scattered_hmac(crypt);
 		}
@@ -374,16 +366,11 @@ static void one_packet(dma_addr_t phys)
 	case CTL_FLAG_PERFORM_ABLK: {
 		struct ablkcipher_request *req = crypt->data.ablk_req;
 		struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
-		int nents;
+
 		if (req_ctx->dst) {
-			nents = req_ctx->dst_nents;
-			dma_unmap_sg(dev, req->dst, nents, DMA_FROM_DEVICE);
-			free_buf_chain(req_ctx->dst, crypt->dst_buf);
-			src_direction = DMA_TO_DEVICE;
+			free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
 		}
-		nents = req_ctx->src_nents;
-		dma_unmap_sg(dev, req->src, nents, src_direction);
-		free_buf_chain(req_ctx->src, crypt->src_buf);
+		free_buf_chain(dev, req_ctx->src, crypt->src_buf);
 		req->base.complete(&req->base, failed);
 		break;
 	}
@@ -750,56 +737,35 @@ static int setup_cipher(struct crypto_tfm *tfm, int encrypt,
 	return 0;
 }
 
-static int count_sg(struct scatterlist *sg, int nbytes)
+static struct buffer_desc *chainup_buffers(struct device *dev,
+		struct scatterlist *sg, unsigned nbytes,
+		struct buffer_desc *buf, gfp_t flags,
+		enum dma_data_direction dir)
 {
-	int i;
-	for (i = 0; nbytes > 0; i++, sg = sg_next(sg))
-		nbytes -= sg->length;
-	return i;
-}
-
-static struct buffer_desc *chainup_buffers(struct scatterlist *sg,
-			unsigned nbytes, struct buffer_desc *buf, gfp_t flags)
-{
-	int nents = 0;
-
-	while (nbytes > 0) {
+	for (;nbytes > 0; sg = scatterwalk_sg_next(sg)) {
+		unsigned len = min(nbytes, sg->length);
 		struct buffer_desc *next_buf;
 		u32 next_buf_phys;
-		unsigned len = min(nbytes, sg_dma_len(sg));
+		void *ptr;
 
-		nents++;
 		nbytes -= len;
-		if (!buf->phys_addr) {
-			buf->phys_addr = sg_dma_address(sg);
-			buf->buf_len = len;
-			buf->next = NULL;
-			buf->phys_next = 0;
-			goto next;
-		}
-		/* Two consecutive chunks on one page may be handled by the old
-		 * buffer descriptor, increased by the length of the new one
-		 */
-		if (sg_dma_address(sg) == buf->phys_addr + buf->buf_len) {
-			buf->buf_len += len;
-			goto next;
-		}
+		ptr = page_address(sg_page(sg)) + sg->offset;
 		next_buf = dma_pool_alloc(buffer_pool, flags, &next_buf_phys);
-		if (!next_buf)
-			return NULL;
+		if (!next_buf) {
+			buf = NULL;
+			break;
+		}
+		sg_dma_address(sg) = dma_map_single(dev, ptr, len, dir);
 		buf->next = next_buf;
 		buf->phys_next = next_buf_phys;
-
 		buf = next_buf;
-		buf->next = NULL;
-		buf->phys_next = 0;
+
 		buf->phys_addr = sg_dma_address(sg);
 		buf->buf_len = len;
-next:
-		if (nbytes > 0) {
-			sg = sg_next(sg);
-		}
+		buf->dir = dir;
 	}
+	buf->next = NULL;
+	buf->phys_next = 0;
 	return buf;
 }
 
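Note: the rewritten chainup_buffers() takes over the DMA mapping its callers used to do with dma_map_sg(): every scatterlist element is mapped individually with dma_map_single(), and the length and direction are recorded in the buffer_desc so free_buf_chain() can dma_unmap_single() each chunk later. The loop consumes at most nbytes overall, trimming the last element via min(nbytes, sg->length). A runnable sketch of just that trimming logic, with invented chunk sizes:

	/* Sketch of the trimming logic only; chunk lengths are invented. */
	#include <stdio.h>

	int main(void)
	{
		unsigned int chunks[] = { 16, 64, 32 };	/* sg->length values */
		unsigned int nbytes = 70;		/* total bytes to chain */

		for (int i = 0; nbytes > 0 && i < 3; i++) {
			unsigned int len = chunks[i] < nbytes ? chunks[i] : nbytes;

			nbytes -= len;
			printf("element %d: map %u bytes\n", i, len);
		}
		return 0;	/* prints 16 then 54; element 2 never mapped */
	}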
@@ -860,12 +826,12 @@ static int ablk_perform(struct ablkcipher_request *req, int encrypt)
 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
 	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
 	unsigned ivsize = crypto_ablkcipher_ivsize(tfm);
-	int ret = -ENOMEM;
 	struct ix_sa_dir *dir;
 	struct crypt_ctl *crypt;
-	unsigned int nbytes = req->nbytes, nents;
+	unsigned int nbytes = req->nbytes;
 	enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
 	struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
+	struct buffer_desc src_hook;
 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
 				GFP_KERNEL : GFP_ATOMIC;
 
@@ -878,7 +844,7 @@ static int ablk_perform(struct ablkcipher_request *req, int encrypt)
 
 	crypt = get_crypt_desc();
 	if (!crypt)
-		return ret;
+		return -ENOMEM;
 
 	crypt->data.ablk_req = req;
 	crypt->crypto_ctx = dir->npe_ctx_phys;
@@ -891,53 +857,41 @@ static int ablk_perform(struct ablkcipher_request *req, int encrypt)
 	BUG_ON(ivsize && !req->info);
 	memcpy(crypt->iv, req->info, ivsize);
 	if (req->src != req->dst) {
+		struct buffer_desc dst_hook;
 		crypt->mode |= NPE_OP_NOT_IN_PLACE;
-		nents = count_sg(req->dst, nbytes);
 		/* This was never tested by Intel
 		 * for more than one dst buffer, I think. */
-		BUG_ON(nents != 1);
-		req_ctx->dst_nents = nents;
-		dma_map_sg(dev, req->dst, nents, DMA_FROM_DEVICE);
-		req_ctx->dst = dma_pool_alloc(buffer_pool, flags,&crypt->dst_buf);
-		if (!req_ctx->dst)
-			goto unmap_sg_dest;
-		req_ctx->dst->phys_addr = 0;
-		if (!chainup_buffers(req->dst, nbytes, req_ctx->dst, flags))
+		BUG_ON(req->dst->length < nbytes);
+		req_ctx->dst = NULL;
+		if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
+					flags, DMA_FROM_DEVICE))
 			goto free_buf_dest;
 		src_direction = DMA_TO_DEVICE;
+		req_ctx->dst = dst_hook.next;
+		crypt->dst_buf = dst_hook.phys_next;
 	} else {
 		req_ctx->dst = NULL;
-		req_ctx->dst_nents = 0;
 	}
-	nents = count_sg(req->src, nbytes);
-	req_ctx->src_nents = nents;
-	dma_map_sg(dev, req->src, nents, src_direction);
-
-	req_ctx->src = dma_pool_alloc(buffer_pool, flags, &crypt->src_buf);
-	if (!req_ctx->src)
-		goto unmap_sg_src;
-	req_ctx->src->phys_addr = 0;
-	if (!chainup_buffers(req->src, nbytes, req_ctx->src, flags))
+	req_ctx->src = NULL;
+	if (!chainup_buffers(dev, req->src, nbytes, &src_hook,
+				flags, src_direction))
 		goto free_buf_src;
 
+	req_ctx->src = src_hook.next;
+	crypt->src_buf = src_hook.phys_next;
 	crypt->ctl_flags |= CTL_FLAG_PERFORM_ABLK;
 	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
 	BUG_ON(qmgr_stat_overflow(SEND_QID));
 	return -EINPROGRESS;
 
 free_buf_src:
-	free_buf_chain(req_ctx->src, crypt->src_buf);
-unmap_sg_src:
-	dma_unmap_sg(dev, req->src, req_ctx->src_nents, src_direction);
+	free_buf_chain(dev, req_ctx->src, crypt->src_buf);
 free_buf_dest:
 	if (req->src != req->dst) {
-		free_buf_chain(req_ctx->dst, crypt->dst_buf);
-unmap_sg_dest:
-		dma_unmap_sg(dev, req->src, req_ctx->dst_nents,
-			DMA_FROM_DEVICE);
+		free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
 	}
 	crypt->ctl_flags = CTL_FLAG_UNUSED;
-	return ret;
+	return -ENOMEM;
 }
 
 static int ablk_encrypt(struct ablkcipher_request *req)
@@ -985,7 +939,7 @@ static int hmac_inconsistent(struct scatterlist *sg, unsigned start,
 			break;
 
 		offset += sg->length;
-		sg = sg_next(sg);
+		sg = scatterwalk_sg_next(sg);
 	}
 	return (start + nbytes > offset + sg->length);
 }
@@ -997,11 +951,10 @@ static int aead_perform(struct aead_request *req, int encrypt,
 	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
 	unsigned ivsize = crypto_aead_ivsize(tfm);
 	unsigned authsize = crypto_aead_authsize(tfm);
-	int ret = -ENOMEM;
 	struct ix_sa_dir *dir;
 	struct crypt_ctl *crypt;
-	unsigned int cryptlen, nents;
-	struct buffer_desc *buf;
+	unsigned int cryptlen;
+	struct buffer_desc *buf, src_hook;
 	struct aead_ctx *req_ctx = aead_request_ctx(req);
 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
 				GFP_KERNEL : GFP_ATOMIC;
@@ -1022,7 +975,7 @@ static int aead_perform(struct aead_request *req, int encrypt,
 	}
 	crypt = get_crypt_desc();
 	if (!crypt)
-		return ret;
+		return -ENOMEM;
 
 	crypt->data.aead_req = req;
 	crypt->crypto_ctx = dir->npe_ctx_phys;
@@ -1041,31 +994,27 @@ static int aead_perform(struct aead_request *req, int encrypt,
 		BUG(); /* -ENOTSUP because of my lazyness */
 	}
 
-	req_ctx->buffer = dma_pool_alloc(buffer_pool, flags, &crypt->src_buf);
-	if (!req_ctx->buffer)
-		goto out;
-	req_ctx->buffer->phys_addr = 0;
 	/* ASSOC data */
-	nents = count_sg(req->assoc, req->assoclen);
-	req_ctx->assoc_nents = nents;
-	dma_map_sg(dev, req->assoc, nents, DMA_TO_DEVICE);
-	buf = chainup_buffers(req->assoc, req->assoclen, req_ctx->buffer,flags);
+	buf = chainup_buffers(dev, req->assoc, req->assoclen, &src_hook,
+		flags, DMA_TO_DEVICE);
+	req_ctx->buffer = src_hook.next;
+	crypt->src_buf = src_hook.phys_next;
 	if (!buf)
-		goto unmap_sg_assoc;
+		goto out;
 	/* IV */
 	sg_init_table(&req_ctx->ivlist, 1);
 	sg_set_buf(&req_ctx->ivlist, iv, ivsize);
-	dma_map_sg(dev, &req_ctx->ivlist, 1, DMA_BIDIRECTIONAL);
-	buf = chainup_buffers(&req_ctx->ivlist, ivsize, buf, flags);
+	buf = chainup_buffers(dev, &req_ctx->ivlist, ivsize, buf, flags,
+			DMA_BIDIRECTIONAL);
 	if (!buf)
-		goto unmap_sg_iv;
+		goto free_chain;
 	if (unlikely(hmac_inconsistent(req->src, cryptlen, authsize))) {
 		/* The 12 hmac bytes are scattered,
 		 * we need to copy them into a safe buffer */
 		req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
 				&crypt->icv_rev_aes);
 		if (unlikely(!req_ctx->hmac_virt))
-			goto unmap_sg_iv;
+			goto free_chain;
 		if (!encrypt) {
 			scatterwalk_map_and_copy(req_ctx->hmac_virt,
 				req->src, cryptlen, authsize, 0);
@@ -1075,33 +1024,28 @@ static int aead_perform(struct aead_request *req, int encrypt,
 		req_ctx->hmac_virt = NULL;
 	}
 	/* Crypt */
-	nents = count_sg(req->src, cryptlen + authsize);
-	req_ctx->src_nents = nents;
-	dma_map_sg(dev, req->src, nents, DMA_BIDIRECTIONAL);
-	buf = chainup_buffers(req->src, cryptlen + authsize, buf, flags);
+	buf = chainup_buffers(dev, req->src, cryptlen + authsize, buf, flags,
+			DMA_BIDIRECTIONAL);
 	if (!buf)
-		goto unmap_sg_src;
+		goto free_hmac_virt;
 	if (!req_ctx->hmac_virt) {
 		crypt->icv_rev_aes = buf->phys_addr + buf->buf_len - authsize;
 	}
+
 	crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD;
 	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
 	BUG_ON(qmgr_stat_overflow(SEND_QID));
 	return -EINPROGRESS;
-unmap_sg_src:
-	dma_unmap_sg(dev, req->src, req_ctx->src_nents, DMA_BIDIRECTIONAL);
+free_hmac_virt:
 	if (req_ctx->hmac_virt) {
 		dma_pool_free(buffer_pool, req_ctx->hmac_virt,
 				crypt->icv_rev_aes);
 	}
-unmap_sg_iv:
-	dma_unmap_sg(dev, &req_ctx->ivlist, 1, DMA_BIDIRECTIONAL);
-unmap_sg_assoc:
-	dma_unmap_sg(dev, req->assoc, req_ctx->assoc_nents, DMA_TO_DEVICE);
-	free_buf_chain(req_ctx->buffer, crypt->src_buf);
+free_chain:
+	free_buf_chain(dev, req_ctx->buffer, crypt->src_buf);
 out:
 	crypt->ctl_flags = CTL_FLAG_UNUSED;
-	return ret;
+	return -ENOMEM;
 }
 
 static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
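
Note: ablk_perform() and aead_perform() now hand chainup_buffers() a throwaway buffer_desc on the stack (src_hook/dst_hook) as the head of the chain, then pick up the real list from hook.next and hook.phys_next. That is what lets the old first-descriptor special case (the deleted `if (!buf->phys_addr)` branch) go away. The idiom in a self-contained sketch; node and chain() are hypothetical names:

	/* Self-contained sketch of the stack "hook" head element. */
	#include <stdio.h>
	#include <stdlib.h>

	struct node {
		int val;
		struct node *next;
	};

	/* Links a fresh node after 'cur' for each value; the caller passes
	 * a throwaway head, so the first element needs no special case. */
	static struct node *chain(const int *vals, int n, struct node *hook)
	{
		struct node *cur = hook;

		for (int i = 0; i < n; i++) {
			struct node *next = malloc(sizeof(*next));

			if (!next)
				break;		/* partial chain stays freeable */
			next->val = vals[i];
			cur->next = next;
			cur = next;
		}
		cur->next = NULL;		/* terminate once, at the end */
		return hook->next;		/* real list starts after the hook */
	}

	int main(void)
	{
		int vals[] = { 1, 2, 3 };
		struct node hook;		/* stack dummy, never freed */

		for (struct node *n = chain(vals, 3, &hook); n; ) {
			struct node *next = n->next;

			printf("%d\n", n->val);
			free(n);
			n = next;
		}
		return 0;
	}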
diff --git a/include/linux/timeriomem-rng.h b/include/linux/timeriomem-rng.h
index dd253177f65f..3e08a1c86830 100644
--- a/include/linux/timeriomem-rng.h
+++ b/include/linux/timeriomem-rng.h
@@ -14,7 +14,7 @@ struct timeriomem_rng_data {
 	struct completion	completion;
 	unsigned int		present:1;
 
-	u32 __iomem		*address;
+	void __iomem		*address;
 
 	/* measures in usecs */
 	unsigned int		period;
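
Note: probe() now assigns the result of ioremap() to this field, and ioremap() returns void __iomem *, so the declaration follows suit; readl() users are unaffected. A short fragment, assuming the usual accessor signatures (not a standalone program):

	void __iomem *base = ioremap(res->start, res->end - res->start + 1);
	u32 sample = readl(base);	/* readl() accepts const volatile void __iomem * */
	iounmap(base);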