path: root/crypto/algif_aead.c
author		Stephan Mueller <smueller@chronox.de>		2017-07-30 08:32:58 -0400
committer	Herbert Xu <herbert@gondor.apana.org.au>	2017-08-09 08:17:52 -0400
commit		72548b093ee38a6d4f2a19e6ef1948ae05c181f7 (patch)
tree		630095bdb72ca2c0e616f7accea0f0d826d3f2c0 /crypto/algif_aead.c
parent		5703c826b758e0b33c998739af093879979315b8 (diff)
crypto: algif_aead - copy AAD from src to dst
Use the NULL cipher to copy the AAD and PT/CT from the TX SGL to the RX
SGL. This allows an in-place crypto operation on the RX SGL for
encryption, because the TX data is always smaller than or equal to the RX
data (the RX data will hold the tag).

For decryption, a per-request TX SGL is created which will only hold the
tag value. As the RX SGL will have no space for the tag value and an
in-place operation will not write the tag buffer, the TX SGL with the tag
value is chained to the RX SGL. This now allows an in-place crypto
operation.

For example:

* without the patch:
kcapi -x 2 -e -c "gcm(aes)" -p 89154d0d4129d322e4487bafaa4f6b46 -k c0ece3e63198af382b5603331cc23fa8 -i 7e489b83622e7228314d878d -a afcd7202d621e06ca53b70c2bdff7fb2 -l 16 -u -s
00000000000000000000000000000000f4a3eacfbdadd3b1a17117b1d67ffc1f1e21efbbc6d83724a8c296e3bb8cda0c

* with the patch:
kcapi -x 2 -e -c "gcm(aes)" -p 89154d0d4129d322e4487bafaa4f6b46 -k c0ece3e63198af382b5603331cc23fa8 -i 7e489b83622e7228314d878d -a afcd7202d621e06ca53b70c2bdff7fb2 -l 16 -u -s
afcd7202d621e06ca53b70c2bdff7fb2f4a3eacfbdadd3b1a17117b1d67ffc1f1e21efbbc6d83724a8c296e3bb8cda0c

Tests covering this functionality have been added to libkcapi.

Signed-off-by: Stephan Mueller <smueller@chronox.de>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
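The kcapi invocations above drive the AF_ALG socket interface. As a rough orientation, the following is a minimal, hedged userspace sketch (not part of the patch) of the same gcm(aes) encryption flow: key, IV, AAD and plaintext are zeroed placeholders, error handling is omitted, and with this patch applied the bytes read back are AAD || CT || Tag rather than leaving the AAD region of the output untouched.

/* Illustrative AF_ALG AEAD encryption sketch; not from the patch. */
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

#ifndef SOL_ALG
#define SOL_ALG 279
#endif

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "aead",
		.salg_name   = "gcm(aes)",
	};
	unsigned char key[16] = { 0 };		/* AES-128 key (placeholder) */
	unsigned char iv[12]  = { 0 };		/* GCM nonce (placeholder) */
	unsigned char aad[16] = { 0 };		/* associated data */
	unsigned char pt[16]  = { 0 };		/* plaintext */
	unsigned char out[16 + 16 + 16];	/* AAD || CT || Tag */
	int tfmfd, opfd;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
	/* authsize is passed via optlen; optval is unused */
	setsockopt(tfmfd, SOL_ALG, ALG_SET_AEAD_AUTHSIZE, NULL, 16);
	opfd = accept(tfmfd, NULL, 0);

	/* Control messages: operation, IV, AAD length */
	char cbuf[CMSG_SPACE(sizeof(int)) +
		  CMSG_SPACE(sizeof(struct af_alg_iv) + sizeof(iv)) +
		  CMSG_SPACE(sizeof(int))] = { 0 };
	struct iovec iov[2] = {
		{ .iov_base = aad, .iov_len = sizeof(aad) },
		{ .iov_base = pt,  .iov_len = sizeof(pt)  },
	};
	struct msghdr msg = {
		.msg_control    = cbuf,
		.msg_controllen = sizeof(cbuf),
		.msg_iov        = iov,
		.msg_iovlen     = 2,
	};
	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type  = ALG_SET_OP;
	cmsg->cmsg_len   = CMSG_LEN(sizeof(int));
	*(int *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;

	cmsg = CMSG_NXTHDR(&msg, cmsg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type  = ALG_SET_IV;
	cmsg->cmsg_len   = CMSG_LEN(sizeof(struct af_alg_iv) + sizeof(iv));
	((struct af_alg_iv *)CMSG_DATA(cmsg))->ivlen = sizeof(iv);
	memcpy(((struct af_alg_iv *)CMSG_DATA(cmsg))->iv, iv, sizeof(iv));

	cmsg = CMSG_NXTHDR(&msg, cmsg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type  = ALG_SET_AEAD_ASSOCLEN;
	cmsg->cmsg_len   = CMSG_LEN(sizeof(int));
	*(int *)CMSG_DATA(cmsg) = sizeof(aad);

	/* TX: AAD || PT */
	sendmsg(opfd, &msg, 0);

	/* RX with this patch: AAD || CT || Tag (AAD copied by the kernel) */
	read(opfd, out, sizeof(out));

	close(opfd);
	close(tfmfd);
	return 0;
}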
Diffstat (limited to 'crypto/algif_aead.c')
-rw-r--r--	crypto/algif_aead.c	183
1 files changed, 160 insertions, 23 deletions
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index 2de056c3139c..1f0696dd64f4 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -30,6 +30,8 @@
 #include <crypto/internal/aead.h>
 #include <crypto/scatterwalk.h>
 #include <crypto/if_alg.h>
+#include <crypto/skcipher.h>
+#include <crypto/null.h>
 #include <linux/init.h>
 #include <linux/list.h>
 #include <linux/kernel.h>
@@ -70,6 +72,7 @@ struct aead_async_req {
 struct aead_tfm {
 	struct crypto_aead *aead;
 	bool has_key;
+	struct crypto_skcipher *null_tfm;
 };
 
 struct aead_ctx {
@@ -168,7 +171,12 @@ static int aead_alloc_tsgl(struct sock *sk)
 	return 0;
 }
 
-static unsigned int aead_count_tsgl(struct sock *sk, size_t bytes)
+/**
+ * Count number of SG entries from the beginning of the SGL to @bytes. If
+ * an offset is provided, the counting of the SG entries starts at the offset.
+ */
+static unsigned int aead_count_tsgl(struct sock *sk, size_t bytes,
+				    size_t offset)
 {
 	struct alg_sock *ask = alg_sk(sk);
 	struct aead_ctx *ctx = ask->private;
@@ -183,32 +191,55 @@ static unsigned int aead_count_tsgl(struct sock *sk, size_t bytes)
 		struct scatterlist *sg = sgl->sg;
 
 		for (i = 0; i < sgl->cur; i++) {
+			size_t bytes_count;
+
+			/* Skip offset */
+			if (offset >= sg[i].length) {
+				offset -= sg[i].length;
+				bytes -= sg[i].length;
+				continue;
+			}
+
+			bytes_count = sg[i].length - offset;
+
+			offset = 0;
 			sgl_count++;
-			if (sg[i].length >= bytes)
+
+			/* If we have seen requested number of bytes, stop */
+			if (bytes_count >= bytes)
 				return sgl_count;
 
-			bytes -= sg[i].length;
+			bytes -= bytes_count;
 		}
 	}
 
 	return sgl_count;
 }
 
+/**
+ * Release the specified buffers from TX SGL pointed to by ctx->tsgl_list for
+ * @used bytes.
+ *
+ * If @dst is non-null, reassign the pages to dst. The caller must release
+ * the pages. If @dst_offset is given only reassign the pages to @dst starting
+ * at the @dst_offset (byte). The caller must ensure that @dst is large
+ * enough (e.g. by using aead_count_tsgl with the same offset).
+ */
 static void aead_pull_tsgl(struct sock *sk, size_t used,
-			   struct scatterlist *dst)
+			   struct scatterlist *dst, size_t dst_offset)
 {
 	struct alg_sock *ask = alg_sk(sk);
 	struct aead_ctx *ctx = ask->private;
 	struct aead_tsgl *sgl;
 	struct scatterlist *sg;
-	unsigned int i;
+	unsigned int i, j;
 
 	while (!list_empty(&ctx->tsgl_list)) {
 		sgl = list_first_entry(&ctx->tsgl_list, struct aead_tsgl,
 				       list);
 		sg = sgl->sg;
 
-		for (i = 0; i < sgl->cur; i++) {
+		for (i = 0, j = 0; i < sgl->cur; i++) {
 			size_t plen = min_t(size_t, used, sg[i].length);
 			struct page *page = sg_page(sg + i);
 
@@ -219,8 +250,20 @@ static void aead_pull_tsgl(struct sock *sk, size_t used,
 			 * Assumption: caller created aead_count_tsgl(len)
 			 * SG entries in dst.
 			 */
-			if (dst)
-				sg_set_page(dst + i, page, plen, sg[i].offset);
+			if (dst) {
+				if (dst_offset >= plen) {
+					/* discard page before offset */
+					dst_offset -= plen;
+					put_page(page);
+				} else {
+					/* reassign page to dst after offset */
+					sg_set_page(dst + j, page,
+						    plen - dst_offset,
+						    sg[i].offset + dst_offset);
+					dst_offset = 0;
+					j++;
+				}
+			}
 
 			sg[i].length -= plen;
 			sg[i].offset += plen;
@@ -233,6 +276,7 @@ static void aead_pull_tsgl(struct sock *sk, size_t used,
 
 			if (!dst)
 				put_page(page);
+
 			sg_assign_page(sg + i, NULL);
 		}
 
@@ -583,6 +627,20 @@ static void aead_async_cb(struct crypto_async_request *_req, int err)
 	release_sock(sk);
 }
 
+static int crypto_aead_copy_sgl(struct crypto_skcipher *null_tfm,
+				struct scatterlist *src,
+				struct scatterlist *dst, unsigned int len)
+{
+	SKCIPHER_REQUEST_ON_STACK(skreq, null_tfm);
+
+	skcipher_request_set_tfm(skreq, null_tfm);
+	skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_BACKLOG,
+				      NULL, NULL);
+	skcipher_request_set_crypt(skreq, src, dst, len, NULL);
+
+	return crypto_skcipher_encrypt(skreq);
+}
+
 static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
 			 size_t ignored, int flags)
 {
@@ -593,11 +651,14 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
 	struct aead_ctx *ctx = ask->private;
 	struct aead_tfm *aeadc = pask->private;
 	struct crypto_aead *tfm = aeadc->aead;
+	struct crypto_skcipher *null_tfm = aeadc->null_tfm;
 	unsigned int as = crypto_aead_authsize(tfm);
 	unsigned int areqlen =
 		sizeof(struct aead_async_req) + crypto_aead_reqsize(tfm);
 	struct aead_async_req *areq;
 	struct aead_rsgl *last_rsgl = NULL;
+	struct aead_tsgl *tsgl;
+	struct scatterlist *src;
 	int err = 0;
 	size_t used = 0;		/* [in] TX bufs to be en/decrypted */
 	size_t outlen = 0;		/* [out] RX bufs produced by kernel */
@@ -716,25 +777,91 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
 		outlen -= less;
 	}
 
+	processed = used + ctx->aead_assoclen;
+	tsgl = list_first_entry(&ctx->tsgl_list, struct aead_tsgl, list);
+
 	/*
-	 * Create a per request TX SGL for this request which tracks the
-	 * SG entries from the global TX SGL.
+	 * Copy of AAD from source to destination
+	 *
+	 * The AAD is copied to the destination buffer without change. Even
+	 * when user space uses an in-place cipher operation, the kernel
+	 * will copy the data as it does not see whether such in-place operation
+	 * is initiated.
+	 *
+	 * To ensure efficiency, the following implementation ensure that the
+	 * ciphers are invoked to perform a crypto operation in-place. This
+	 * is achieved by memory management specified as follows.
 	 */
-	processed = used + ctx->aead_assoclen;
-	areq->tsgl_entries = aead_count_tsgl(sk, processed);
-	if (!areq->tsgl_entries)
-		areq->tsgl_entries = 1;
-	areq->tsgl = sock_kmalloc(sk, sizeof(*areq->tsgl) * areq->tsgl_entries,
-				  GFP_KERNEL);
-	if (!areq->tsgl) {
-		err = -ENOMEM;
-		goto free;
+
+	/* Use the RX SGL as source (and destination) for crypto op. */
+	src = areq->first_rsgl.sgl.sg;
+
+	if (ctx->enc) {
+		/*
+		 * Encryption operation - The in-place cipher operation is
+		 * achieved by the following operation:
+		 *
+		 * TX SGL: AAD || PT || Tag
+		 *          |      |
+		 *          | copy |
+		 *          v      v
+		 * RX SGL: AAD || PT
+		 */
+		err = crypto_aead_copy_sgl(null_tfm, tsgl->sg,
+					   areq->first_rsgl.sgl.sg, processed);
+		if (err)
+			goto free;
+		aead_pull_tsgl(sk, processed, NULL, 0);
+	} else {
+		/*
+		 * Decryption operation - To achieve an in-place cipher
+		 * operation, the following SGL structure is used:
+		 *
+		 * TX SGL: AAD || CT || Tag
+		 *          |      |     ^
+		 *          | copy |     | Create SGL link.
+		 *          v      v     |
+		 * RX SGL: AAD || CT ----+
+		 */
+
+		/* Copy AAD || CT to RX SGL buffer for in-place operation. */
+		err = crypto_aead_copy_sgl(null_tfm, tsgl->sg,
+					   areq->first_rsgl.sgl.sg, outlen);
+		if (err)
+			goto free;
+
+		/* Create TX SGL for tag and chain it to RX SGL. */
+		areq->tsgl_entries = aead_count_tsgl(sk, processed,
+						     processed - as);
+		if (!areq->tsgl_entries)
+			areq->tsgl_entries = 1;
+		areq->tsgl = sock_kmalloc(sk, sizeof(*areq->tsgl) *
+					      areq->tsgl_entries,
+					  GFP_KERNEL);
+		if (!areq->tsgl) {
+			err = -ENOMEM;
+			goto free;
+		}
+		sg_init_table(areq->tsgl, areq->tsgl_entries);
+
+		/* Release TX SGL, except for tag data and reassign tag data. */
+		aead_pull_tsgl(sk, processed, areq->tsgl, processed - as);
+
+		/* chain the areq TX SGL holding the tag with RX SGL */
+		if (last_rsgl) {
+			/* RX SGL present */
+			struct af_alg_sgl *sgl_prev = &last_rsgl->sgl;
+
+			sg_unmark_end(sgl_prev->sg + sgl_prev->npages - 1);
+			sg_chain(sgl_prev->sg, sgl_prev->npages + 1,
+				 areq->tsgl);
+		} else
+			/* no RX SGL present (e.g. authentication only) */
+			src = areq->tsgl;
 	}
-	sg_init_table(areq->tsgl, areq->tsgl_entries);
-	aead_pull_tsgl(sk, processed, areq->tsgl);
 
 	/* Initialize the crypto operation */
-	aead_request_set_crypt(&areq->aead_req, areq->tsgl,
+	aead_request_set_crypt(&areq->aead_req, src,
 			       areq->first_rsgl.sgl.sg, used, ctx->iv);
 	aead_request_set_ad(&areq->aead_req, ctx->aead_assoclen);
 	aead_request_set_tfm(&areq->aead_req, tfm);
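The tag-only TX SGL in the decryption branch above is sized with the new offset parameter: aead_count_tsgl(sk, processed, processed - as) counts only the SG entries that still carry tag bytes, and aead_pull_tsgl() then reassigns exactly those pages. The following is a simplified userspace model of that counting rule (a hedged sketch only: plain segment lengths stand in for the scatterlist, and the helper name count_segments is made up for illustration).

/* Standalone model of the offset-based SG counting; not kernel code. */
#include <stddef.h>
#include <stdio.h>

/* Count segments needed to cover @bytes of payload after skipping @offset. */
static unsigned int count_segments(const size_t *seglen, size_t nsegs,
				   size_t bytes, size_t offset)
{
	unsigned int count = 0;
	size_t i;

	for (i = 0; i < nsegs; i++) {
		size_t avail;

		/* Segment lies entirely inside the skipped prefix. */
		if (offset >= seglen[i]) {
			offset -= seglen[i];
			bytes -= seglen[i];
			continue;
		}

		avail = seglen[i] - offset;
		offset = 0;
		count++;

		/* Requested number of bytes reached. */
		if (avail >= bytes)
			break;

		bytes -= avail;
	}

	return count;
}

int main(void)
{
	/* Hypothetical TX layout: AAD(16) || CT(16) || Tag(16) in 3 segments */
	const size_t seg[] = { 16, 16, 16 };
	size_t processed = 48, as = 16;

	/* Entries holding only the tag: skip processed - as = 32 bytes. */
	printf("tag entries: %u\n",
	       count_segments(seg, 3, processed, processed - as));
	return 0;
}

With this layout the model reports a single entry, which is why the kernel code rounds areq->tsgl_entries up to at least 1 before allocating the per-request TX SGL.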
@@ -951,6 +1078,7 @@ static void *aead_bind(const char *name, u32 type, u32 mask)
 {
 	struct aead_tfm *tfm;
 	struct crypto_aead *aead;
+	struct crypto_skcipher *null_tfm;
 
 	tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
 	if (!tfm)
@@ -962,7 +1090,15 @@ static void *aead_bind(const char *name, u32 type, u32 mask)
 		return ERR_CAST(aead);
 	}
 
+	null_tfm = crypto_get_default_null_skcipher2();
+	if (IS_ERR(null_tfm)) {
+		crypto_free_aead(aead);
+		kfree(tfm);
+		return ERR_CAST(null_tfm);
+	}
+
 	tfm->aead = aead;
+	tfm->null_tfm = null_tfm;
 
 	return tfm;
 }
@@ -1003,7 +1139,8 @@ static void aead_sock_destruct(struct sock *sk)
 	struct crypto_aead *tfm = aeadc->aead;
 	unsigned int ivlen = crypto_aead_ivsize(tfm);
 
-	aead_pull_tsgl(sk, ctx->used, NULL);
+	aead_pull_tsgl(sk, ctx->used, NULL, 0);
+	crypto_put_default_null_skcipher2();
 	sock_kzfree_s(sk, ctx->iv, ivlen);
 	sock_kfree_s(sk, ctx, ctx->len);
 	af_alg_release_parent(sk);