author     Kevin Coffman <kwc@citi.umich.edu>            2010-03-17 13:03:00 -0400
committer  Trond Myklebust <Trond.Myklebust@netapp.com>  2010-05-14 15:09:19 -0400
commit     934a95aa1c9c6ad77838800b79c306e982437605
tree       0f7000ffce214a156737fddc127fb0af238dfcff /net/sunrpc
parent     de9c17eb4a912c9028f7b470eb80815144883b26
gss_krb5: add remaining pieces to enable AES encryption support
Add the remaining pieces to enable support for Kerberos AES encryption types.

Signed-off-by: Kevin Coffman <kwc@citi.umich.edu>
Signed-off-by: Steve Dickson <steved@redhat.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
Diffstat (limited to 'net/sunrpc')
-rw-r--r--   net/sunrpc/auth_gss/gss_krb5_crypto.c   248
-rw-r--r--   net/sunrpc/auth_gss/gss_krb5_keys.c      30
-rw-r--r--   net/sunrpc/auth_gss/gss_krb5_mech.c      86
-rw-r--r--   net/sunrpc/auth_gss/gss_krb5_wrap.c       6
4 files changed, 358 insertions, 12 deletions
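
For orientation, the new gss_krb5_aes_encrypt() below lays the plaintext out as confounder | data | EC filler | copy of the 16-byte token header, encrypts that region in place, and then appends the truncated HMAC-SHA1 checksum. The following is a minimal userspace sketch of that length arithmetic only (illustrative, not part of the patch; the constants simply mirror the kernel names):

#include <stdio.h>

#define GSS_KRB5_TOK_HDR_LEN 16   /* RFC 4121 Wrap token header */
#define AES_BLOCK_SIZE       16   /* confounder length for aes-cts */
#define HMAC_SHA1_96_LEN     12   /* truncated checksum length */

static unsigned int wrapped_len(unsigned int datalen, unsigned int ec)
{
	/* header + confounder + data + filler + header copy + checksum */
	return GSS_KRB5_TOK_HDR_LEN + AES_BLOCK_SIZE + datalen + ec +
	       GSS_KRB5_TOK_HDR_LEN + HMAC_SHA1_96_LEN;
}

int main(void)
{
	printf("1000-byte payload, ec=0 -> %u-byte wrap token\n",
	       wrapped_len(1000, 0));
	return 0;
}
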
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index ca52ac28a537..967484a914f3 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -41,6 +41,7 @@
 #include <linux/crypto.h>
 #include <linux/highmem.h>
 #include <linux/pagemap.h>
+#include <linux/random.h>
 #include <linux/sunrpc/gss_krb5.h>
 #include <linux/sunrpc/xdr.h>
 
@@ -478,3 +479,250 @@ xdr_extend_head(struct xdr_buf *buf, unsigned int base, unsigned int shiftlen)
 
 	return 0;
 }
+
+static u32
+gss_krb5_cts_crypt(struct crypto_blkcipher *cipher, struct xdr_buf *buf,
+		   u32 offset, u8 *iv, struct page **pages, int encrypt)
+{
+	u32 ret;
+	struct scatterlist sg[1];
+	struct blkcipher_desc desc = { .tfm = cipher, .info = iv };
+	u8 data[crypto_blkcipher_blocksize(cipher) * 2];
+	struct page **save_pages;
+	u32 len = buf->len - offset;
+
+	BUG_ON(len > crypto_blkcipher_blocksize(cipher) * 2);
+
+	/*
+	 * For encryption, we want to read from the cleartext
+	 * page cache pages, and write the encrypted data to
+	 * the supplied xdr_buf pages.
+	 */
+	save_pages = buf->pages;
+	if (encrypt)
+		buf->pages = pages;
+
+	ret = read_bytes_from_xdr_buf(buf, offset, data, len);
+	buf->pages = save_pages;
+	if (ret)
+		goto out;
+
+	sg_init_one(sg, data, len);
+
+	if (encrypt)
+		ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, len);
+	else
+		ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, len);
+
+	if (ret)
+		goto out;
+
+	ret = write_bytes_to_xdr_buf(buf, offset, data, len);
+
+out:
+	return ret;
+}
+
+u32
+gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
+		     struct xdr_buf *buf, int ec, struct page **pages)
+{
+	u32 err;
+	struct xdr_netobj hmac;
+	u8 *cksumkey;
+	u8 *ecptr;
+	struct crypto_blkcipher *cipher, *aux_cipher;
+	int blocksize;
+	struct page **save_pages;
+	int nblocks, nbytes;
+	struct encryptor_desc desc;
+	u32 cbcbytes;
+
+	if (kctx->initiate) {
+		cipher = kctx->initiator_enc;
+		aux_cipher = kctx->initiator_enc_aux;
+		cksumkey = kctx->initiator_integ;
+	} else {
+		cipher = kctx->acceptor_enc;
+		aux_cipher = kctx->acceptor_enc_aux;
+		cksumkey = kctx->acceptor_integ;
+	}
+	blocksize = crypto_blkcipher_blocksize(cipher);
+
+	/* hide the gss token header and insert the confounder */
+	offset += GSS_KRB5_TOK_HDR_LEN;
+	if (xdr_extend_head(buf, offset, blocksize))
+		return GSS_S_FAILURE;
+	gss_krb5_make_confounder(buf->head[0].iov_base + offset, blocksize);
+	offset -= GSS_KRB5_TOK_HDR_LEN;
+
+	if (buf->tail[0].iov_base != NULL) {
+		ecptr = buf->tail[0].iov_base + buf->tail[0].iov_len;
+	} else {
+		buf->tail[0].iov_base = buf->head[0].iov_base
+						+ buf->head[0].iov_len;
+		buf->tail[0].iov_len = 0;
+		ecptr = buf->tail[0].iov_base;
+	}
+
+	memset(ecptr, 'X', ec);
+	buf->tail[0].iov_len += ec;
+	buf->len += ec;
+
+	/* copy plaintext gss token header after filler (if any) */
+	memcpy(ecptr + ec, buf->head[0].iov_base + offset,
+						GSS_KRB5_TOK_HDR_LEN);
+	buf->tail[0].iov_len += GSS_KRB5_TOK_HDR_LEN;
+	buf->len += GSS_KRB5_TOK_HDR_LEN;
+
+	/* Do the HMAC */
+	hmac.len = GSS_KRB5_MAX_CKSUM_LEN;
+	hmac.data = buf->tail[0].iov_base + buf->tail[0].iov_len;
+
+	/*
+	 * When we are called, pages points to the real page cache
+	 * data -- which we can't go and encrypt!  buf->pages points
+	 * to scratch pages which we are going to send off to the
+	 * client/server.  Swap in the plaintext pages to calculate
+	 * the hmac.
+	 */
+	save_pages = buf->pages;
+	buf->pages = pages;
+
+	err = make_checksum_v2(kctx, NULL, 0, buf,
+			       offset + GSS_KRB5_TOK_HDR_LEN, cksumkey, &hmac);
+	buf->pages = save_pages;
+	if (err)
+		return GSS_S_FAILURE;
+
+	nbytes = buf->len - offset - GSS_KRB5_TOK_HDR_LEN;
+	nblocks = (nbytes + blocksize - 1) / blocksize;
+	cbcbytes = 0;
+	if (nblocks > 2)
+		cbcbytes = (nblocks - 2) * blocksize;
+
+	memset(desc.iv, 0, sizeof(desc.iv));
+
+	if (cbcbytes) {
+		desc.pos = offset + GSS_KRB5_TOK_HDR_LEN;
+		desc.fragno = 0;
+		desc.fraglen = 0;
+		desc.pages = pages;
+		desc.outbuf = buf;
+		desc.desc.info = desc.iv;
+		desc.desc.flags = 0;
+		desc.desc.tfm = aux_cipher;
+
+		sg_init_table(desc.infrags, 4);
+		sg_init_table(desc.outfrags, 4);
+
+		err = xdr_process_buf(buf, offset + GSS_KRB5_TOK_HDR_LEN,
+				      cbcbytes, encryptor, &desc);
+		if (err)
+			goto out_err;
+	}
+
+	/* Make sure IV carries forward from any CBC results. */
+	err = gss_krb5_cts_crypt(cipher, buf,
+				 offset + GSS_KRB5_TOK_HDR_LEN + cbcbytes,
+				 desc.iv, pages, 1);
+	if (err) {
+		err = GSS_S_FAILURE;
+		goto out_err;
+	}
+
+	/* Now update buf to account for HMAC */
+	buf->tail[0].iov_len += kctx->gk5e->cksumlength;
+	buf->len += kctx->gk5e->cksumlength;
+
+out_err:
+	if (err)
+		err = GSS_S_FAILURE;
+	return err;
+}
+
+u32
+gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf,
+		     u32 *headskip, u32 *tailskip)
+{
+	struct xdr_buf subbuf;
+	u32 ret = 0;
+	u8 *cksum_key;
+	struct crypto_blkcipher *cipher, *aux_cipher;
+	struct xdr_netobj our_hmac_obj;
+	u8 our_hmac[GSS_KRB5_MAX_CKSUM_LEN];
+	u8 pkt_hmac[GSS_KRB5_MAX_CKSUM_LEN];
+	int nblocks, blocksize, cbcbytes;
+	struct decryptor_desc desc;
+
+	if (kctx->initiate) {
+		cipher = kctx->acceptor_enc;
+		aux_cipher = kctx->acceptor_enc_aux;
+		cksum_key = kctx->acceptor_integ;
+	} else {
+		cipher = kctx->initiator_enc;
+		aux_cipher = kctx->initiator_enc_aux;
+		cksum_key = kctx->initiator_integ;
+	}
+	blocksize = crypto_blkcipher_blocksize(cipher);
+
+
+	/* create a segment skipping the header and leaving out the checksum */
+	xdr_buf_subsegment(buf, &subbuf, offset + GSS_KRB5_TOK_HDR_LEN,
+			   (buf->len - offset - GSS_KRB5_TOK_HDR_LEN -
+			    kctx->gk5e->cksumlength));
+
+	nblocks = (subbuf.len + blocksize - 1) / blocksize;
+
+	cbcbytes = 0;
+	if (nblocks > 2)
+		cbcbytes = (nblocks - 2) * blocksize;
+
+	memset(desc.iv, 0, sizeof(desc.iv));
+
+	if (cbcbytes) {
+		desc.fragno = 0;
+		desc.fraglen = 0;
+		desc.desc.info = desc.iv;
+		desc.desc.flags = 0;
+		desc.desc.tfm = aux_cipher;
+
+		sg_init_table(desc.frags, 4);
+
+		ret = xdr_process_buf(&subbuf, 0, cbcbytes, decryptor, &desc);
+		if (ret)
+			goto out_err;
+	}
+
+	/* Make sure IV carries forward from any CBC results. */
+	ret = gss_krb5_cts_crypt(cipher, &subbuf, cbcbytes, desc.iv, NULL, 0);
+	if (ret)
+		goto out_err;
+
+
+	/* Calculate our hmac over the plaintext data */
+	our_hmac_obj.len = sizeof(our_hmac);
+	our_hmac_obj.data = our_hmac;
+
+	ret = make_checksum_v2(kctx, NULL, 0, &subbuf, 0,
+			       cksum_key, &our_hmac_obj);
+	if (ret)
+		goto out_err;
+
+	/* Get the packet's hmac value */
+	ret = read_bytes_from_xdr_buf(buf, buf->len - kctx->gk5e->cksumlength,
+				      pkt_hmac, kctx->gk5e->cksumlength);
+	if (ret)
+		goto out_err;
+
+	if (memcmp(pkt_hmac, our_hmac, kctx->gk5e->cksumlength) != 0) {
+		ret = GSS_S_BAD_SIG;
+		goto out_err;
+	}
+	*headskip = crypto_blkcipher_blocksize(cipher);
+	*tailskip = kctx->gk5e->cksumlength;
+out_err:
+	if (ret && ret != GSS_S_BAD_SIG)
+		ret = GSS_S_FAILURE;
+	return ret;
+}
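
Both gss_krb5_aes_encrypt() and gss_krb5_aes_decrypt() above split the payload the same way: every whole block except the last two is run through the plain cbc(aes) "aux" transform, and the remaining one or two (possibly partial) blocks go through cts(cbc(aes)), with the IV carried forward from the CBC pass. A standalone sketch of that block arithmetic (illustrative only, not kernel code):

#include <stdio.h>

int main(void)
{
	const unsigned int blocksize = 16;
	unsigned int nbytes[] = { 5, 16, 33, 100, 4096 };
	unsigned int i;

	for (i = 0; i < sizeof(nbytes) / sizeof(nbytes[0]); i++) {
		/* same computation as the kernel functions above */
		unsigned int nblocks = (nbytes[i] + blocksize - 1) / blocksize;
		unsigned int cbcbytes = nblocks > 2 ?
					(nblocks - 2) * blocksize : 0;

		printf("%4u bytes: %3u blocks, %4u via cbc(aes), %2u via cts\n",
		       nbytes[i], nblocks, cbcbytes, nbytes[i] - cbcbytes);
	}
	return 0;
}
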
diff --git a/net/sunrpc/auth_gss/gss_krb5_keys.c b/net/sunrpc/auth_gss/gss_krb5_keys.c
index d54668790f0c..33b87f04b30b 100644
--- a/net/sunrpc/auth_gss/gss_krb5_keys.c
+++ b/net/sunrpc/auth_gss/gss_krb5_keys.c
@@ -303,3 +303,33 @@ u32 gss_krb5_des3_make_key(const struct gss_krb5_enctype *gk5e,
 err_out:
 	return ret;
 }
+
+/*
+ * This is the aes key derivation postprocess function
+ */
+u32 gss_krb5_aes_make_key(const struct gss_krb5_enctype *gk5e,
+			  struct xdr_netobj *randombits,
+			  struct xdr_netobj *key)
+{
+	u32 ret = EINVAL;
+
+	if (key->len != 16 && key->len != 32) {
+		dprintk("%s: key->len is %d\n", __func__, key->len);
+		goto err_out;
+	}
+	if (randombits->len != 16 && randombits->len != 32) {
+		dprintk("%s: randombits->len is %d\n",
+			__func__, randombits->len);
+		goto err_out;
+	}
+	if (randombits->len != key->len) {
+		dprintk("%s: randombits->len is %d, key->len is %d\n",
+			__func__, randombits->len, key->len);
+		goto err_out;
+	}
+	memcpy(key->data, randombits->data, key->len);
+	ret = 0;
+err_out:
+	return ret;
+}
+
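
For the AES enctypes the RFC 3962 random-to-key operation is the identity, so gss_krb5_aes_make_key() only has to validate lengths and copy the bits across, unlike the DES3 variant (which must fix parity). A userspace sketch of the same check-and-copy step (a hedged illustration, not the kernel function):

#include <stdio.h>
#include <string.h>
#include <errno.h>

static int aes_random_to_key(const unsigned char *randombits, size_t rlen,
			     unsigned char *key, size_t klen)
{
	/* key and random bits must both be 16 or 32 bytes, and equal */
	if ((klen != 16 && klen != 32) || rlen != klen)
		return -EINVAL;
	memcpy(key, randombits, klen);
	return 0;
}

int main(void)
{
	unsigned char rnd[32] = { 0 }, key[32];

	printf("32-byte random -> 32-byte key: %d\n",
	       aes_random_to_key(rnd, 32, key, 32));
	printf("16-byte random -> 32-byte key: %d\n",
	       aes_random_to_key(rnd, 16, key, 32));
	return 0;
}
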
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index ce80f996758a..694ad77c86bf 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -91,6 +91,50 @@ static const struct gss_krb5_enctype supported_gss_krb5_enctypes[] = {
 	  .cksumlength = 20,
 	  .keyed_cksum = 1,
 	},
+	/*
+	 * AES128
+	 */
+	{
+	  .etype = ENCTYPE_AES128_CTS_HMAC_SHA1_96,
+	  .ctype = CKSUMTYPE_HMAC_SHA1_96_AES128,
+	  .name = "aes128-cts",
+	  .encrypt_name = "cts(cbc(aes))",
+	  .cksum_name = "hmac(sha1)",
+	  .encrypt = krb5_encrypt,
+	  .decrypt = krb5_decrypt,
+	  .mk_key = gss_krb5_aes_make_key,
+	  .encrypt_v2 = gss_krb5_aes_encrypt,
+	  .decrypt_v2 = gss_krb5_aes_decrypt,
+	  .signalg = -1,
+	  .sealalg = -1,
+	  .keybytes = 16,
+	  .keylength = 16,
+	  .blocksize = 16,
+	  .cksumlength = 12,
+	  .keyed_cksum = 1,
+	},
+	/*
+	 * AES256
+	 */
+	{
+	  .etype = ENCTYPE_AES256_CTS_HMAC_SHA1_96,
+	  .ctype = CKSUMTYPE_HMAC_SHA1_96_AES256,
+	  .name = "aes256-cts",
+	  .encrypt_name = "cts(cbc(aes))",
+	  .cksum_name = "hmac(sha1)",
+	  .encrypt = krb5_encrypt,
+	  .decrypt = krb5_decrypt,
+	  .mk_key = gss_krb5_aes_make_key,
+	  .encrypt_v2 = gss_krb5_aes_encrypt,
+	  .decrypt_v2 = gss_krb5_aes_decrypt,
+	  .signalg = -1,
+	  .sealalg = -1,
+	  .keybytes = 32,
+	  .keylength = 32,
+	  .blocksize = 16,
+	  .cksumlength = 12,
+	  .keyed_cksum = 1,
+	},
 };
 
 static const int num_supported_enctypes =
@@ -270,20 +314,19 @@ out_err:
 }
 
 struct crypto_blkcipher *
-context_v2_alloc_cipher(struct krb5_ctx *ctx, u8 *key)
+context_v2_alloc_cipher(struct krb5_ctx *ctx, const char *cname, u8 *key)
 {
 	struct crypto_blkcipher *cp;
 
-	cp = crypto_alloc_blkcipher(ctx->gk5e->encrypt_name,
-			0, CRYPTO_ALG_ASYNC);
+	cp = crypto_alloc_blkcipher(cname, 0, CRYPTO_ALG_ASYNC);
 	if (IS_ERR(cp)) {
 		dprintk("gss_kerberos_mech: unable to initialize "
-			"crypto algorithm %s\n", ctx->gk5e->encrypt_name);
+			"crypto algorithm %s\n", cname);
 		return NULL;
 	}
 	if (crypto_blkcipher_setkey(cp, key, ctx->gk5e->keylength)) {
 		dprintk("gss_kerberos_mech: error setting key for "
-			"crypto algorithm %s\n", ctx->gk5e->encrypt_name);
+			"crypto algorithm %s\n", cname);
 		crypto_free_blkcipher(cp);
 		return NULL;
 	}
@@ -315,11 +358,13 @@ context_derive_keys_des3(struct krb5_ctx *ctx, u8 *rawkey, u32 keylen)
 	keyout.len = keylen;
 
 	/* seq uses the raw key */
-	ctx->seq = context_v2_alloc_cipher(ctx, rawkey);
+	ctx->seq = context_v2_alloc_cipher(ctx, ctx->gk5e->encrypt_name,
+					   rawkey);
 	if (ctx->seq == NULL)
 		goto out_err;
 
-	ctx->enc = context_v2_alloc_cipher(ctx, rawkey);
+	ctx->enc = context_v2_alloc_cipher(ctx, ctx->gk5e->encrypt_name,
+					   rawkey);
 	if (ctx->enc == NULL)
 		goto out_free_seq;
 
@@ -366,7 +411,9 @@ context_derive_keys_new(struct krb5_ctx *ctx, u8 *rawkey, u32 keylen)
 			__func__, err);
 		goto out_err;
 	}
-	ctx->initiator_enc = context_v2_alloc_cipher(ctx, ctx->initiator_seal);
+	ctx->initiator_enc = context_v2_alloc_cipher(ctx,
+						     ctx->gk5e->encrypt_name,
+						     ctx->initiator_seal);
 	if (ctx->initiator_enc == NULL)
 		goto out_err;
 
@@ -379,7 +426,9 @@ context_derive_keys_new(struct krb5_ctx *ctx, u8 *rawkey, u32 keylen)
 			__func__, err);
 		goto out_free_initiator_enc;
 	}
-	ctx->acceptor_enc = context_v2_alloc_cipher(ctx, ctx->acceptor_seal);
+	ctx->acceptor_enc = context_v2_alloc_cipher(ctx,
+						    ctx->gk5e->encrypt_name,
+						    ctx->acceptor_seal);
 	if (ctx->acceptor_enc == NULL)
 		goto out_free_initiator_enc;
 
@@ -423,6 +472,23 @@ context_derive_keys_new(struct krb5_ctx *ctx, u8 *rawkey, u32 keylen)
 		goto out_free_acceptor_enc;
 	}
 
+	switch (ctx->enctype) {
+	case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
+	case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
+		ctx->initiator_enc_aux =
+			context_v2_alloc_cipher(ctx, "cbc(aes)",
+						ctx->initiator_seal);
+		if (ctx->initiator_enc_aux == NULL)
+			goto out_free_acceptor_enc;
+		ctx->acceptor_enc_aux =
+			context_v2_alloc_cipher(ctx, "cbc(aes)",
+						ctx->acceptor_seal);
+		if (ctx->acceptor_enc_aux == NULL) {
+			crypto_free_blkcipher(ctx->initiator_enc_aux);
+			goto out_free_acceptor_enc;
+		}
+	}
+
 	return 0;
 
 out_free_acceptor_enc:
@@ -537,6 +603,8 @@ gss_delete_sec_context_kerberos(void *internal_ctx) {
 	crypto_free_blkcipher(kctx->enc);
 	crypto_free_blkcipher(kctx->acceptor_enc);
 	crypto_free_blkcipher(kctx->initiator_enc);
+	crypto_free_blkcipher(kctx->acceptor_enc_aux);
+	crypto_free_blkcipher(kctx->initiator_enc_aux);
 	kfree(kctx->mech_used.data);
 	kfree(kctx);
 }
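
The new table entries are keyed by .etype; a consumer picks the entry whose etype matches the negotiated encryption type and then uses its .encrypt_name and key sizes. The lookup helper below is a simplified stand-in (an assumption about how such a table is typically consumed, not the kernel's lookup code):

#include <stdio.h>

#define ENCTYPE_AES128_CTS_HMAC_SHA1_96 17
#define ENCTYPE_AES256_CTS_HMAC_SHA1_96 18

struct enctype_entry {
	int etype;
	const char *name;
	const char *encrypt_name;
	unsigned int keylength;
	unsigned int cksumlength;
};

static const struct enctype_entry enctypes[] = {
	{ ENCTYPE_AES128_CTS_HMAC_SHA1_96, "aes128-cts", "cts(cbc(aes))", 16, 12 },
	{ ENCTYPE_AES256_CTS_HMAC_SHA1_96, "aes256-cts", "cts(cbc(aes))", 32, 12 },
};

static const struct enctype_entry *lookup_enctype(int etype)
{
	unsigned int i;

	for (i = 0; i < sizeof(enctypes) / sizeof(enctypes[0]); i++)
		if (enctypes[i].etype == etype)
			return &enctypes[i];
	return NULL;
}

int main(void)
{
	const struct enctype_entry *e =
		lookup_enctype(ENCTYPE_AES256_CTS_HMAC_SHA1_96);

	if (e)
		printf("%s: transform %s, %u-byte key, %u-byte checksum\n",
		       e->name, e->encrypt_name, e->keylength, e->cksumlength);
	return 0;
}
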
diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c
index 4aa46b28298c..a1a3585fa761 100644
--- a/net/sunrpc/auth_gss/gss_krb5_wrap.c
+++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c
@@ -113,8 +113,8 @@ out:
 	return 0;
 }
 
-static void
-make_confounder(char *p, u32 conflen)
+void
+gss_krb5_make_confounder(char *p, u32 conflen)
 {
 	static u64 i = 0;
 	u64 *q = (u64 *)p;
@@ -204,7 +204,7 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
 	memset(ptr + 4, 0xff, 4);
 	*(__be16 *)(ptr + 4) = cpu_to_le16(kctx->gk5e->sealalg);
 
-	make_confounder(msg_start, blocksize);
+	gss_krb5_make_confounder(msg_start, blocksize);
 
 	if (kctx->gk5e->keyed_cksum)
 		cksumkey = kctx->cksum;
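
As the context lines show, gss_krb5_make_confounder() at this point still derives the confounder from a static 64-bit counter rather than a random source, so each wrapped token gets a distinct value. A userspace sketch of that scheme (a simplification, not the exported kernel helper):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static void make_confounder(unsigned char *p, uint32_t conflen)
{
	static uint64_t counter;

	/* write successive 64-bit counter values until conflen is covered */
	while (conflen) {
		uint64_t val = counter++;
		uint32_t n = conflen < sizeof(val) ? conflen : (uint32_t)sizeof(val);

		memcpy(p, &val, n);
		p += n;
		conflen -= n;
	}
}

int main(void)
{
	unsigned char conf[16];
	unsigned int j;

	make_confounder(conf, sizeof(conf));
	make_confounder(conf, sizeof(conf));	/* second call yields different bytes */
	for (j = 0; j < sizeof(conf); j++)
		printf("%02x", conf[j]);
	printf("\n");
	return 0;
}
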