author    Linus Torvalds <torvalds@linux-foundation.org>  2010-05-19 20:24:05 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2010-05-19 20:24:05 -0400
commit    6a6be470c3071559970c5659354484d4f664050e (patch)
tree      d4b335e863e426acad96fe5e4bce2e3e064abc32 /net/sunrpc
parent    98c89cdd3a292af3451e47a2a33132f5183861b0 (diff)
parent    126e216a8730532dfb685205309275f87e3d133e (diff)
Merge branch 'nfs-for-2.6.35' of git://git.linux-nfs.org/projects/trondmy/nfs-2.6
* 'nfs-for-2.6.35' of git://git.linux-nfs.org/projects/trondmy/nfs-2.6: (78 commits)
  SUNRPC: Don't spam gssd with upcall requests when the kerberos key expired
  SUNRPC: Reorder the struct rpc_task fields
  SUNRPC: Remove the 'tk_magic' debugging field
  SUNRPC: Move the task->tk_bytes_sent and tk_rtt to struct rpc_rqst
  NFS: Don't call iput() in nfs_access_cache_shrinker
  NFS: Clean up nfs_access_zap_cache()
  NFS: Don't run nfs_access_cache_shrinker() when the mask is GFP_NOFS
  SUNRPC: Ensure rpcauth_prune_expired() respects the nr_to_scan parameter
  SUNRPC: Ensure memory shrinker doesn't waste time in rpcauth_prune_expired()
  SUNRPC: Dont run rpcauth_cache_shrinker() when gfp_mask is GFP_NOFS
  NFS: Read requests can use GFP_KERNEL.
  NFS: Clean up nfs_create_request()
  NFS: Don't use GFP_KERNEL in rpcsec_gss downcalls
  NFSv4: Don't use GFP_KERNEL allocations in state recovery
  SUNRPC: Fix xs_setup_bc_tcp()
  SUNRPC: Replace jiffies-based metrics with ktime-based metrics
  ktime: introduce ktime_to_ms()
  SUNRPC: RPC metrics and RTT estimator should use same RTT value
  NFS: Calldata for nfs4_renew_done()
  NFS: Squelch compiler warning in nfs_add_server_stats()
  ...
Diffstat (limited to 'net/sunrpc')
-rw-r--r--  net/sunrpc/auth.c                      |   19
-rw-r--r--  net/sunrpc/auth_gss/Makefile           |    2
-rw-r--r--  net/sunrpc/auth_gss/auth_gss.c         |   89
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_crypto.c  |  697
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_keys.c    |  336
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_mech.c    |  584
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_seal.c    |  155
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_seqnum.c  |   83
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_unseal.c  |  113
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_wrap.c    |  404
-rw-r--r--  net/sunrpc/auth_gss/gss_mech_switch.c  |   21
-rw-r--r--  net/sunrpc/auth_gss/gss_spkm3_mech.c   |    5
-rw-r--r--  net/sunrpc/auth_gss/svcauth_gss.c      |   17
-rw-r--r--  net/sunrpc/clnt.c                      |   19
-rw-r--r--  net/sunrpc/sched.c                     |   26
-rw-r--r--  net/sunrpc/stats.c                     |   29
-rw-r--r--  net/sunrpc/xdr.c                       |    1
-rw-r--r--  net/sunrpc/xprt.c                      |   59
-rw-r--r--  net/sunrpc/xprtrdma/transport.c        |   31
-rw-r--r--  net/sunrpc/xprtsock.c                  |   40
20 files changed, 2408 insertions(+), 322 deletions(-)
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 95afe79dd9d7..73affb8624fa 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -236,10 +236,15 @@ rpcauth_prune_expired(struct list_head *free, int nr_to_scan)
 
         list_for_each_entry_safe(cred, next, &cred_unused, cr_lru) {
 
-                /* Enforce a 60 second garbage collection moratorium */
+                if (nr_to_scan-- == 0)
+                        break;
+                /*
+                 * Enforce a 60 second garbage collection moratorium
+                 * Note that the cred_unused list must be time-ordered.
+                 */
                 if (time_in_range(cred->cr_expire, expired, jiffies) &&
                     test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) != 0)
-                        continue;
+                        return 0;
 
                 list_del_init(&cred->cr_lru);
                 number_cred_unused--;
@@ -252,13 +257,10 @@ rpcauth_prune_expired(struct list_head *free, int nr_to_scan)
                         get_rpccred(cred);
                         list_add_tail(&cred->cr_lru, free);
                         rpcauth_unhash_cred_locked(cred);
-                        nr_to_scan--;
                 }
                 spin_unlock(cache_lock);
-                if (nr_to_scan == 0)
-                        break;
         }
-        return nr_to_scan;
+        return (number_cred_unused / 100) * sysctl_vfs_cache_pressure;
 }
 
 /*
@@ -270,11 +272,12 @@ rpcauth_cache_shrinker(int nr_to_scan, gfp_t gfp_mask)
         LIST_HEAD(free);
         int res;
 
+        if ((gfp_mask & GFP_KERNEL) != GFP_KERNEL)
+                return (nr_to_scan == 0) ? 0 : -1;
         if (list_empty(&cred_unused))
                 return 0;
         spin_lock(&rpc_credcache_lock);
-        nr_to_scan = rpcauth_prune_expired(&free, nr_to_scan);
-        res = (number_cred_unused / 100) * sysctl_vfs_cache_pressure;
+        res = rpcauth_prune_expired(&free, nr_to_scan);
         spin_unlock(&rpc_credcache_lock);
         rpcauth_destroy_credlist(&free);
         return res;
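
The three hunks above change the shrinker protocol followed by the RPC credential cache: the scan budget is now consumed inside rpcauth_prune_expired() (which can stop at the first entry still inside the 60 second moratorium because cred_unused is time-ordered), reclaim is refused outright when the allocation context is not GFP_KERNEL, and the remaining cache size is reported scaled by sysctl_vfs_cache_pressure. A minimal sketch of the shrink-callback contract these functions now follow, assuming the 2.6.3x-era signature visible in the hunk header; the my_cache_* helpers are hypothetical:

static int my_cache_shrinker(int nr_to_scan, gfp_t gfp_mask)
{
        /* Refuse to reclaim from contexts that must not block on fs locks */
        if ((gfp_mask & GFP_KERNEL) != GFP_KERNEL)
                return (nr_to_scan == 0) ? 0 : -1;      /* -1: retry later */
        if (nr_to_scan)
                my_cache_free_some(nr_to_scan);         /* consume the budget */
        /* Report remaining objects, scaled by the VM cache-pressure knob */
        return (my_cache_count() / 100) * sysctl_vfs_cache_pressure;
}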
diff --git a/net/sunrpc/auth_gss/Makefile b/net/sunrpc/auth_gss/Makefile
index 4de8bcf26fa7..74a231735f67 100644
--- a/net/sunrpc/auth_gss/Makefile
+++ b/net/sunrpc/auth_gss/Makefile
@@ -10,7 +10,7 @@ auth_rpcgss-objs := auth_gss.o gss_generic_token.o \
 obj-$(CONFIG_RPCSEC_GSS_KRB5) += rpcsec_gss_krb5.o
 
 rpcsec_gss_krb5-objs := gss_krb5_mech.o gss_krb5_seal.o gss_krb5_unseal.o \
-        gss_krb5_seqnum.o gss_krb5_wrap.o gss_krb5_crypto.o
+        gss_krb5_seqnum.o gss_krb5_wrap.o gss_krb5_crypto.o gss_krb5_keys.o
 
 obj-$(CONFIG_RPCSEC_GSS_SPKM3) += rpcsec_gss_spkm3.o
 
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index c389ccf6437d..8da2a0e68574 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -57,11 +57,14 @@ static const struct rpc_authops authgss_ops;
 static const struct rpc_credops gss_credops;
 static const struct rpc_credops gss_nullops;
 
+#define GSS_RETRY_EXPIRED 5
+static unsigned int gss_expired_cred_retry_delay = GSS_RETRY_EXPIRED;
+
 #ifdef RPC_DEBUG
 # define RPCDBG_FACILITY        RPCDBG_AUTH
 #endif
 
-#define GSS_CRED_SLACK          1024
+#define GSS_CRED_SLACK          (RPC_MAX_AUTH_SIZE * 2)
 /* length of a krb5 verifier (48), plus data added before arguments when
  * using integrity (two 4-byte integers): */
 #define GSS_VERF_SLACK          100
@@ -229,7 +232,7 @@ gss_fill_context(const void *p, const void *end, struct gss_cl_ctx *ctx, struct
                 p = ERR_PTR(-EFAULT);
                 goto err;
         }
-        ret = gss_import_sec_context(p, seclen, gm, &ctx->gc_gss_ctx);
+        ret = gss_import_sec_context(p, seclen, gm, &ctx->gc_gss_ctx, GFP_NOFS);
         if (ret < 0) {
                 p = ERR_PTR(ret);
                 goto err;
@@ -350,6 +353,24 @@ gss_unhash_msg(struct gss_upcall_msg *gss_msg)
 }
 
 static void
+gss_handle_downcall_result(struct gss_cred *gss_cred, struct gss_upcall_msg *gss_msg)
+{
+        switch (gss_msg->msg.errno) {
+        case 0:
+                if (gss_msg->ctx == NULL)
+                        break;
+                clear_bit(RPCAUTH_CRED_NEGATIVE, &gss_cred->gc_base.cr_flags);
+                gss_cred_set_ctx(&gss_cred->gc_base, gss_msg->ctx);
+                break;
+        case -EKEYEXPIRED:
+                set_bit(RPCAUTH_CRED_NEGATIVE, &gss_cred->gc_base.cr_flags);
+        }
+        gss_cred->gc_upcall_timestamp = jiffies;
+        gss_cred->gc_upcall = NULL;
+        rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno);
+}
+
+static void
 gss_upcall_callback(struct rpc_task *task)
 {
         struct gss_cred *gss_cred = container_of(task->tk_msg.rpc_cred,
@@ -358,13 +379,9 @@ gss_upcall_callback(struct rpc_task *task)
         struct inode *inode = &gss_msg->inode->vfs_inode;
 
         spin_lock(&inode->i_lock);
-        if (gss_msg->ctx)
-                gss_cred_set_ctx(task->tk_msg.rpc_cred, gss_msg->ctx);
-        else
-                task->tk_status = gss_msg->msg.errno;
-        gss_cred->gc_upcall = NULL;
-        rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno);
+        gss_handle_downcall_result(gss_cred, gss_msg);
         spin_unlock(&inode->i_lock);
+        task->tk_status = gss_msg->msg.errno;
         gss_release_msg(gss_msg);
 }
 
@@ -377,11 +394,12 @@ static void gss_encode_v0_msg(struct gss_upcall_msg *gss_msg)
 static void gss_encode_v1_msg(struct gss_upcall_msg *gss_msg,
                                 struct rpc_clnt *clnt, int machine_cred)
 {
+        struct gss_api_mech *mech = gss_msg->auth->mech;
         char *p = gss_msg->databuf;
         int len = 0;
 
         gss_msg->msg.len = sprintf(gss_msg->databuf, "mech=%s uid=%d ",
-                                   gss_msg->auth->mech->gm_name,
+                                   mech->gm_name,
                                    gss_msg->uid);
         p += gss_msg->msg.len;
         if (clnt->cl_principal) {
@@ -398,6 +416,11 @@ static void gss_encode_v1_msg(struct gss_upcall_msg *gss_msg,
                 p += len;
                 gss_msg->msg.len += len;
         }
+        if (mech->gm_upcall_enctypes) {
+                len = sprintf(p, mech->gm_upcall_enctypes);
+                p += len;
+                gss_msg->msg.len += len;
+        }
         len = sprintf(p, "\n");
         gss_msg->msg.len += len;
 
@@ -507,18 +530,16 @@ gss_refresh_upcall(struct rpc_task *task)
         spin_lock(&inode->i_lock);
         if (gss_cred->gc_upcall != NULL)
                 rpc_sleep_on(&gss_cred->gc_upcall->rpc_waitqueue, task, NULL);
-        else if (gss_msg->ctx != NULL) {
-                gss_cred_set_ctx(task->tk_msg.rpc_cred, gss_msg->ctx);
-                gss_cred->gc_upcall = NULL;
-                rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno);
-        } else if (gss_msg->msg.errno >= 0) {
+        else if (gss_msg->ctx == NULL && gss_msg->msg.errno >= 0) {
                 task->tk_timeout = 0;
                 gss_cred->gc_upcall = gss_msg;
                 /* gss_upcall_callback will release the reference to gss_upcall_msg */
                 atomic_inc(&gss_msg->count);
                 rpc_sleep_on(&gss_msg->rpc_waitqueue, task, gss_upcall_callback);
-        } else
+        } else {
+                gss_handle_downcall_result(gss_cred, gss_msg);
                 err = gss_msg->msg.errno;
+        }
         spin_unlock(&inode->i_lock);
         gss_release_msg(gss_msg);
 out:
@@ -1117,6 +1138,23 @@ static int gss_renew_cred(struct rpc_task *task)
         return 0;
 }
 
+static int gss_cred_is_negative_entry(struct rpc_cred *cred)
+{
+        if (test_bit(RPCAUTH_CRED_NEGATIVE, &cred->cr_flags)) {
+                unsigned long now = jiffies;
+                unsigned long begin, expire;
+                struct gss_cred *gss_cred;
+
+                gss_cred = container_of(cred, struct gss_cred, gc_base);
+                begin = gss_cred->gc_upcall_timestamp;
+                expire = begin + gss_expired_cred_retry_delay * HZ;
+
+                if (time_in_range_open(now, begin, expire))
+                        return 1;
+        }
+        return 0;
+}
+
 /*
 * Refresh credentials. XXX - finish
 */
@@ -1126,6 +1164,9 @@ gss_refresh(struct rpc_task *task)
         struct rpc_cred *cred = task->tk_msg.rpc_cred;
         int ret = 0;
 
+        if (gss_cred_is_negative_entry(cred))
+                return -EKEYEXPIRED;
+
         if (!test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags) &&
             !test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags)) {
                 ret = gss_renew_cred(task);
@@ -1316,15 +1357,21 @@ gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
         inpages = snd_buf->pages + first;
         snd_buf->pages = rqstp->rq_enc_pages;
         snd_buf->page_base -= first << PAGE_CACHE_SHIFT;
-        /* Give the tail its own page, in case we need extra space in the
-         * head when wrapping: */
+        /*
+         * Give the tail its own page, in case we need extra space in the
+         * head when wrapping:
+         *
+         * call_allocate() allocates twice the slack space required
+         * by the authentication flavor to rq_callsize.
+         * For GSS, slack is GSS_CRED_SLACK.
+         */
         if (snd_buf->page_len || snd_buf->tail[0].iov_len) {
                 tmp = page_address(rqstp->rq_enc_pages[rqstp->rq_enc_pages_num - 1]);
                 memcpy(tmp, snd_buf->tail[0].iov_base, snd_buf->tail[0].iov_len);
                 snd_buf->tail[0].iov_base = tmp;
         }
         maj_stat = gss_wrap(ctx->gc_gss_ctx, offset, snd_buf, inpages);
-        /* RPC_SLACK_SPACE should prevent this ever happening: */
+        /* slack space should prevent this ever happening: */
         BUG_ON(snd_buf->len > snd_buf->buflen);
         status = -EIO;
         /* We're assuming that when GSS_S_CONTEXT_EXPIRED, the encryption was
@@ -1573,5 +1620,11 @@ static void __exit exit_rpcsec_gss(void)
 }
 
 MODULE_LICENSE("GPL");
+module_param_named(expired_cred_retry_delay,
+                   gss_expired_cred_retry_delay,
+                   uint, 0644);
+MODULE_PARM_DESC(expired_cred_retry_delay, "Timeout (in seconds) until "
+                "the RPC engine retries an expired credential");
+
 module_init(init_rpcsec_gss)
 module_exit(exit_rpcsec_gss)
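
With GSS_RETRY_EXPIRED and the module_param_named() above, a credential whose kerberos key has expired is marked RPCAUTH_CRED_NEGATIVE, and gss_refresh() fails fast with -EKEYEXPIRED for gss_expired_cred_retry_delay seconds (default 5) instead of re-issuing an upcall to gssd on every RPC. Because the parameter is declared with mode 0644, it should also be tunable at runtime, presumably via sysfs at /sys/module/auth_rpcgss/parameters/expired_cred_retry_delay (path inferred from the module name; it is not stated in the patch itself).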
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index e9b636176687..75ee993ea057 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -1,7 +1,7 @@
 /*
  *  linux/net/sunrpc/gss_krb5_crypto.c
  *
- *  Copyright (c) 2000 The Regents of the University of Michigan.
+ *  Copyright (c) 2000-2008 The Regents of the University of Michigan.
  *  All rights reserved.
  *
  *  Andy Adamson   <andros@umich.edu>
@@ -41,6 +41,7 @@
 #include <linux/crypto.h>
 #include <linux/highmem.h>
 #include <linux/pagemap.h>
+#include <linux/random.h>
 #include <linux/sunrpc/gss_krb5.h>
 #include <linux/sunrpc/xdr.h>
 
@@ -58,13 +59,13 @@ krb5_encrypt(
 {
         u32 ret = -EINVAL;
         struct scatterlist sg[1];
-        u8 local_iv[16] = {0};
+        u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
         struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };
 
         if (length % crypto_blkcipher_blocksize(tfm) != 0)
                 goto out;
 
-        if (crypto_blkcipher_ivsize(tfm) > 16) {
+        if (crypto_blkcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
                 dprintk("RPC:       gss_k5encrypt: tfm iv size too large %d\n",
                         crypto_blkcipher_ivsize(tfm));
                 goto out;
@@ -92,13 +93,13 @@ krb5_decrypt(
 {
         u32 ret = -EINVAL;
         struct scatterlist sg[1];
-        u8 local_iv[16] = {0};
+        u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
         struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };
 
         if (length % crypto_blkcipher_blocksize(tfm) != 0)
                 goto out;
 
-        if (crypto_blkcipher_ivsize(tfm) > 16) {
+        if (crypto_blkcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
                 dprintk("RPC:       gss_k5decrypt: tfm iv size too large %d\n",
                         crypto_blkcipher_ivsize(tfm));
                 goto out;
@@ -123,21 +124,155 @@ checksummer(struct scatterlist *sg, void *data)
         return crypto_hash_update(desc, sg, sg->length);
 }
 
-/* checksum the plaintext data and hdrlen bytes of the token header */
-s32
-make_checksum(char *cksumname, char *header, int hdrlen, struct xdr_buf *body,
-                int body_offset, struct xdr_netobj *cksum)
+static int
+arcfour_hmac_md5_usage_to_salt(unsigned int usage, u8 salt[4])
+{
+        unsigned int ms_usage;
+
+        switch (usage) {
+        case KG_USAGE_SIGN:
+                ms_usage = 15;
+                break;
+        case KG_USAGE_SEAL:
+                ms_usage = 13;
+                break;
+        default:
+                return EINVAL;;
+        }
+        salt[0] = (ms_usage >> 0) & 0xff;
+        salt[1] = (ms_usage >> 8) & 0xff;
+        salt[2] = (ms_usage >> 16) & 0xff;
+        salt[3] = (ms_usage >> 24) & 0xff;
+
+        return 0;
+}
+
+static u32
+make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen,
+                       struct xdr_buf *body, int body_offset, u8 *cksumkey,
+                       unsigned int usage, struct xdr_netobj *cksumout)
 {
-        struct hash_desc desc; /* XXX add to ctx? */
+        struct hash_desc desc;
         struct scatterlist sg[1];
         int err;
+        u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN];
+        u8 rc4salt[4];
+        struct crypto_hash *md5;
+        struct crypto_hash *hmac_md5;
+
+        if (cksumkey == NULL)
+                return GSS_S_FAILURE;
+
+        if (cksumout->len < kctx->gk5e->cksumlength) {
+                dprintk("%s: checksum buffer length, %u, too small for %s\n",
+                        __func__, cksumout->len, kctx->gk5e->name);
+                return GSS_S_FAILURE;
+        }
+
+        if (arcfour_hmac_md5_usage_to_salt(usage, rc4salt)) {
+                dprintk("%s: invalid usage value %u\n", __func__, usage);
+                return GSS_S_FAILURE;
+        }
+
+        md5 = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
+        if (IS_ERR(md5))
+                return GSS_S_FAILURE;
+
+        hmac_md5 = crypto_alloc_hash(kctx->gk5e->cksum_name, 0,
+                                     CRYPTO_ALG_ASYNC);
+        if (IS_ERR(hmac_md5)) {
+                crypto_free_hash(md5);
+                return GSS_S_FAILURE;
+        }
+
+        desc.tfm = md5;
+        desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+        err = crypto_hash_init(&desc);
+        if (err)
+                goto out;
+        sg_init_one(sg, rc4salt, 4);
+        err = crypto_hash_update(&desc, sg, 4);
+        if (err)
+                goto out;
+
+        sg_init_one(sg, header, hdrlen);
+        err = crypto_hash_update(&desc, sg, hdrlen);
+        if (err)
+                goto out;
+        err = xdr_process_buf(body, body_offset, body->len - body_offset,
+                              checksummer, &desc);
+        if (err)
+                goto out;
+        err = crypto_hash_final(&desc, checksumdata);
+        if (err)
+                goto out;
+
+        desc.tfm = hmac_md5;
+        desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+        err = crypto_hash_init(&desc);
+        if (err)
+                goto out;
+        err = crypto_hash_setkey(hmac_md5, cksumkey, kctx->gk5e->keylength);
+        if (err)
+                goto out;
+
+        sg_init_one(sg, checksumdata, crypto_hash_digestsize(md5));
+        err = crypto_hash_digest(&desc, sg, crypto_hash_digestsize(md5),
+                                 checksumdata);
+        if (err)
+                goto out;
+
+        memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
+        cksumout->len = kctx->gk5e->cksumlength;
+out:
+        crypto_free_hash(md5);
+        crypto_free_hash(hmac_md5);
+        return err ? GSS_S_FAILURE : 0;
+}
+
+/*
+ * checksum the plaintext data and hdrlen bytes of the token header
+ * The checksum is performed over the first 8 bytes of the
+ * gss token header and then over the data body
+ */
+u32
+make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen,
+              struct xdr_buf *body, int body_offset, u8 *cksumkey,
+              unsigned int usage, struct xdr_netobj *cksumout)
+{
+        struct hash_desc desc;
+        struct scatterlist sg[1];
+        int err;
+        u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN];
+        unsigned int checksumlen;
+
+        if (kctx->gk5e->ctype == CKSUMTYPE_HMAC_MD5_ARCFOUR)
+                return make_checksum_hmac_md5(kctx, header, hdrlen,
+                                              body, body_offset,
+                                              cksumkey, usage, cksumout);
+
+        if (cksumout->len < kctx->gk5e->cksumlength) {
+                dprintk("%s: checksum buffer length, %u, too small for %s\n",
+                        __func__, cksumout->len, kctx->gk5e->name);
+                return GSS_S_FAILURE;
+        }
 
-        desc.tfm = crypto_alloc_hash(cksumname, 0, CRYPTO_ALG_ASYNC);
+        desc.tfm = crypto_alloc_hash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
         if (IS_ERR(desc.tfm))
                 return GSS_S_FAILURE;
-        cksum->len = crypto_hash_digestsize(desc.tfm);
         desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 
+        checksumlen = crypto_hash_digestsize(desc.tfm);
+
+        if (cksumkey != NULL) {
+                err = crypto_hash_setkey(desc.tfm, cksumkey,
+                                         kctx->gk5e->keylength);
+                if (err)
+                        goto out;
+        }
+
         err = crypto_hash_init(&desc);
         if (err)
                 goto out;
@@ -149,15 +284,109 @@ make_checksum(char *cksumname, char *header, int hdrlen, struct xdr_buf *body,
                               checksummer, &desc);
         if (err)
                 goto out;
-        err = crypto_hash_final(&desc, cksum->data);
+        err = crypto_hash_final(&desc, checksumdata);
+        if (err)
+                goto out;
 
+        switch (kctx->gk5e->ctype) {
+        case CKSUMTYPE_RSA_MD5:
+                err = kctx->gk5e->encrypt(kctx->seq, NULL, checksumdata,
+                                          checksumdata, checksumlen);
+                if (err)
+                        goto out;
+                memcpy(cksumout->data,
+                       checksumdata + checksumlen - kctx->gk5e->cksumlength,
+                       kctx->gk5e->cksumlength);
+                break;
+        case CKSUMTYPE_HMAC_SHA1_DES3:
+                memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
+                break;
+        default:
+                BUG();
+                break;
+        }
+        cksumout->len = kctx->gk5e->cksumlength;
+out:
+        crypto_free_hash(desc.tfm);
+        return err ? GSS_S_FAILURE : 0;
+}
+
+/*
+ * checksum the plaintext data and hdrlen bytes of the token header
+ * Per rfc4121, sec. 4.2.4, the checksum is performed over the data
+ * body then over the first 16 octets of the MIC token
+ * Inclusion of the header data in the calculation of the
+ * checksum is optional.
+ */
+u32
+make_checksum_v2(struct krb5_ctx *kctx, char *header, int hdrlen,
+                 struct xdr_buf *body, int body_offset, u8 *cksumkey,
+                 unsigned int usage, struct xdr_netobj *cksumout)
+{
+        struct hash_desc desc;
+        struct scatterlist sg[1];
+        int err;
+        u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN];
+        unsigned int checksumlen;
+
+        if (kctx->gk5e->keyed_cksum == 0) {
+                dprintk("%s: expected keyed hash for %s\n",
+                        __func__, kctx->gk5e->name);
+                return GSS_S_FAILURE;
+        }
+        if (cksumkey == NULL) {
+                dprintk("%s: no key supplied for %s\n",
+                        __func__, kctx->gk5e->name);
+                return GSS_S_FAILURE;
+        }
+
+        desc.tfm = crypto_alloc_hash(kctx->gk5e->cksum_name, 0,
+                                     CRYPTO_ALG_ASYNC);
+        if (IS_ERR(desc.tfm))
+                return GSS_S_FAILURE;
+        checksumlen = crypto_hash_digestsize(desc.tfm);
+        desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+        err = crypto_hash_setkey(desc.tfm, cksumkey, kctx->gk5e->keylength);
+        if (err)
+                goto out;
+
+        err = crypto_hash_init(&desc);
+        if (err)
+                goto out;
+        err = xdr_process_buf(body, body_offset, body->len - body_offset,
+                              checksummer, &desc);
+        if (err)
+                goto out;
+        if (header != NULL) {
+                sg_init_one(sg, header, hdrlen);
+                err = crypto_hash_update(&desc, sg, hdrlen);
+                if (err)
+                        goto out;
+        }
+        err = crypto_hash_final(&desc, checksumdata);
+        if (err)
+                goto out;
+
+        cksumout->len = kctx->gk5e->cksumlength;
+
+        switch (kctx->gk5e->ctype) {
+        case CKSUMTYPE_HMAC_SHA1_96_AES128:
+        case CKSUMTYPE_HMAC_SHA1_96_AES256:
+                /* note that this truncates the hash */
+                memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
+                break;
+        default:
+                BUG();
+                break;
+        }
 out:
         crypto_free_hash(desc.tfm);
         return err ? GSS_S_FAILURE : 0;
 }
 
 struct encryptor_desc {
-        u8 iv[8]; /* XXX hard-coded blocksize */
+        u8 iv[GSS_KRB5_MAX_BLOCKSIZE];
         struct blkcipher_desc desc;
         int pos;
         struct xdr_buf *outbuf;
@@ -198,7 +427,7 @@ encryptor(struct scatterlist *sg, void *data)
         desc->fraglen += sg->length;
         desc->pos += sg->length;
 
-        fraglen = thislen & 7; /* XXX hardcoded blocksize */
+        fraglen = thislen & (crypto_blkcipher_blocksize(desc->desc.tfm) - 1);
         thislen -= fraglen;
 
         if (thislen == 0)
@@ -256,7 +485,7 @@ gss_encrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
 }
 
 struct decryptor_desc {
-        u8 iv[8]; /* XXX hard-coded blocksize */
+        u8 iv[GSS_KRB5_MAX_BLOCKSIZE];
         struct blkcipher_desc desc;
         struct scatterlist frags[4];
         int fragno;
@@ -278,7 +507,7 @@ decryptor(struct scatterlist *sg, void *data)
         desc->fragno++;
         desc->fraglen += sg->length;
 
-        fraglen = thislen & 7; /* XXX hardcoded blocksize */
+        fraglen = thislen & (crypto_blkcipher_blocksize(desc->desc.tfm) - 1);
         thislen -= fraglen;
 
         if (thislen == 0)
@@ -325,3 +554,437 @@ gss_decrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
 
         return xdr_process_buf(buf, offset, buf->len - offset, decryptor, &desc);
 }
+
+/*
+ * This function makes the assumption that it was ultimately called
+ * from gss_wrap().
+ *
+ * The client auth_gss code moves any existing tail data into a
+ * separate page before calling gss_wrap.
+ * The server svcauth_gss code ensures that both the head and the
+ * tail have slack space of RPC_MAX_AUTH_SIZE before calling gss_wrap.
+ *
+ * Even with that guarantee, this function may be called more than
+ * once in the processing of gss_wrap(). The best we can do is
+ * verify at compile-time (see GSS_KRB5_SLACK_CHECK) that the
+ * largest expected shift will fit within RPC_MAX_AUTH_SIZE.
+ * At run-time we can verify that a single invocation of this
+ * function doesn't attempt to use more the RPC_MAX_AUTH_SIZE.
+ */
+
+int
+xdr_extend_head(struct xdr_buf *buf, unsigned int base, unsigned int shiftlen)
+{
+        u8 *p;
+
+        if (shiftlen == 0)
+                return 0;
+
+        BUILD_BUG_ON(GSS_KRB5_MAX_SLACK_NEEDED > RPC_MAX_AUTH_SIZE);
+        BUG_ON(shiftlen > RPC_MAX_AUTH_SIZE);
+
+        p = buf->head[0].iov_base + base;
+
+        memmove(p + shiftlen, p, buf->head[0].iov_len - base);
+
+        buf->head[0].iov_len += shiftlen;
+        buf->len += shiftlen;
+
+        return 0;
+}
+
+static u32
+gss_krb5_cts_crypt(struct crypto_blkcipher *cipher, struct xdr_buf *buf,
+                   u32 offset, u8 *iv, struct page **pages, int encrypt)
+{
+        u32 ret;
+        struct scatterlist sg[1];
+        struct blkcipher_desc desc = { .tfm = cipher, .info = iv };
+        u8 data[crypto_blkcipher_blocksize(cipher) * 2];
+        struct page **save_pages;
+        u32 len = buf->len - offset;
+
+        BUG_ON(len > crypto_blkcipher_blocksize(cipher) * 2);
+
+        /*
+         * For encryption, we want to read from the cleartext
+         * page cache pages, and write the encrypted data to
+         * the supplied xdr_buf pages.
+         */
+        save_pages = buf->pages;
+        if (encrypt)
+                buf->pages = pages;
+
+        ret = read_bytes_from_xdr_buf(buf, offset, data, len);
+        buf->pages = save_pages;
+        if (ret)
+                goto out;
+
+        sg_init_one(sg, data, len);
+
+        if (encrypt)
+                ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, len);
+        else
+                ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, len);
+
+        if (ret)
+                goto out;
+
+        ret = write_bytes_to_xdr_buf(buf, offset, data, len);
+
+out:
+        return ret;
+}
+
+u32
+gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
+                     struct xdr_buf *buf, int ec, struct page **pages)
+{
+        u32 err;
+        struct xdr_netobj hmac;
+        u8 *cksumkey;
+        u8 *ecptr;
+        struct crypto_blkcipher *cipher, *aux_cipher;
+        int blocksize;
+        struct page **save_pages;
+        int nblocks, nbytes;
+        struct encryptor_desc desc;
+        u32 cbcbytes;
+        unsigned int usage;
+
+        if (kctx->initiate) {
+                cipher = kctx->initiator_enc;
+                aux_cipher = kctx->initiator_enc_aux;
+                cksumkey = kctx->initiator_integ;
+                usage = KG_USAGE_INITIATOR_SEAL;
+        } else {
+                cipher = kctx->acceptor_enc;
+                aux_cipher = kctx->acceptor_enc_aux;
+                cksumkey = kctx->acceptor_integ;
+                usage = KG_USAGE_ACCEPTOR_SEAL;
+        }
+        blocksize = crypto_blkcipher_blocksize(cipher);
+
+        /* hide the gss token header and insert the confounder */
+        offset += GSS_KRB5_TOK_HDR_LEN;
+        if (xdr_extend_head(buf, offset, kctx->gk5e->conflen))
+                return GSS_S_FAILURE;
+        gss_krb5_make_confounder(buf->head[0].iov_base + offset, kctx->gk5e->conflen);
+        offset -= GSS_KRB5_TOK_HDR_LEN;
+
+        if (buf->tail[0].iov_base != NULL) {
+                ecptr = buf->tail[0].iov_base + buf->tail[0].iov_len;
+        } else {
+                buf->tail[0].iov_base = buf->head[0].iov_base
+                        + buf->head[0].iov_len;
+                buf->tail[0].iov_len = 0;
+                ecptr = buf->tail[0].iov_base;
+        }
+
+        memset(ecptr, 'X', ec);
+        buf->tail[0].iov_len += ec;
+        buf->len += ec;
+
+        /* copy plaintext gss token header after filler (if any) */
+        memcpy(ecptr + ec, buf->head[0].iov_base + offset,
+               GSS_KRB5_TOK_HDR_LEN);
+        buf->tail[0].iov_len += GSS_KRB5_TOK_HDR_LEN;
+        buf->len += GSS_KRB5_TOK_HDR_LEN;
+
+        /* Do the HMAC */
+        hmac.len = GSS_KRB5_MAX_CKSUM_LEN;
+        hmac.data = buf->tail[0].iov_base + buf->tail[0].iov_len;
+
+        /*
+         * When we are called, pages points to the real page cache
+         * data -- which we can't go and encrypt! buf->pages points
+         * to scratch pages which we are going to send off to the
+         * client/server. Swap in the plaintext pages to calculate
+         * the hmac.
+         */
+        save_pages = buf->pages;
+        buf->pages = pages;
+
+        err = make_checksum_v2(kctx, NULL, 0, buf,
+                               offset + GSS_KRB5_TOK_HDR_LEN,
+                               cksumkey, usage, &hmac);
+        buf->pages = save_pages;
+        if (err)
+                return GSS_S_FAILURE;
+
+        nbytes = buf->len - offset - GSS_KRB5_TOK_HDR_LEN;
+        nblocks = (nbytes + blocksize - 1) / blocksize;
+        cbcbytes = 0;
+        if (nblocks > 2)
+                cbcbytes = (nblocks - 2) * blocksize;
+
+        memset(desc.iv, 0, sizeof(desc.iv));
+
+        if (cbcbytes) {
+                desc.pos = offset + GSS_KRB5_TOK_HDR_LEN;
+                desc.fragno = 0;
+                desc.fraglen = 0;
+                desc.pages = pages;
+                desc.outbuf = buf;
+                desc.desc.info = desc.iv;
+                desc.desc.flags = 0;
+                desc.desc.tfm = aux_cipher;
+
+                sg_init_table(desc.infrags, 4);
+                sg_init_table(desc.outfrags, 4);
+
+                err = xdr_process_buf(buf, offset + GSS_KRB5_TOK_HDR_LEN,
+                                      cbcbytes, encryptor, &desc);
+                if (err)
+                        goto out_err;
+        }
+
+        /* Make sure IV carries forward from any CBC results. */
+        err = gss_krb5_cts_crypt(cipher, buf,
+                                 offset + GSS_KRB5_TOK_HDR_LEN + cbcbytes,
+                                 desc.iv, pages, 1);
+        if (err) {
+                err = GSS_S_FAILURE;
+                goto out_err;
+        }
+
+        /* Now update buf to account for HMAC */
+        buf->tail[0].iov_len += kctx->gk5e->cksumlength;
+        buf->len += kctx->gk5e->cksumlength;
+
+out_err:
+        if (err)
+                err = GSS_S_FAILURE;
+        return err;
+}
+
+u32
+gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf,
+                     u32 *headskip, u32 *tailskip)
+{
+        struct xdr_buf subbuf;
+        u32 ret = 0;
+        u8 *cksum_key;
+        struct crypto_blkcipher *cipher, *aux_cipher;
+        struct xdr_netobj our_hmac_obj;
+        u8 our_hmac[GSS_KRB5_MAX_CKSUM_LEN];
+        u8 pkt_hmac[GSS_KRB5_MAX_CKSUM_LEN];
+        int nblocks, blocksize, cbcbytes;
+        struct decryptor_desc desc;
+        unsigned int usage;
+
+        if (kctx->initiate) {
+                cipher = kctx->acceptor_enc;
+                aux_cipher = kctx->acceptor_enc_aux;
+                cksum_key = kctx->acceptor_integ;
+                usage = KG_USAGE_ACCEPTOR_SEAL;
+        } else {
+                cipher = kctx->initiator_enc;
+                aux_cipher = kctx->initiator_enc_aux;
+                cksum_key = kctx->initiator_integ;
+                usage = KG_USAGE_INITIATOR_SEAL;
+        }
+        blocksize = crypto_blkcipher_blocksize(cipher);
+
+
+        /* create a segment skipping the header and leaving out the checksum */
+        xdr_buf_subsegment(buf, &subbuf, offset + GSS_KRB5_TOK_HDR_LEN,
+                           (buf->len - offset - GSS_KRB5_TOK_HDR_LEN -
+                            kctx->gk5e->cksumlength));
+
+        nblocks = (subbuf.len + blocksize - 1) / blocksize;
+
+        cbcbytes = 0;
+        if (nblocks > 2)
+                cbcbytes = (nblocks - 2) * blocksize;
+
+        memset(desc.iv, 0, sizeof(desc.iv));
+
+        if (cbcbytes) {
+                desc.fragno = 0;
+                desc.fraglen = 0;
+                desc.desc.info = desc.iv;
+                desc.desc.flags = 0;
+                desc.desc.tfm = aux_cipher;
+
+                sg_init_table(desc.frags, 4);
+
+                ret = xdr_process_buf(&subbuf, 0, cbcbytes, decryptor, &desc);
+                if (ret)
+                        goto out_err;
+        }
+
+        /* Make sure IV carries forward from any CBC results. */
+        ret = gss_krb5_cts_crypt(cipher, &subbuf, cbcbytes, desc.iv, NULL, 0);
+        if (ret)
+                goto out_err;
+
+
+        /* Calculate our hmac over the plaintext data */
+        our_hmac_obj.len = sizeof(our_hmac);
+        our_hmac_obj.data = our_hmac;
+
+        ret = make_checksum_v2(kctx, NULL, 0, &subbuf, 0,
+                               cksum_key, usage, &our_hmac_obj);
+        if (ret)
+                goto out_err;
+
+        /* Get the packet's hmac value */
+        ret = read_bytes_from_xdr_buf(buf, buf->len - kctx->gk5e->cksumlength,
+                                      pkt_hmac, kctx->gk5e->cksumlength);
+        if (ret)
+                goto out_err;
+
+        if (memcmp(pkt_hmac, our_hmac, kctx->gk5e->cksumlength) != 0) {
+                ret = GSS_S_BAD_SIG;
+                goto out_err;
+        }
+        *headskip = kctx->gk5e->conflen;
+        *tailskip = kctx->gk5e->cksumlength;
+out_err:
+        if (ret && ret != GSS_S_BAD_SIG)
+                ret = GSS_S_FAILURE;
+        return ret;
+}
+
+/*
+ * Compute Kseq given the initial session key and the checksum.
+ * Set the key of the given cipher.
+ */
+int
+krb5_rc4_setup_seq_key(struct krb5_ctx *kctx, struct crypto_blkcipher *cipher,
+                       unsigned char *cksum)
+{
+        struct crypto_hash *hmac;
+        struct hash_desc desc;
+        struct scatterlist sg[1];
+        u8 Kseq[GSS_KRB5_MAX_KEYLEN];
+        u32 zeroconstant = 0;
+        int err;
+
+        dprintk("%s: entered\n", __func__);
+
+        hmac = crypto_alloc_hash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
+        if (IS_ERR(hmac)) {
+                dprintk("%s: error %ld, allocating hash '%s'\n",
+                        __func__, PTR_ERR(hmac), kctx->gk5e->cksum_name);
+                return PTR_ERR(hmac);
+        }
+
+        desc.tfm = hmac;
+        desc.flags = 0;
+
+        err = crypto_hash_init(&desc);
+        if (err)
+                goto out_err;
+
+        /* Compute intermediate Kseq from session key */
+        err = crypto_hash_setkey(hmac, kctx->Ksess, kctx->gk5e->keylength);
+        if (err)
+                goto out_err;
+
+        sg_init_table(sg, 1);
+        sg_set_buf(sg, &zeroconstant, 4);
+
+        err = crypto_hash_digest(&desc, sg, 4, Kseq);
+        if (err)
+                goto out_err;
+
+        /* Compute final Kseq from the checksum and intermediate Kseq */
+        err = crypto_hash_setkey(hmac, Kseq, kctx->gk5e->keylength);
+        if (err)
+                goto out_err;
+
+        sg_set_buf(sg, cksum, 8);
+
+        err = crypto_hash_digest(&desc, sg, 8, Kseq);
+        if (err)
+                goto out_err;
+
+        err = crypto_blkcipher_setkey(cipher, Kseq, kctx->gk5e->keylength);
+        if (err)
+                goto out_err;
+
+        err = 0;
+
+out_err:
+        crypto_free_hash(hmac);
+        dprintk("%s: returning %d\n", __func__, err);
+        return err;
+}
+
+/*
+ * Compute Kcrypt given the initial session key and the plaintext seqnum.
+ * Set the key of cipher kctx->enc.
+ */
+int
+krb5_rc4_setup_enc_key(struct krb5_ctx *kctx, struct crypto_blkcipher *cipher,
+                       s32 seqnum)
+{
+        struct crypto_hash *hmac;
+        struct hash_desc desc;
+        struct scatterlist sg[1];
+        u8 Kcrypt[GSS_KRB5_MAX_KEYLEN];
+        u8 zeroconstant[4] = {0};
+        u8 seqnumarray[4];
+        int err, i;
+
+        dprintk("%s: entered, seqnum %u\n", __func__, seqnum);
+
+        hmac = crypto_alloc_hash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
+        if (IS_ERR(hmac)) {
+                dprintk("%s: error %ld, allocating hash '%s'\n",
+                        __func__, PTR_ERR(hmac), kctx->gk5e->cksum_name);
+                return PTR_ERR(hmac);
+        }
+
+        desc.tfm = hmac;
+        desc.flags = 0;
+
+        err = crypto_hash_init(&desc);
+        if (err)
+                goto out_err;
+
+        /* Compute intermediate Kcrypt from session key */
+        for (i = 0; i < kctx->gk5e->keylength; i++)
+                Kcrypt[i] = kctx->Ksess[i] ^ 0xf0;
+
+        err = crypto_hash_setkey(hmac, Kcrypt, kctx->gk5e->keylength);
+        if (err)
+                goto out_err;
+
+        sg_init_table(sg, 1);
+        sg_set_buf(sg, zeroconstant, 4);
+
+        err = crypto_hash_digest(&desc, sg, 4, Kcrypt);
+        if (err)
+                goto out_err;
+
+        /* Compute final Kcrypt from the seqnum and intermediate Kcrypt */
+        err = crypto_hash_setkey(hmac, Kcrypt, kctx->gk5e->keylength);
+        if (err)
+                goto out_err;
+
+        seqnumarray[0] = (unsigned char) ((seqnum >> 24) & 0xff);
+        seqnumarray[1] = (unsigned char) ((seqnum >> 16) & 0xff);
+        seqnumarray[2] = (unsigned char) ((seqnum >> 8) & 0xff);
+        seqnumarray[3] = (unsigned char) ((seqnum >> 0) & 0xff);
+
+        sg_set_buf(sg, seqnumarray, 4);
+
+        err = crypto_hash_digest(&desc, sg, 4, Kcrypt);
+        if (err)
+                goto out_err;
+
+        err = crypto_blkcipher_setkey(cipher, Kcrypt, kctx->gk5e->keylength);
+        if (err)
+                goto out_err;
+
+        err = 0;
+
+out_err:
+        crypto_free_hash(hmac);
+        dprintk("%s: returning %d\n", __func__, err);
+        return err;
+}
+
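
gss_krb5_aes_encrypt() and gss_krb5_aes_decrypt() above split the payload between two transforms: all but the last two cipher blocks go through the plain CBC tfm (aux_cipher) via xdr_process_buf(), and gss_krb5_cts_crypt() finishes the final one or two blocks with ciphertext stealing, reusing desc.iv so the IV carries forward from the CBC stage. A standalone userspace sketch of that split computation (the constants simply mirror the kernel code above):

/* Minimal sketch of the CBC/CTS split used by gss_krb5_aes_encrypt(). */
#include <stdio.h>

int main(void)
{
        unsigned int nbytes = 100, blocksize = 16;  /* AES block size */
        unsigned int nblocks = (nbytes + blocksize - 1) / blocksize;  /* 7 */
        unsigned int cbcbytes = nblocks > 2 ? (nblocks - 2) * blocksize : 0;

        /* 80 bytes go through the plain CBC tfm; the trailing 20 bytes
         * (the last full block plus a 4-byte partial block) go through
         * the ciphertext-stealing step, so no length expansion occurs. */
        printf("cbc=%u, cts=%u\n", cbcbytes, nbytes - cbcbytes);
        return 0;
}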
diff --git a/net/sunrpc/auth_gss/gss_krb5_keys.c b/net/sunrpc/auth_gss/gss_krb5_keys.c
new file mode 100644
index 000000000000..76e42e6be755
--- /dev/null
+++ b/net/sunrpc/auth_gss/gss_krb5_keys.c
@@ -0,0 +1,336 @@
+/*
+ * COPYRIGHT (c) 2008
+ * The Regents of the University of Michigan
+ * ALL RIGHTS RESERVED
+ *
+ * Permission is granted to use, copy, create derivative works
+ * and redistribute this software and such derivative works
+ * for any purpose, so long as the name of The University of
+ * Michigan is not used in any advertising or publicity
+ * pertaining to the use of distribution of this software
+ * without specific, written prior authorization.  If the
+ * above copyright notice or any other identification of the
+ * University of Michigan is included in any copy of any
+ * portion of this software, then the disclaimer below must
+ * also be included.
+ *
+ * THIS SOFTWARE IS PROVIDED AS IS, WITHOUT REPRESENTATION
+ * FROM THE UNIVERSITY OF MICHIGAN AS TO ITS FITNESS FOR ANY
+ * PURPOSE, AND WITHOUT WARRANTY BY THE UNIVERSITY OF
+ * MICHIGAN OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING
+ * WITHOUT LIMITATION THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE
+ * REGENTS OF THE UNIVERSITY OF MICHIGAN SHALL NOT BE LIABLE
+ * FOR ANY DAMAGES, INCLUDING SPECIAL, INDIRECT, INCIDENTAL, OR
+ * CONSEQUENTIAL DAMAGES, WITH RESPECT TO ANY CLAIM ARISING
+ * OUT OF OR IN CONNECTION WITH THE USE OF THE SOFTWARE, EVEN
+ * IF IT HAS BEEN OR IS HEREAFTER ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGES.
+ */
+
+/*
+ * Copyright (C) 1998 by the FundsXpress, INC.
+ *
+ * All rights reserved.
+ *
+ * Export of this software from the United States of America may require
+ * a specific license from the United States Government.  It is the
+ * responsibility of any person or organization contemplating export to
+ * obtain such a license before exporting.
+ *
+ * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
+ * distribute this software and its documentation for any purpose and
+ * without fee is hereby granted, provided that the above copyright
+ * notice appear in all copies and that both that copyright notice and
+ * this permission notice appear in supporting documentation, and that
+ * the name of FundsXpress. not be used in advertising or publicity pertaining
+ * to distribution of the software without specific, written prior
+ * permission.  FundsXpress makes no representations about the suitability of
+ * this software for any purpose.  It is provided "as is" without express
+ * or implied warranty.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <linux/err.h>
+#include <linux/types.h>
+#include <linux/crypto.h>
+#include <linux/sunrpc/gss_krb5.h>
+#include <linux/sunrpc/xdr.h>
+
+#ifdef RPC_DEBUG
+# define RPCDBG_FACILITY        RPCDBG_AUTH
+#endif
+
+/*
+ * This is the n-fold function as described in rfc3961, sec 5.1
+ * Taken from MIT Kerberos and modified.
+ */
+
+static void krb5_nfold(u32 inbits, const u8 *in,
+                       u32 outbits, u8 *out)
+{
+        int a, b, c, lcm;
+        int byte, i, msbit;
+
+        /* the code below is more readable if I make these bytes
+           instead of bits */
+
+        inbits >>= 3;
+        outbits >>= 3;
+
+        /* first compute lcm(n,k) */
+
+        a = outbits;
+        b = inbits;
+
+        while (b != 0) {
+                c = b;
+                b = a%b;
+                a = c;
+        }
+
+        lcm = outbits*inbits/a;
+
+        /* now do the real work */
+
+        memset(out, 0, outbits);
+        byte = 0;
+
+        /* this will end up cycling through k lcm(k,n)/k times, which
+           is correct */
+        for (i = lcm-1; i >= 0; i--) {
+                /* compute the msbit in k which gets added into this byte */
+                msbit = (
+                        /* first, start with the msbit in the first,
+                         * unrotated byte */
+                         ((inbits << 3) - 1)
+                        /* then, for each byte, shift to the right
+                         * for each repetition */
+                        + (((inbits << 3) + 13) * (i/inbits))
+                        /* last, pick out the correct byte within
+                         * that shifted repetition */
+                        + ((inbits - (i % inbits)) << 3)
+                        ) % (inbits << 3);
+
+                /* pull out the byte value itself */
+                byte += (((in[((inbits - 1) - (msbit >> 3)) % inbits] << 8)|
+                                  (in[((inbits) - (msbit >> 3)) % inbits]))
+                                 >> ((msbit & 7) + 1)) & 0xff;
+
+                /* do the addition */
+                byte += out[i % outbits];
+                out[i % outbits] = byte & 0xff;
+
+                /* keep around the carry bit, if any */
+                byte >>= 8;
+
+        }
+
+        /* if there's a carry bit left over, add it back in */
+        if (byte) {
+                for (i = outbits - 1; i >= 0; i--) {
+                        /* do the addition */
+                        byte += out[i];
+                        out[i] = byte & 0xff;
+
+                        /* keep around the carry bit, if any */
+                        byte >>= 8;
+                }
+        }
+}
+
+/*
+ * This is the DK (derive_key) function as described in rfc3961, sec 5.1
+ * Taken from MIT Kerberos and modified.
+ */
+
+u32 krb5_derive_key(const struct gss_krb5_enctype *gk5e,
+                    const struct xdr_netobj *inkey,
+                    struct xdr_netobj *outkey,
+                    const struct xdr_netobj *in_constant,
+                    gfp_t gfp_mask)
+{
+        size_t blocksize, keybytes, keylength, n;
+        unsigned char *inblockdata, *outblockdata, *rawkey;
+        struct xdr_netobj inblock, outblock;
+        struct crypto_blkcipher *cipher;
+        u32 ret = EINVAL;
+
+        blocksize = gk5e->blocksize;
+        keybytes = gk5e->keybytes;
+        keylength = gk5e->keylength;
+
+        if ((inkey->len != keylength) || (outkey->len != keylength))
+                goto err_return;
+
+        cipher = crypto_alloc_blkcipher(gk5e->encrypt_name, 0,
+                                        CRYPTO_ALG_ASYNC);
+        if (IS_ERR(cipher))
+                goto err_return;
+        if (crypto_blkcipher_setkey(cipher, inkey->data, inkey->len))
+                goto err_return;
+
+        /* allocate and set up buffers */
+
+        ret = ENOMEM;
+        inblockdata = kmalloc(blocksize, gfp_mask);
+        if (inblockdata == NULL)
+                goto err_free_cipher;
+
+        outblockdata = kmalloc(blocksize, gfp_mask);
+        if (outblockdata == NULL)
+                goto err_free_in;
+
+        rawkey = kmalloc(keybytes, gfp_mask);
+        if (rawkey == NULL)
+                goto err_free_out;
+
+        inblock.data = (char *) inblockdata;
+        inblock.len = blocksize;
+
+        outblock.data = (char *) outblockdata;
+        outblock.len = blocksize;
+
+        /* initialize the input block */
+
+        if (in_constant->len == inblock.len) {
+                memcpy(inblock.data, in_constant->data, inblock.len);
+        } else {
+                krb5_nfold(in_constant->len * 8, in_constant->data,
+                           inblock.len * 8, inblock.data);
+        }
+
+        /* loop encrypting the blocks until enough key bytes are generated */
+
+        n = 0;
+        while (n < keybytes) {
+                (*(gk5e->encrypt))(cipher, NULL, inblock.data,
+                                   outblock.data, inblock.len);
+
+                if ((keybytes - n) <= outblock.len) {
+                        memcpy(rawkey + n, outblock.data, (keybytes - n));
+                        break;
+                }
+
+                memcpy(rawkey + n, outblock.data, outblock.len);
+                memcpy(inblock.data, outblock.data, outblock.len);
+                n += outblock.len;
+        }
+
+        /* postprocess the key */
+
+        inblock.data = (char *) rawkey;
+        inblock.len = keybytes;
+
+        BUG_ON(gk5e->mk_key == NULL);
+        ret = (*(gk5e->mk_key))(gk5e, &inblock, outkey);
+        if (ret) {
+                dprintk("%s: got %d from mk_key function for '%s'\n",
+                        __func__, ret, gk5e->encrypt_name);
+                goto err_free_raw;
+        }
+
+        /* clean memory, free resources and exit */
+
+        ret = 0;
+
+err_free_raw:
+        memset(rawkey, 0, keybytes);
+        kfree(rawkey);
+err_free_out:
+        memset(outblockdata, 0, blocksize);
+        kfree(outblockdata);
+err_free_in:
+        memset(inblockdata, 0, blocksize);
+        kfree(inblockdata);
+err_free_cipher:
+        crypto_free_blkcipher(cipher);
+err_return:
+        return ret;
+}
+
+#define smask(step) ((1<<step)-1)
+#define pstep(x, step) (((x)&smask(step))^(((x)>>step)&smask(step)))
+#define parity_char(x) pstep(pstep(pstep((x), 4), 2), 1)
+
+static void mit_des_fixup_key_parity(u8 key[8])
+{
+        int i;
+        for (i = 0; i < 8; i++) {
+                key[i] &= 0xfe;
+                key[i] |= 1^parity_char(key[i]);
+        }
+}
+
+/*
+ * This is the des3 key derivation postprocess function
+ */
+u32 gss_krb5_des3_make_key(const struct gss_krb5_enctype *gk5e,
+                           struct xdr_netobj *randombits,
+                           struct xdr_netobj *key)
+{
+        int i;
+        u32 ret = EINVAL;
+
+        if (key->len != 24) {
+                dprintk("%s: key->len is %d\n", __func__, key->len);
+                goto err_out;
+        }
+        if (randombits->len != 21) {
+                dprintk("%s: randombits->len is %d\n",
+                        __func__, randombits->len);
+                goto err_out;
+        }
+
+        /* take the seven bytes, move them around into the top 7 bits of the
+           8 key bytes, then compute the parity bits.  Do this three times. */
+
+        for (i = 0; i < 3; i++) {
+                memcpy(key->data + i*8, randombits->data + i*7, 7);
+                key->data[i*8+7] = (((key->data[i*8]&1)<<1) |
+                                    ((key->data[i*8+1]&1)<<2) |
+                                    ((key->data[i*8+2]&1)<<3) |
+                                    ((key->data[i*8+3]&1)<<4) |
+                                    ((key->data[i*8+4]&1)<<5) |
+                                    ((key->data[i*8+5]&1)<<6) |
+                                    ((key->data[i*8+6]&1)<<7));
+
+                mit_des_fixup_key_parity(key->data + i*8);
+        }
+        ret = 0;
+err_out:
+        return ret;
+}
+
+/*
+ * This is the aes key derivation postprocess function
+ */
+u32 gss_krb5_aes_make_key(const struct gss_krb5_enctype *gk5e,
+                          struct xdr_netobj *randombits,
+                          struct xdr_netobj *key)
+{
+        u32 ret = EINVAL;
+
+        if (key->len != 16 && key->len != 32) {
+                dprintk("%s: key->len is %d\n", __func__, key->len);
+                goto err_out;
+        }
+        if (randombits->len != 16 && randombits->len != 32) {
+                dprintk("%s: randombits->len is %d\n",
+                        __func__, randombits->len);
+                goto err_out;
+        }
+        if (randombits->len != key->len) {
+                dprintk("%s: randombits->len is %d, key->len is %d\n",
+                        __func__, randombits->len, key->len);
+                goto err_out;
+        }
+        memcpy(key->data, randombits->data, key->len);
+        ret = 0;
+err_out:
+        return ret;
+}
+
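
krb5_derive_key() above is RFC 3961's DK function: the derivation constant is n-folded up to one cipher block (n-folding an input to its own length is the identity; RFC 3961's test vectors give 64-fold("kerberos") = 6b65726265726f73), the block is then repeatedly encrypted under the base key until keybytes of pseudo-random output accumulate, and the enctype's mk_key hook post-processes that output (parity fix-up for DES3, a straight copy for AES). For the RFC 3961 simplified profile the constant is the 4-byte big-endian key usage number followed by a type octet; a hedged sketch of building such a constant for krb5_derive_key() (the helper name and buffer handling are illustrative, not part of this patch):

/* Sketch: build an RFC 3961 DK constant (usage || type octet). */
static void make_dk_constant(u32 usage, u8 type, u8 buf[5],
                             struct xdr_netobj *kd)
{
        buf[0] = (usage >> 24) & 0xff;
        buf[1] = (usage >> 16) & 0xff;
        buf[2] = (usage >> 8) & 0xff;
        buf[3] = usage & 0xff;
        buf[4] = type;          /* 0x99 = Kc, 0xAA = Ke, 0x55 = Ki */
        kd->data = buf;
        kd->len = 5;
}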
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index 2deb0ed72ff4..032644610524 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -1,7 +1,7 @@
 /*
  *  linux/net/sunrpc/gss_krb5_mech.c
  *
- *  Copyright (c) 2001 The Regents of the University of Michigan.
+ *  Copyright (c) 2001-2008 The Regents of the University of Michigan.
  *  All rights reserved.
  *
  *  Andy Adamson <andros@umich.edu>
@@ -48,6 +48,143 @@
 # define RPCDBG_FACILITY        RPCDBG_AUTH
 #endif
 
+static struct gss_api_mech gss_kerberos_mech;   /* forward declaration */
+
+static const struct gss_krb5_enctype supported_gss_krb5_enctypes[] = {
+        /*
+         * DES (All DES enctypes are mapped to the same gss functionality)
+         */
+        {
+          .etype = ENCTYPE_DES_CBC_RAW,
+          .ctype = CKSUMTYPE_RSA_MD5,
+          .name = "des-cbc-crc",
+          .encrypt_name = "cbc(des)",
+          .cksum_name = "md5",
+          .encrypt = krb5_encrypt,
+          .decrypt = krb5_decrypt,
+          .mk_key = NULL,
+          .signalg = SGN_ALG_DES_MAC_MD5,
+          .sealalg = SEAL_ALG_DES,
+          .keybytes = 7,
+          .keylength = 8,
+          .blocksize = 8,
+          .conflen = 8,
+          .cksumlength = 8,
+          .keyed_cksum = 0,
+        },
+        /*
+         * RC4-HMAC
+         */
+        {
+          .etype = ENCTYPE_ARCFOUR_HMAC,
+          .ctype = CKSUMTYPE_HMAC_MD5_ARCFOUR,
+          .name = "rc4-hmac",
+          .encrypt_name = "ecb(arc4)",
+          .cksum_name = "hmac(md5)",
+          .encrypt = krb5_encrypt,
+          .decrypt = krb5_decrypt,
+          .mk_key = NULL,
+          .signalg = SGN_ALG_HMAC_MD5,
+          .sealalg = SEAL_ALG_MICROSOFT_RC4,
+          .keybytes = 16,
+          .keylength = 16,
+          .blocksize = 1,
+          .conflen = 8,
+          .cksumlength = 8,
+          .keyed_cksum = 1,
+        },
+        /*
+         * 3DES
+         */
+        {
+          .etype = ENCTYPE_DES3_CBC_RAW,
+          .ctype = CKSUMTYPE_HMAC_SHA1_DES3,
+          .name = "des3-hmac-sha1",
+          .encrypt_name = "cbc(des3_ede)",
+          .cksum_name = "hmac(sha1)",
+          .encrypt = krb5_encrypt,
+          .decrypt = krb5_decrypt,
+          .mk_key = gss_krb5_des3_make_key,
+          .signalg = SGN_ALG_HMAC_SHA1_DES3_KD,
+          .sealalg = SEAL_ALG_DES3KD,
+          .keybytes = 21,
+          .keylength = 24,
+          .blocksize = 8,
+          .conflen = 8,
+          .cksumlength = 20,
+          .keyed_cksum = 1,
+        },
+        /*
+         * AES128
+         */
+        {
+          .etype = ENCTYPE_AES128_CTS_HMAC_SHA1_96,
+          .ctype = CKSUMTYPE_HMAC_SHA1_96_AES128,
+          .name = "aes128-cts",
+          .encrypt_name = "cts(cbc(aes))",
+          .cksum_name = "hmac(sha1)",
+          .encrypt = krb5_encrypt,
+          .decrypt = krb5_decrypt,
+          .mk_key = gss_krb5_aes_make_key,
+          .encrypt_v2 = gss_krb5_aes_encrypt,
+          .decrypt_v2 = gss_krb5_aes_decrypt,
+          .signalg = -1,
+          .sealalg = -1,
+          .keybytes = 16,
+          .keylength = 16,
+          .blocksize = 16,
+          .conflen = 16,
+          .cksumlength = 12,
+          .keyed_cksum = 1,
+        },
+        /*
+         * AES256
+         */
+        {
+          .etype = ENCTYPE_AES256_CTS_HMAC_SHA1_96,
+          .ctype = CKSUMTYPE_HMAC_SHA1_96_AES256,
+          .name = "aes256-cts",
+          .encrypt_name = "cts(cbc(aes))",
+          .cksum_name = "hmac(sha1)",
+          .encrypt = krb5_encrypt,
+          .decrypt = krb5_decrypt,
+          .mk_key = gss_krb5_aes_make_key,
+          .encrypt_v2 = gss_krb5_aes_encrypt,
+          .decrypt_v2 = gss_krb5_aes_decrypt,
+          .signalg = -1,
+          .sealalg = -1,
+          .keybytes = 32,
+          .keylength = 32,
+          .blocksize = 16,
+          .conflen = 16,
+          .cksumlength = 12,
+          .keyed_cksum = 1,
+        },
+};
+
+static const int num_supported_enctypes =
+        ARRAY_SIZE(supported_gss_krb5_enctypes);
+
+static int
+supported_gss_krb5_enctype(int etype)
+{
+        int i;
+        for (i = 0; i < num_supported_enctypes; i++)
+                if (supported_gss_krb5_enctypes[i].etype == etype)
+                        return 1;
+        return 0;
+}
+
+static const struct gss_krb5_enctype *
+get_gss_krb5_enctype(int etype)
+{
+        int i;
+        for (i = 0; i < num_supported_enctypes; i++)
+                if (supported_gss_krb5_enctypes[i].etype == etype)
+                        return &supported_gss_krb5_enctypes[i];
+        return NULL;
+}
+
 static const void *
 simple_get_bytes(const void *p, const void *end, void *res, int len)
 {
@@ -78,35 +215,45 @@ simple_get_netobj(const void *p, const void *end, struct xdr_netobj *res)
 }
 
 static inline const void *
-get_key(const void *p, const void *end, struct crypto_blkcipher **res)
+get_key(const void *p, const void *end,
+        struct krb5_ctx *ctx, struct crypto_blkcipher **res)
 {
         struct xdr_netobj key;
         int alg;
-        char *alg_name;
 
         p = simple_get_bytes(p, end, &alg, sizeof(alg));
         if (IS_ERR(p))
                 goto out_err;
+
+        switch (alg) {
+        case ENCTYPE_DES_CBC_CRC:
+        case ENCTYPE_DES_CBC_MD4:
+        case ENCTYPE_DES_CBC_MD5:
+                /* Map all these key types to ENCTYPE_DES_CBC_RAW */
+                alg = ENCTYPE_DES_CBC_RAW;
+                break;
+        }
+
+        if (!supported_gss_krb5_enctype(alg)) {
+                printk(KERN_WARNING "gss_kerberos_mech: unsupported "
+                        "encryption key algorithm %d\n", alg);
+                goto out_err;
+        }
         p = simple_get_netobj(p, end, &key);
         if (IS_ERR(p))
                 goto out_err;
 
-        switch (alg) {
-        case ENCTYPE_DES_CBC_RAW:
-                alg_name = "cbc(des)";
-                break;
-        default:
-                printk("gss_kerberos_mech: unsupported algorithm %d\n", alg);
-                goto out_err_free_key;
-        }
-        *res = crypto_alloc_blkcipher(alg_name, 0, CRYPTO_ALG_ASYNC);
+        *res = crypto_alloc_blkcipher(ctx->gk5e->encrypt_name, 0,
+                                      CRYPTO_ALG_ASYNC);
         if (IS_ERR(*res)) {
-                printk("gss_kerberos_mech: unable to initialize crypto algorithm %s\n", alg_name);
+                printk(KERN_WARNING "gss_kerberos_mech: unable to initialize "
+                        "crypto algorithm %s\n", ctx->gk5e->encrypt_name);
                 *res = NULL;
                 goto out_err_free_key;
         }
         if (crypto_blkcipher_setkey(*res, key.data, key.len)) {
-                printk("gss_kerberos_mech: error setting key for crypto algorithm %s\n", alg_name);
+                printk(KERN_WARNING "gss_kerberos_mech: error setting key for "
+                        "crypto algorithm %s\n", ctx->gk5e->encrypt_name);
                 goto out_err_free_tfm;
         }
 
@@ -123,56 +270,55 @@ out_err:
123} 270}
124 271
125static int 272static int
126gss_import_sec_context_kerberos(const void *p, 273gss_import_v1_context(const void *p, const void *end, struct krb5_ctx *ctx)
127 size_t len,
128 struct gss_ctx *ctx_id)
129{ 274{
130 const void *end = (const void *)((const char *)p + len);
131 struct krb5_ctx *ctx;
132 int tmp; 275 int tmp;
133 276
134 if (!(ctx = kzalloc(sizeof(*ctx), GFP_NOFS))) {
135 p = ERR_PTR(-ENOMEM);
136 goto out_err;
137 }
138
139 p = simple_get_bytes(p, end, &ctx->initiate, sizeof(ctx->initiate)); 277 p = simple_get_bytes(p, end, &ctx->initiate, sizeof(ctx->initiate));
140 if (IS_ERR(p)) 278 if (IS_ERR(p))
141 goto out_err_free_ctx; 279 goto out_err;
280
281 /* Old format supports only DES! Any other enctype uses new format */
282 ctx->enctype = ENCTYPE_DES_CBC_RAW;
283
284 ctx->gk5e = get_gss_krb5_enctype(ctx->enctype);
285 if (ctx->gk5e == NULL)
286 goto out_err;
287
142 /* The downcall format was designed before we completely understood 288 /* The downcall format was designed before we completely understood
143 * the uses of the context fields; so it includes some fields we 289 * the uses of the context fields; so it includes some fields we
144 * give only minimal sanity-checking, and some we ignore 290 * give only minimal sanity-checking, and some we ignore
145 * completely (like the next twenty bytes): */ 291 * completely (like the next twenty bytes): */
146 if (unlikely(p + 20 > end || p + 20 < p)) 292 if (unlikely(p + 20 > end || p + 20 < p))
147 goto out_err_free_ctx; 293 goto out_err;
148 p += 20; 294 p += 20;
149 p = simple_get_bytes(p, end, &tmp, sizeof(tmp)); 295 p = simple_get_bytes(p, end, &tmp, sizeof(tmp));
150 if (IS_ERR(p)) 296 if (IS_ERR(p))
151 goto out_err_free_ctx; 297 goto out_err;
152 if (tmp != SGN_ALG_DES_MAC_MD5) { 298 if (tmp != SGN_ALG_DES_MAC_MD5) {
153 p = ERR_PTR(-ENOSYS); 299 p = ERR_PTR(-ENOSYS);
154 goto out_err_free_ctx; 300 goto out_err;
155 } 301 }
156 p = simple_get_bytes(p, end, &tmp, sizeof(tmp)); 302 p = simple_get_bytes(p, end, &tmp, sizeof(tmp));
157 if (IS_ERR(p)) 303 if (IS_ERR(p))
158 goto out_err_free_ctx; 304 goto out_err;
159 if (tmp != SEAL_ALG_DES) { 305 if (tmp != SEAL_ALG_DES) {
160 p = ERR_PTR(-ENOSYS); 306 p = ERR_PTR(-ENOSYS);
161 goto out_err_free_ctx; 307 goto out_err;
162 } 308 }
163 p = simple_get_bytes(p, end, &ctx->endtime, sizeof(ctx->endtime)); 309 p = simple_get_bytes(p, end, &ctx->endtime, sizeof(ctx->endtime));
164 if (IS_ERR(p)) 310 if (IS_ERR(p))
165 goto out_err_free_ctx; 311 goto out_err;
166 p = simple_get_bytes(p, end, &ctx->seq_send, sizeof(ctx->seq_send)); 312 p = simple_get_bytes(p, end, &ctx->seq_send, sizeof(ctx->seq_send));
167 if (IS_ERR(p)) 313 if (IS_ERR(p))
168 goto out_err_free_ctx; 314 goto out_err;
169 p = simple_get_netobj(p, end, &ctx->mech_used); 315 p = simple_get_netobj(p, end, &ctx->mech_used);
170 if (IS_ERR(p)) 316 if (IS_ERR(p))
171 goto out_err_free_ctx; 317 goto out_err;
172 p = get_key(p, end, &ctx->enc); 318 p = get_key(p, end, ctx, &ctx->enc);
173 if (IS_ERR(p)) 319 if (IS_ERR(p))
174 goto out_err_free_mech; 320 goto out_err_free_mech;
175 p = get_key(p, end, &ctx->seq); 321 p = get_key(p, end, ctx, &ctx->seq);
176 if (IS_ERR(p)) 322 if (IS_ERR(p))
177 goto out_err_free_key1; 323 goto out_err_free_key1;
178 if (p != end) { 324 if (p != end) {
@@ -180,9 +326,6 @@ gss_import_sec_context_kerberos(const void *p,
180 goto out_err_free_key2; 326 goto out_err_free_key2;
181 } 327 }
182 328
183 ctx_id->internal_ctx_id = ctx;
184
185 dprintk("RPC: Successfully imported new context.\n");
186 return 0; 329 return 0;
187 330
188out_err_free_key2: 331out_err_free_key2:
@@ -191,18 +334,378 @@ out_err_free_key1:
191 crypto_free_blkcipher(ctx->enc); 334 crypto_free_blkcipher(ctx->enc);
192out_err_free_mech: 335out_err_free_mech:
193 kfree(ctx->mech_used.data); 336 kfree(ctx->mech_used.data);
194out_err_free_ctx:
195 kfree(ctx);
196out_err: 337out_err:
197 return PTR_ERR(p); 338 return PTR_ERR(p);
198} 339}
199 340
341static struct crypto_blkcipher *
342context_v2_alloc_cipher(struct krb5_ctx *ctx, const char *cname, u8 *key)
343{
344 struct crypto_blkcipher *cp;
345
346 cp = crypto_alloc_blkcipher(cname, 0, CRYPTO_ALG_ASYNC);
347 if (IS_ERR(cp)) {
348 dprintk("gss_kerberos_mech: unable to initialize "
349 "crypto algorithm %s\n", cname);
350 return NULL;
351 }
352 if (crypto_blkcipher_setkey(cp, key, ctx->gk5e->keylength)) {
353 dprintk("gss_kerberos_mech: error setting key for "
354 "crypto algorithm %s\n", cname);
355 crypto_free_blkcipher(cp);
356 return NULL;
357 }
358 return cp;
359}
360
361static inline void
362set_cdata(u8 cdata[GSS_KRB5_K5CLENGTH], u32 usage, u8 seed)
363{
364 cdata[0] = (usage>>24)&0xff;
365 cdata[1] = (usage>>16)&0xff;
366 cdata[2] = (usage>>8)&0xff;
367 cdata[3] = usage&0xff;
368 cdata[4] = seed;
369}
370
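set_cdata() packs the key-derivation constant from RFC 3961: four big-endian octets of key-usage number followed by a single key-type seed octet. A standalone sketch in userspace C (not kernel code; the values 24 for KG_USAGE_INITIATOR_SEAL and 0xAA for KEY_USAGE_SEED_ENCRYPTION are the RFC 4121/RFC 3961 assignments and are assumed here):

	#include <stdio.h>

	static void set_cdata(unsigned char cdata[5], unsigned int usage,
			      unsigned char seed)
	{
		cdata[0] = (usage >> 24) & 0xff;	/* usage number, big-endian */
		cdata[1] = (usage >> 16) & 0xff;
		cdata[2] = (usage >> 8) & 0xff;
		cdata[3] = usage & 0xff;
		cdata[4] = seed;			/* key-type seed octet */
	}

	int main(void)
	{
		unsigned char c[5];

		set_cdata(c, 24, 0xAA);	/* initiator seal, encryption seed */
		printf("%02x %02x %02x %02x %02x\n", c[0], c[1], c[2], c[3], c[4]);
		/* prints: 00 00 00 18 aa */
		return 0;
	}

Passing different (usage, seed) constants to krb5_derive_key() is what separates the seal, sign, and integrity keys derived from the one session key.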
371static int
372context_derive_keys_des3(struct krb5_ctx *ctx, gfp_t gfp_mask)
373{
374 struct xdr_netobj c, keyin, keyout;
375 u8 cdata[GSS_KRB5_K5CLENGTH];
376 u32 err;
377
378 c.len = GSS_KRB5_K5CLENGTH;
379 c.data = cdata;
380
381 keyin.data = ctx->Ksess;
382 keyin.len = ctx->gk5e->keylength;
383 keyout.len = ctx->gk5e->keylength;
384
385 /* seq uses the raw key */
386 ctx->seq = context_v2_alloc_cipher(ctx, ctx->gk5e->encrypt_name,
387 ctx->Ksess);
388 if (ctx->seq == NULL)
389 goto out_err;
390
391 ctx->enc = context_v2_alloc_cipher(ctx, ctx->gk5e->encrypt_name,
392 ctx->Ksess);
393 if (ctx->enc == NULL)
394 goto out_free_seq;
395
396 /* derive cksum */
397 set_cdata(cdata, KG_USAGE_SIGN, KEY_USAGE_SEED_CHECKSUM);
398 keyout.data = ctx->cksum;
399 err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask);
400 if (err) {
401 dprintk("%s: Error %d deriving cksum key\n",
402 __func__, err);
403 goto out_free_enc;
404 }
405
406 return 0;
407
408out_free_enc:
409 crypto_free_blkcipher(ctx->enc);
410out_free_seq:
411 crypto_free_blkcipher(ctx->seq);
412out_err:
413 return -EINVAL;
414}
415
416/*
417 * Note that RC4 depends on deriving keys using the sequence
418 * number or the checksum of a token. Therefore, the final keys
419 * cannot be calculated until the token is being constructed!
420 */
421static int
422context_derive_keys_rc4(struct krb5_ctx *ctx)
423{
424 struct crypto_hash *hmac;
425 char sigkeyconstant[] = "signaturekey";
426 int slen = strlen(sigkeyconstant) + 1; /* include null terminator */
427 struct hash_desc desc;
428 struct scatterlist sg[1];
429 int err;
430
431 dprintk("RPC: %s: entered\n", __func__);
432 /*
433 * derive cksum (aka Ksign) key
434 */
435 hmac = crypto_alloc_hash(ctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
436 if (IS_ERR(hmac)) {
437 dprintk("%s: error %ld allocating hash '%s'\n",
438 __func__, PTR_ERR(hmac), ctx->gk5e->cksum_name);
439 err = PTR_ERR(hmac);
440 goto out_err;
441 }
442
443 err = crypto_hash_setkey(hmac, ctx->Ksess, ctx->gk5e->keylength);
444 if (err)
445 goto out_err_free_hmac;
446
447 sg_init_table(sg, 1);
448 sg_set_buf(sg, sigkeyconstant, slen);
449
450 desc.tfm = hmac;
451 desc.flags = 0;
452
453 err = crypto_hash_init(&desc);
454 if (err)
455 goto out_err_free_hmac;
456
457 err = crypto_hash_digest(&desc, sg, slen, ctx->cksum);
458 if (err)
459 goto out_err_free_hmac;
460 /*
461 * allocate hash, and blkciphers for data and seqnum encryption
462 */
463 ctx->enc = crypto_alloc_blkcipher(ctx->gk5e->encrypt_name, 0,
464 CRYPTO_ALG_ASYNC);
465 if (IS_ERR(ctx->enc)) {
466 err = PTR_ERR(ctx->enc);
467 goto out_err_free_hmac;
468 }
469
470 ctx->seq = crypto_alloc_blkcipher(ctx->gk5e->encrypt_name, 0,
471 CRYPTO_ALG_ASYNC);
472 if (IS_ERR(ctx->seq)) {
473 crypto_free_blkcipher(ctx->enc);
474 err = PTR_ERR(ctx->seq);
475 goto out_err_free_hmac;
476 }
477
478 dprintk("RPC: %s: returning success\n", __func__);
479
480 err = 0;
481
482out_err_free_hmac:
483 crypto_free_hash(hmac);
484out_err:
485 dprintk("RPC: %s: returning %d\n", __func__, err);
486 return err;
487}
488
489static int
490context_derive_keys_new(struct krb5_ctx *ctx, gfp_t gfp_mask)
491{
492 struct xdr_netobj c, keyin, keyout;
493 u8 cdata[GSS_KRB5_K5CLENGTH];
494 u32 err;
495
496 c.len = GSS_KRB5_K5CLENGTH;
497 c.data = cdata;
498
499 keyin.data = ctx->Ksess;
500 keyin.len = ctx->gk5e->keylength;
501 keyout.len = ctx->gk5e->keylength;
502
503 /* initiator seal encryption */
504 set_cdata(cdata, KG_USAGE_INITIATOR_SEAL, KEY_USAGE_SEED_ENCRYPTION);
505 keyout.data = ctx->initiator_seal;
506 err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask);
507 if (err) {
508 dprintk("%s: Error %d deriving initiator_seal key\n",
509 __func__, err);
510 goto out_err;
511 }
512 ctx->initiator_enc = context_v2_alloc_cipher(ctx,
513 ctx->gk5e->encrypt_name,
514 ctx->initiator_seal);
515 if (ctx->initiator_enc == NULL)
516 goto out_err;
517
518 /* acceptor seal encryption */
519 set_cdata(cdata, KG_USAGE_ACCEPTOR_SEAL, KEY_USAGE_SEED_ENCRYPTION);
520 keyout.data = ctx->acceptor_seal;
521 err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask);
522 if (err) {
523 dprintk("%s: Error %d deriving acceptor_seal key\n",
524 __func__, err);
525 goto out_free_initiator_enc;
526 }
527 ctx->acceptor_enc = context_v2_alloc_cipher(ctx,
528 ctx->gk5e->encrypt_name,
529 ctx->acceptor_seal);
530 if (ctx->acceptor_enc == NULL)
531 goto out_free_initiator_enc;
532
533 /* initiator sign checksum */
534 set_cdata(cdata, KG_USAGE_INITIATOR_SIGN, KEY_USAGE_SEED_CHECKSUM);
535 keyout.data = ctx->initiator_sign;
536 err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask);
537 if (err) {
538 dprintk("%s: Error %d deriving initiator_sign key\n",
539 __func__, err);
540 goto out_free_acceptor_enc;
541 }
542
543 /* acceptor sign checksum */
544 set_cdata(cdata, KG_USAGE_ACCEPTOR_SIGN, KEY_USAGE_SEED_CHECKSUM);
545 keyout.data = ctx->acceptor_sign;
546 err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask);
547 if (err) {
548 dprintk("%s: Error %d deriving acceptor_sign key\n",
549 __func__, err);
550 goto out_free_acceptor_enc;
551 }
552
553 /* initiator seal integrity */
554 set_cdata(cdata, KG_USAGE_INITIATOR_SEAL, KEY_USAGE_SEED_INTEGRITY);
555 keyout.data = ctx->initiator_integ;
556 err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask);
557 if (err) {
558 dprintk("%s: Error %d deriving initiator_integ key\n",
559 __func__, err);
560 goto out_free_acceptor_enc;
561 }
562
563 /* acceptor seal integrity */
564 set_cdata(cdata, KG_USAGE_ACCEPTOR_SEAL, KEY_USAGE_SEED_INTEGRITY);
565 keyout.data = ctx->acceptor_integ;
566 err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask);
567 if (err) {
568 dprintk("%s: Error %d deriving acceptor_integ key\n",
569 __func__, err);
570 goto out_free_acceptor_enc;
571 }
572
573 switch (ctx->enctype) {
574 case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
575 case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
576 ctx->initiator_enc_aux =
577 context_v2_alloc_cipher(ctx, "cbc(aes)",
578 ctx->initiator_seal);
579 if (ctx->initiator_enc_aux == NULL)
580 goto out_free_acceptor_enc;
581 ctx->acceptor_enc_aux =
582 context_v2_alloc_cipher(ctx, "cbc(aes)",
583 ctx->acceptor_seal);
584 if (ctx->acceptor_enc_aux == NULL) {
585 crypto_free_blkcipher(ctx->initiator_enc_aux);
586 goto out_free_acceptor_enc;
587 }
588 }
589
590 return 0;
591
592out_free_acceptor_enc:
593 crypto_free_blkcipher(ctx->acceptor_enc);
594out_free_initiator_enc:
595 crypto_free_blkcipher(ctx->initiator_enc);
596out_err:
597 return -EINVAL;
598}
599
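For reference, context_derive_keys_new() above derives six subkeys from Ksess, distinguished only by the (usage, seed) constant handed to set_cdata():

	ctx field         usage constant             seed byte
	initiator_seal    KG_USAGE_INITIATOR_SEAL    KEY_USAGE_SEED_ENCRYPTION
	acceptor_seal     KG_USAGE_ACCEPTOR_SEAL     KEY_USAGE_SEED_ENCRYPTION
	initiator_sign    KG_USAGE_INITIATOR_SIGN    KEY_USAGE_SEED_CHECKSUM
	acceptor_sign     KG_USAGE_ACCEPTOR_SIGN     KEY_USAGE_SEED_CHECKSUM
	initiator_integ   KG_USAGE_INITIATOR_SEAL    KEY_USAGE_SEED_INTEGRITY
	acceptor_integ    KG_USAGE_ACCEPTOR_SEAL     KEY_USAGE_SEED_INTEGRITY

The AES enctypes additionally allocate auxiliary cbc(aes) ciphers keyed with the two seal keys, apparently to support the CTS tail handling in the encryption path.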
600static int
601gss_import_v2_context(const void *p, const void *end, struct krb5_ctx *ctx,
602 gfp_t gfp_mask)
603{
604 int keylen;
605
606 p = simple_get_bytes(p, end, &ctx->flags, sizeof(ctx->flags));
607 if (IS_ERR(p))
608 goto out_err;
609 ctx->initiate = ctx->flags & KRB5_CTX_FLAG_INITIATOR;
610
611 p = simple_get_bytes(p, end, &ctx->endtime, sizeof(ctx->endtime));
612 if (IS_ERR(p))
613 goto out_err;
614 p = simple_get_bytes(p, end, &ctx->seq_send64, sizeof(ctx->seq_send64));
615 if (IS_ERR(p))
616 goto out_err;
617 /* set seq_send for use by "older" enctypes */
618 ctx->seq_send = ctx->seq_send64;
619 if (ctx->seq_send64 != ctx->seq_send) {
620 dprintk("%s: seq_send64 %lx, seq_send %x overflow?\n", __func__,
621 (unsigned long)ctx->seq_send64, ctx->seq_send);
622 goto out_err;
623 }
624 p = simple_get_bytes(p, end, &ctx->enctype, sizeof(ctx->enctype));
625 if (IS_ERR(p))
626 goto out_err;
627 /* Map ENCTYPE_DES3_CBC_SHA1 to ENCTYPE_DES3_CBC_RAW */
628 if (ctx->enctype == ENCTYPE_DES3_CBC_SHA1)
629 ctx->enctype = ENCTYPE_DES3_CBC_RAW;
630 ctx->gk5e = get_gss_krb5_enctype(ctx->enctype);
631 if (ctx->gk5e == NULL) {
632 dprintk("gss_kerberos_mech: unsupported krb5 enctype %u\n",
633 ctx->enctype);
634 p = ERR_PTR(-EINVAL);
635 goto out_err;
636 }
637 keylen = ctx->gk5e->keylength;
638
639 p = simple_get_bytes(p, end, ctx->Ksess, keylen);
640 if (IS_ERR(p))
641 goto out_err;
642
643 if (p != end) {
644 p = ERR_PTR(-EINVAL);
645 goto out_err;
646 }
647
648 ctx->mech_used.data = kmemdup(gss_kerberos_mech.gm_oid.data,
649 gss_kerberos_mech.gm_oid.len, gfp_mask);
650 if (unlikely(ctx->mech_used.data == NULL)) {
651 p = ERR_PTR(-ENOMEM);
652 goto out_err;
653 }
654 ctx->mech_used.len = gss_kerberos_mech.gm_oid.len;
655
656 switch (ctx->enctype) {
657 case ENCTYPE_DES3_CBC_RAW:
658 return context_derive_keys_des3(ctx, gfp_mask);
659 case ENCTYPE_ARCFOUR_HMAC:
660 return context_derive_keys_rc4(ctx);
661 case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
662 case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
663 return context_derive_keys_new(ctx, gfp_mask);
664 default:
665 return -EINVAL;
666 }
667
668out_err:
669 return PTR_ERR(p);
670}
671
672static int
673gss_import_sec_context_kerberos(const void *p, size_t len,
674 struct gss_ctx *ctx_id,
675 gfp_t gfp_mask)
676{
677 const void *end = (const void *)((const char *)p + len);
678 struct krb5_ctx *ctx;
679 int ret;
680
681 ctx = kzalloc(sizeof(*ctx), gfp_mask);
682 if (ctx == NULL)
683 return -ENOMEM;
684
685 if (len == 85)
686 ret = gss_import_v1_context(p, end, ctx);
687 else
688 ret = gss_import_v2_context(p, end, ctx, gfp_mask);
689
690 if (ret == 0)
691 ctx_id->internal_ctx_id = ctx;
692 else
693 kfree(ctx);
694
695 dprintk("RPC: %s: returning %d\n", __func__, ret);
696 return ret;
697}
698
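The magic length 85 selecting the v1 parser is the exact size of a legacy downcall blob. Assuming the 9-byte krb5 mech OID and 8-byte DES keys, the fields consumed by gss_import_v1_context() and get_key() account for it:

	initiate                              4
	ignored context words                20
	signalg + sealalg                 4 + 4
	endtime + seq_send                4 + 4
	mech_used netobj (len + OID)      4 + 9
	enc key (alg + len + key)     4 + 4 + 8
	seq key (alg + len + key)     4 + 4 + 8
	                                 ------
	                                     85

Any other length is taken to be the new, self-describing v2 format.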
200static void 699static void
201gss_delete_sec_context_kerberos(void *internal_ctx) { 700gss_delete_sec_context_kerberos(void *internal_ctx) {
202 struct krb5_ctx *kctx = internal_ctx; 701 struct krb5_ctx *kctx = internal_ctx;
203 702
204 crypto_free_blkcipher(kctx->seq); 703 crypto_free_blkcipher(kctx->seq);
205 crypto_free_blkcipher(kctx->enc); 704 crypto_free_blkcipher(kctx->enc);
705 crypto_free_blkcipher(kctx->acceptor_enc);
706 crypto_free_blkcipher(kctx->initiator_enc);
707 crypto_free_blkcipher(kctx->acceptor_enc_aux);
708 crypto_free_blkcipher(kctx->initiator_enc_aux);
206 kfree(kctx->mech_used.data); 709 kfree(kctx->mech_used.data);
207 kfree(kctx); 710 kfree(kctx);
208} 711}
@@ -241,6 +744,7 @@ static struct gss_api_mech gss_kerberos_mech = {
241 .gm_ops = &gss_kerberos_ops, 744 .gm_ops = &gss_kerberos_ops,
242 .gm_pf_num = ARRAY_SIZE(gss_kerberos_pfs), 745 .gm_pf_num = ARRAY_SIZE(gss_kerberos_pfs),
243 .gm_pfs = gss_kerberos_pfs, 746 .gm_pfs = gss_kerberos_pfs,
747 .gm_upcall_enctypes = "enctypes=18,17,16,23,3,1,2 ",
244}; 748};
245 749
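The numbers in the new gm_upcall_enctypes string are the standard Kerberos enctype assignments, in the kernel's order of preference: 18 = aes256-cts-hmac-sha1-96, 17 = aes128-cts-hmac-sha1-96, 16 = des3-cbc-sha1, 23 = rc4-hmac, 3 = des-cbc-md5, 1 = des-cbc-crc, 2 = des-cbc-md4. The string is passed up to gssd, which presumably uses it to restrict negotiation to enctypes the kernel can handle.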
246static int __init init_kerberos_module(void) 750static int __init init_kerberos_module(void)
diff --git a/net/sunrpc/auth_gss/gss_krb5_seal.c b/net/sunrpc/auth_gss/gss_krb5_seal.c
index 88fe6e75ed7e..d7941eab7796 100644
--- a/net/sunrpc/auth_gss/gss_krb5_seal.c
+++ b/net/sunrpc/auth_gss/gss_krb5_seal.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Adapted from MIT Kerberos 5-1.2.1 lib/gssapi/krb5/k5seal.c 4 * Adapted from MIT Kerberos 5-1.2.1 lib/gssapi/krb5/k5seal.c
5 * 5 *
6 * Copyright (c) 2000 The Regents of the University of Michigan. 6 * Copyright (c) 2000-2008 The Regents of the University of Michigan.
7 * All rights reserved. 7 * All rights reserved.
8 * 8 *
9 * Andy Adamson <andros@umich.edu> 9 * Andy Adamson <andros@umich.edu>
@@ -70,53 +70,154 @@
70 70
71DEFINE_SPINLOCK(krb5_seq_lock); 71DEFINE_SPINLOCK(krb5_seq_lock);
72 72
73u32 73static char *
74gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text, 74setup_token(struct krb5_ctx *ctx, struct xdr_netobj *token)
75{
76 __be16 *ptr, *krb5_hdr;
77 int body_size = GSS_KRB5_TOK_HDR_LEN + ctx->gk5e->cksumlength;
78
79 token->len = g_token_size(&ctx->mech_used, body_size);
80
81 ptr = (__be16 *)token->data;
82 g_make_token_header(&ctx->mech_used, body_size, (unsigned char **)&ptr);
83
84 /* ptr now at start of header described in rfc 1964, section 1.2.1: */
85 krb5_hdr = ptr;
86 *ptr++ = KG_TOK_MIC_MSG;
87 *ptr++ = cpu_to_le16(ctx->gk5e->signalg);
88 *ptr++ = SEAL_ALG_NONE;
89 *ptr++ = 0xffff;
90
91 return (char *)krb5_hdr;
92}
93
94static void *
95setup_token_v2(struct krb5_ctx *ctx, struct xdr_netobj *token)
96{
97 __be16 *ptr, *krb5_hdr;
98 u8 *p, flags = 0x00;
99
100 if ((ctx->flags & KRB5_CTX_FLAG_INITIATOR) == 0)
101 flags |= 0x01;
102 if (ctx->flags & KRB5_CTX_FLAG_ACCEPTOR_SUBKEY)
103 flags |= 0x04;
104
105 /* Per rfc 4121, sec 4.2.6.1, there is no header,
106 * just start the token */
107 krb5_hdr = ptr = (__be16 *)token->data;
108
109 *ptr++ = KG2_TOK_MIC;
110 p = (u8 *)ptr;
111 *p++ = flags;
112 *p++ = 0xff;
113 ptr = (__be16 *)p;
114 *ptr++ = 0xffff;
115 *ptr++ = 0xffff;
116
117 token->len = GSS_KRB5_TOK_HDR_LEN + ctx->gk5e->cksumlength;
118 return krb5_hdr;
119}
120
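The resulting RFC 4121 MIC token, as setup_token_v2() lays it out (SND_SEQ and the checksum are filled in afterwards by gss_get_mic_v2()):

	octets 0-1    TOK_ID     0x04 0x04 (KG2_TOK_MIC)
	octet  2      Flags      0x01 = SentByAcceptor, 0x04 = AcceptorSubkey
	octets 3-7    Filler     0xff
	octets 8-15   SND_SEQ    64-bit big-endian sequence number
	octets 16-    SGN_CKSUM  ctx->gk5e->cksumlength bytes of checksum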
121static u32
122gss_get_mic_v1(struct krb5_ctx *ctx, struct xdr_buf *text,
75 struct xdr_netobj *token) 123 struct xdr_netobj *token)
76{ 124{
77 struct krb5_ctx *ctx = gss_ctx->internal_ctx_id; 125 char cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
78 char cksumdata[16]; 126 struct xdr_netobj md5cksum = {.len = sizeof(cksumdata),
79 struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata}; 127 .data = cksumdata};
80 unsigned char *ptr, *msg_start; 128 void *ptr;
81 s32 now; 129 s32 now;
82 u32 seq_send; 130 u32 seq_send;
131 u8 *cksumkey;
83 132
84 dprintk("RPC: gss_krb5_seal\n"); 133 dprintk("RPC: %s\n", __func__);
85 BUG_ON(ctx == NULL); 134 BUG_ON(ctx == NULL);
86 135
87 now = get_seconds(); 136 now = get_seconds();
88 137
89 token->len = g_token_size(&ctx->mech_used, GSS_KRB5_TOK_HDR_LEN + 8); 138 ptr = setup_token(ctx, token);
90 139
91 ptr = token->data; 140 if (ctx->gk5e->keyed_cksum)
92 g_make_token_header(&ctx->mech_used, GSS_KRB5_TOK_HDR_LEN + 8, &ptr); 141 cksumkey = ctx->cksum;
142 else
143 cksumkey = NULL;
93 144
94 /* ptr now at header described in rfc 1964, section 1.2.1: */ 145 if (make_checksum(ctx, ptr, 8, text, 0, cksumkey,
95 ptr[0] = (unsigned char) ((KG_TOK_MIC_MSG >> 8) & 0xff); 146 KG_USAGE_SIGN, &md5cksum))
96 ptr[1] = (unsigned char) (KG_TOK_MIC_MSG & 0xff); 147 return GSS_S_FAILURE;
97 148
98 msg_start = ptr + GSS_KRB5_TOK_HDR_LEN + 8; 149 memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data, md5cksum.len);
99 150
100 *(__be16 *)(ptr + 2) = htons(SGN_ALG_DES_MAC_MD5); 151 spin_lock(&krb5_seq_lock);
101 memset(ptr + 4, 0xff, 4); 152 seq_send = ctx->seq_send++;
153 spin_unlock(&krb5_seq_lock);
102 154
103 if (make_checksum("md5", ptr, 8, text, 0, &md5cksum)) 155 if (krb5_make_seq_num(ctx, ctx->seq, ctx->initiate ? 0 : 0xff,
156 seq_send, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8))
104 return GSS_S_FAILURE; 157 return GSS_S_FAILURE;
105 158
106 if (krb5_encrypt(ctx->seq, NULL, md5cksum.data, 159 return (ctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
107 md5cksum.data, md5cksum.len)) 160}
108 return GSS_S_FAILURE; 161
162u32
163gss_get_mic_v2(struct krb5_ctx *ctx, struct xdr_buf *text,
164 struct xdr_netobj *token)
165{
166 char cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
167 struct xdr_netobj cksumobj = { .len = sizeof(cksumdata),
168 .data = cksumdata};
169 void *krb5_hdr;
170 s32 now;
171 u64 seq_send;
172 u8 *cksumkey;
173 unsigned int cksum_usage;
109 174
110 memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data + md5cksum.len - 8, 8); 175 dprintk("RPC: %s\n", __func__);
111 176
177 krb5_hdr = setup_token_v2(ctx, token);
178
179 /* Set up the sequence number. Now 64 bits in clear
180 * text and without a direction indicator */
112 spin_lock(&krb5_seq_lock); 181 spin_lock(&krb5_seq_lock);
113 seq_send = ctx->seq_send++; 182 seq_send = ctx->seq_send64++;
114 spin_unlock(&krb5_seq_lock); 183 spin_unlock(&krb5_seq_lock);
115 184 *((__be64 *)(krb5_hdr + 8)) = cpu_to_be64(seq_send);
116 if (krb5_make_seq_num(ctx->seq, ctx->initiate ? 0 : 0xff, 185
117 seq_send, ptr + GSS_KRB5_TOK_HDR_LEN, 186 if (ctx->initiate) {
118 ptr + 8)) 187 cksumkey = ctx->initiator_sign;
188 cksum_usage = KG_USAGE_INITIATOR_SIGN;
189 } else {
190 cksumkey = ctx->acceptor_sign;
191 cksum_usage = KG_USAGE_ACCEPTOR_SIGN;
192 }
193
194 if (make_checksum_v2(ctx, krb5_hdr, GSS_KRB5_TOK_HDR_LEN,
195 text, 0, cksumkey, cksum_usage, &cksumobj))
119 return GSS_S_FAILURE; 196 return GSS_S_FAILURE;
120 197
198 memcpy(krb5_hdr + GSS_KRB5_TOK_HDR_LEN, cksumobj.data, cksumobj.len);
199
200 now = get_seconds();
201
121 return (ctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE; 202 return (ctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
122} 203}
204
205u32
206gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text,
207 struct xdr_netobj *token)
208{
209 struct krb5_ctx *ctx = gss_ctx->internal_ctx_id;
210
211 switch (ctx->enctype) {
212 default:
213 BUG();
214 case ENCTYPE_DES_CBC_RAW:
215 case ENCTYPE_DES3_CBC_RAW:
216 case ENCTYPE_ARCFOUR_HMAC:
217 return gss_get_mic_v1(ctx, text, token);
218 case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
219 case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
220 return gss_get_mic_v2(ctx, text, token);
221 }
222}
223
diff --git a/net/sunrpc/auth_gss/gss_krb5_seqnum.c b/net/sunrpc/auth_gss/gss_krb5_seqnum.c
index 6331cd6866ec..415c013ba382 100644
--- a/net/sunrpc/auth_gss/gss_krb5_seqnum.c
+++ b/net/sunrpc/auth_gss/gss_krb5_seqnum.c
@@ -39,14 +39,51 @@
39# define RPCDBG_FACILITY RPCDBG_AUTH 39# define RPCDBG_FACILITY RPCDBG_AUTH
40#endif 40#endif
41 41
42static s32
43krb5_make_rc4_seq_num(struct krb5_ctx *kctx, int direction, s32 seqnum,
44 unsigned char *cksum, unsigned char *buf)
45{
46 struct crypto_blkcipher *cipher;
47 unsigned char plain[8];
48 s32 code;
49
50 dprintk("RPC: %s:\n", __func__);
51 cipher = crypto_alloc_blkcipher(kctx->gk5e->encrypt_name, 0,
52 CRYPTO_ALG_ASYNC);
53 if (IS_ERR(cipher))
54 return PTR_ERR(cipher);
55
56 plain[0] = (unsigned char) ((seqnum >> 24) & 0xff);
57 plain[1] = (unsigned char) ((seqnum >> 16) & 0xff);
58 plain[2] = (unsigned char) ((seqnum >> 8) & 0xff);
59 plain[3] = (unsigned char) ((seqnum >> 0) & 0xff);
60 plain[4] = direction;
61 plain[5] = direction;
62 plain[6] = direction;
63 plain[7] = direction;
64
65 code = krb5_rc4_setup_seq_key(kctx, cipher, cksum);
66 if (code)
67 goto out;
68
69 code = krb5_encrypt(cipher, cksum, plain, buf, 8);
70out:
71 crypto_free_blkcipher(cipher);
72 return code;
73}
42s32 74s32
43krb5_make_seq_num(struct crypto_blkcipher *key, 75krb5_make_seq_num(struct krb5_ctx *kctx,
76 struct crypto_blkcipher *key,
44 int direction, 77 int direction,
45 u32 seqnum, 78 u32 seqnum,
46 unsigned char *cksum, unsigned char *buf) 79 unsigned char *cksum, unsigned char *buf)
47{ 80{
48 unsigned char plain[8]; 81 unsigned char plain[8];
49 82
83 if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC)
84 return krb5_make_rc4_seq_num(kctx, direction, seqnum,
85 cksum, buf);
86
50 plain[0] = (unsigned char) (seqnum & 0xff); 87 plain[0] = (unsigned char) (seqnum & 0xff);
51 plain[1] = (unsigned char) ((seqnum >> 8) & 0xff); 88 plain[1] = (unsigned char) ((seqnum >> 8) & 0xff);
52 plain[2] = (unsigned char) ((seqnum >> 16) & 0xff); 89 plain[2] = (unsigned char) ((seqnum >> 16) & 0xff);
@@ -60,17 +97,59 @@ krb5_make_seq_num(struct crypto_blkcipher *key,
60 return krb5_encrypt(key, cksum, plain, buf, 8); 97 return krb5_encrypt(key, cksum, plain, buf, 8);
61} 98}
62 99
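The 8-byte sequence-number plaintext differs between the two helpers above: krb5_make_seq_num() stores the 32-bit number least-significant octet first, while krb5_make_rc4_seq_num() stores it most-significant octet first; both replicate the direction byte (0x00 initiator, 0xff acceptor) into octets 4-7. A minimal userspace sketch of the two layouts, for illustration only:

	#include <stdint.h>
	#include <string.h>

	static void seqnum_plaintext(unsigned char plain[8], uint32_t seqnum,
				     unsigned char direction, int msb_first)
	{
		int i;

		for (i = 0; i < 4; i++)
			plain[i] = (seqnum >> (msb_first ? 24 - 8 * i
							 : 8 * i)) & 0xff;
		memset(plain + 4, direction, 4);	/* octets 4-7 */
	}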
100static s32
101krb5_get_rc4_seq_num(struct krb5_ctx *kctx, unsigned char *cksum,
102 unsigned char *buf, int *direction, s32 *seqnum)
103{
104 struct crypto_blkcipher *cipher;
105 unsigned char plain[8];
106 s32 code;
107
108 dprintk("RPC: %s:\n", __func__);
109 cipher = crypto_alloc_blkcipher(kctx->gk5e->encrypt_name, 0,
110 CRYPTO_ALG_ASYNC);
111 if (IS_ERR(cipher))
112 return PTR_ERR(cipher);
113
114 code = krb5_rc4_setup_seq_key(kctx, cipher, cksum);
115 if (code)
116 goto out;
117
118 code = krb5_decrypt(cipher, cksum, buf, plain, 8);
119 if (code)
120 goto out;
121
122 if ((plain[4] != plain[5]) || (plain[4] != plain[6])
123 || (plain[4] != plain[7])) {
124 code = (s32)KG_BAD_SEQ;
125 goto out;
126 }
127
128 *direction = plain[4];
129
130 *seqnum = ((plain[0] << 24) | (plain[1] << 16) |
131 (plain[2] << 8) | (plain[3]));
132out:
133 crypto_free_blkcipher(cipher);
134 return code;
135}
136
63s32 137s32
64krb5_get_seq_num(struct crypto_blkcipher *key, 138krb5_get_seq_num(struct krb5_ctx *kctx,
65 unsigned char *cksum, 139 unsigned char *cksum,
66 unsigned char *buf, 140 unsigned char *buf,
67 int *direction, u32 *seqnum) 141 int *direction, u32 *seqnum)
68{ 142{
69 s32 code; 143 s32 code;
70 unsigned char plain[8]; 144 unsigned char plain[8];
145 struct crypto_blkcipher *key = kctx->seq;
71 146
72 dprintk("RPC: krb5_get_seq_num:\n"); 147 dprintk("RPC: krb5_get_seq_num:\n");
73 148
149 if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC)
150 return krb5_get_rc4_seq_num(kctx, cksum, buf,
151 direction, seqnum);
152
74 if ((code = krb5_decrypt(key, cksum, buf, plain, 8))) 153 if ((code = krb5_decrypt(key, cksum, buf, plain, 8)))
75 return code; 154 return code;
76 155
diff --git a/net/sunrpc/auth_gss/gss_krb5_unseal.c b/net/sunrpc/auth_gss/gss_krb5_unseal.c
index ce6c247edad0..6cd930f3678f 100644
--- a/net/sunrpc/auth_gss/gss_krb5_unseal.c
+++ b/net/sunrpc/auth_gss/gss_krb5_unseal.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Adapted from MIT Kerberos 5-1.2.1 lib/gssapi/krb5/k5unseal.c 4 * Adapted from MIT Kerberos 5-1.2.1 lib/gssapi/krb5/k5unseal.c
5 * 5 *
6 * Copyright (c) 2000 The Regents of the University of Michigan. 6 * Copyright (c) 2000-2008 The Regents of the University of Michigan.
7 * All rights reserved. 7 * All rights reserved.
8 * 8 *
9 * Andy Adamson <andros@umich.edu> 9 * Andy Adamson <andros@umich.edu>
@@ -70,20 +70,21 @@
70/* read_token is a mic token, and message_buffer is the data that the mic was 70/* read_token is a mic token, and message_buffer is the data that the mic was
71 * supposedly taken over. */ 71 * supposedly taken over. */
72 72
73u32 73static u32
74gss_verify_mic_kerberos(struct gss_ctx *gss_ctx, 74gss_verify_mic_v1(struct krb5_ctx *ctx,
75 struct xdr_buf *message_buffer, struct xdr_netobj *read_token) 75 struct xdr_buf *message_buffer, struct xdr_netobj *read_token)
76{ 76{
77 struct krb5_ctx *ctx = gss_ctx->internal_ctx_id;
78 int signalg; 77 int signalg;
79 int sealalg; 78 int sealalg;
80 char cksumdata[16]; 79 char cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
81 struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata}; 80 struct xdr_netobj md5cksum = {.len = sizeof(cksumdata),
81 .data = cksumdata};
82 s32 now; 82 s32 now;
83 int direction; 83 int direction;
84 u32 seqnum; 84 u32 seqnum;
85 unsigned char *ptr = (unsigned char *)read_token->data; 85 unsigned char *ptr = (unsigned char *)read_token->data;
86 int bodysize; 86 int bodysize;
87 u8 *cksumkey;
87 88
88 dprintk("RPC: krb5_read_token\n"); 89 dprintk("RPC: krb5_read_token\n");
89 90
@@ -98,7 +99,7 @@ gss_verify_mic_kerberos(struct gss_ctx *gss_ctx,
98 /* XXX sanity-check bodysize?? */ 99 /* XXX sanity-check bodysize?? */
99 100
100 signalg = ptr[2] + (ptr[3] << 8); 101 signalg = ptr[2] + (ptr[3] << 8);
101 if (signalg != SGN_ALG_DES_MAC_MD5) 102 if (signalg != ctx->gk5e->signalg)
102 return GSS_S_DEFECTIVE_TOKEN; 103 return GSS_S_DEFECTIVE_TOKEN;
103 104
104 sealalg = ptr[4] + (ptr[5] << 8); 105 sealalg = ptr[4] + (ptr[5] << 8);
@@ -108,13 +109,17 @@ gss_verify_mic_kerberos(struct gss_ctx *gss_ctx,
108 if ((ptr[6] != 0xff) || (ptr[7] != 0xff)) 109 if ((ptr[6] != 0xff) || (ptr[7] != 0xff))
109 return GSS_S_DEFECTIVE_TOKEN; 110 return GSS_S_DEFECTIVE_TOKEN;
110 111
111 if (make_checksum("md5", ptr, 8, message_buffer, 0, &md5cksum)) 112 if (ctx->gk5e->keyed_cksum)
112 return GSS_S_FAILURE; 113 cksumkey = ctx->cksum;
114 else
115 cksumkey = NULL;
113 116
114 if (krb5_encrypt(ctx->seq, NULL, md5cksum.data, md5cksum.data, 16)) 117 if (make_checksum(ctx, ptr, 8, message_buffer, 0,
118 cksumkey, KG_USAGE_SIGN, &md5cksum))
115 return GSS_S_FAILURE; 119 return GSS_S_FAILURE;
116 120
117 if (memcmp(md5cksum.data + 8, ptr + GSS_KRB5_TOK_HDR_LEN, 8)) 121 if (memcmp(md5cksum.data, ptr + GSS_KRB5_TOK_HDR_LEN,
122 ctx->gk5e->cksumlength))
118 return GSS_S_BAD_SIG; 123 return GSS_S_BAD_SIG;
119 124
120 /* it got through unscathed. Make sure the context is unexpired */ 125 /* it got through unscathed. Make sure the context is unexpired */
@@ -126,7 +131,8 @@ gss_verify_mic_kerberos(struct gss_ctx *gss_ctx,
126 131
127 /* do sequencing checks */ 132 /* do sequencing checks */
128 133
129 if (krb5_get_seq_num(ctx->seq, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8, &direction, &seqnum)) 134 if (krb5_get_seq_num(ctx, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8,
135 &direction, &seqnum))
130 return GSS_S_FAILURE; 136 return GSS_S_FAILURE;
131 137
132 if ((ctx->initiate && direction != 0xff) || 138 if ((ctx->initiate && direction != 0xff) ||
@@ -135,3 +141,86 @@ gss_verify_mic_kerberos(struct gss_ctx *gss_ctx,
135 141
136 return GSS_S_COMPLETE; 142 return GSS_S_COMPLETE;
137} 143}
144
145static u32
146gss_verify_mic_v2(struct krb5_ctx *ctx,
147 struct xdr_buf *message_buffer, struct xdr_netobj *read_token)
148{
149 char cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
150 struct xdr_netobj cksumobj = {.len = sizeof(cksumdata),
151 .data = cksumdata};
152 s32 now;
153 u64 seqnum;
154 u8 *ptr = read_token->data;
155 u8 *cksumkey;
156 u8 flags;
157 int i;
158 unsigned int cksum_usage;
159
160 dprintk("RPC: %s\n", __func__);
161
162 if (be16_to_cpu(*((__be16 *)ptr)) != KG2_TOK_MIC)
163 return GSS_S_DEFECTIVE_TOKEN;
164
165 flags = ptr[2];
166 if ((!ctx->initiate && (flags & KG2_TOKEN_FLAG_SENTBYACCEPTOR)) ||
167 (ctx->initiate && !(flags & KG2_TOKEN_FLAG_SENTBYACCEPTOR)))
168 return GSS_S_BAD_SIG;
169
170 if (flags & KG2_TOKEN_FLAG_SEALED) {
171 dprintk("%s: token has unexpected sealed flag\n", __func__);
172 return GSS_S_FAILURE;
173 }
174
175 for (i = 3; i < 8; i++)
176 if (ptr[i] != 0xff)
177 return GSS_S_DEFECTIVE_TOKEN;
178
179 if (ctx->initiate) {
180 cksumkey = ctx->acceptor_sign;
181 cksum_usage = KG_USAGE_ACCEPTOR_SIGN;
182 } else {
183 cksumkey = ctx->initiator_sign;
184 cksum_usage = KG_USAGE_INITIATOR_SIGN;
185 }
186
187 if (make_checksum_v2(ctx, ptr, GSS_KRB5_TOK_HDR_LEN, message_buffer, 0,
188 cksumkey, cksum_usage, &cksumobj))
189 return GSS_S_FAILURE;
190
191 if (memcmp(cksumobj.data, ptr + GSS_KRB5_TOK_HDR_LEN,
192 ctx->gk5e->cksumlength))
193 return GSS_S_BAD_SIG;
194
195 /* it got through unscathed. Make sure the context is unexpired */
196 now = get_seconds();
197 if (now > ctx->endtime)
198 return GSS_S_CONTEXT_EXPIRED;
199
200 /* do sequencing checks */
201
202 seqnum = be64_to_cpup((__be64 *)(ptr + 8));
203
204 return GSS_S_COMPLETE;
205}
206
207u32
208gss_verify_mic_kerberos(struct gss_ctx *gss_ctx,
209 struct xdr_buf *message_buffer,
210 struct xdr_netobj *read_token)
211{
212 struct krb5_ctx *ctx = gss_ctx->internal_ctx_id;
213
214 switch (ctx->enctype) {
215 default:
216 BUG();
217 case ENCTYPE_DES_CBC_RAW:
218 case ENCTYPE_DES3_CBC_RAW:
219 case ENCTYPE_ARCFOUR_HMAC:
220 return gss_verify_mic_v1(ctx, message_buffer, read_token);
221 case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
222 case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
223 return gss_verify_mic_v2(ctx, message_buffer, read_token);
224 }
225}
226
diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c
index a6e905637e03..2763e3e48db4 100644
--- a/net/sunrpc/auth_gss/gss_krb5_wrap.c
+++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c
@@ -1,3 +1,33 @@
1/*
2 * COPYRIGHT (c) 2008
3 * The Regents of the University of Michigan
4 * ALL RIGHTS RESERVED
5 *
6 * Permission is granted to use, copy, create derivative works
7 * and redistribute this software and such derivative works
8 * for any purpose, so long as the name of The University of
9 * Michigan is not used in any advertising or publicity
10 * pertaining to the use or distribution of this software
11 * without specific, written prior authorization. If the
12 * above copyright notice or any other identification of the
13 * University of Michigan is included in any copy of any
14 * portion of this software, then the disclaimer below must
15 * also be included.
16 *
17 * THIS SOFTWARE IS PROVIDED AS IS, WITHOUT REPRESENTATION
18 * FROM THE UNIVERSITY OF MICHIGAN AS TO ITS FITNESS FOR ANY
19 * PURPOSE, AND WITHOUT WARRANTY BY THE UNIVERSITY OF
20 * MICHIGAN OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING
21 * WITHOUT LIMITATION THE IMPLIED WARRANTIES OF
22 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE
23 * REGENTS OF THE UNIVERSITY OF MICHIGAN SHALL NOT BE LIABLE
24 * FOR ANY DAMAGES, INCLUDING SPECIAL, INDIRECT, INCIDENTAL, OR
25 * CONSEQUENTIAL DAMAGES, WITH RESPECT TO ANY CLAIM ARISING
26 * OUT OF OR IN CONNECTION WITH THE USE OF THE SOFTWARE, EVEN
27 * IF IT HAS BEEN OR IS HEREAFTER ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGES.
29 */
30
1#include <linux/types.h> 31#include <linux/types.h>
2#include <linux/jiffies.h> 32#include <linux/jiffies.h>
3#include <linux/sunrpc/gss_krb5.h> 33#include <linux/sunrpc/gss_krb5.h>
@@ -12,10 +42,7 @@
12static inline int 42static inline int
13gss_krb5_padding(int blocksize, int length) 43gss_krb5_padding(int blocksize, int length)
14{ 44{
15 /* Most of the code is block-size independent but currently we 45 return blocksize - (length % blocksize);
16 * use only 8: */
17 BUG_ON(blocksize != 8);
18 return 8 - (length & 7);
19} 46}
20 47
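With the generalized formula, an 8-byte blocksize pads a 21-byte length by 3 octets, while an exact multiple such as 24 gets a full 8-byte pad block; the pad count is therefore always in 1..blocksize, so the final octet can always encode the pad length.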
21static inline void 48static inline void
@@ -86,8 +113,8 @@ out:
86 return 0; 113 return 0;
87} 114}
88 115
89static void 116void
90make_confounder(char *p, u32 conflen) 117gss_krb5_make_confounder(char *p, u32 conflen)
91{ 118{
92 static u64 i = 0; 119 static u64 i = 0;
93 u64 *q = (u64 *)p; 120 u64 *q = (u64 *)p;
@@ -127,69 +154,73 @@ make_confounder(char *p, u32 conflen)
127 154
128/* XXX factor out common code with seal/unseal. */ 155/* XXX factor out common code with seal/unseal. */
129 156
130u32 157static u32
131gss_wrap_kerberos(struct gss_ctx *ctx, int offset, 158gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
132 struct xdr_buf *buf, struct page **pages) 159 struct xdr_buf *buf, struct page **pages)
133{ 160{
134 struct krb5_ctx *kctx = ctx->internal_ctx_id; 161 char cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
135 char cksumdata[16]; 162 struct xdr_netobj md5cksum = {.len = sizeof(cksumdata),
136 struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata}; 163 .data = cksumdata};
137 int blocksize = 0, plainlen; 164 int blocksize = 0, plainlen;
138 unsigned char *ptr, *msg_start; 165 unsigned char *ptr, *msg_start;
139 s32 now; 166 s32 now;
140 int headlen; 167 int headlen;
141 struct page **tmp_pages; 168 struct page **tmp_pages;
142 u32 seq_send; 169 u32 seq_send;
170 u8 *cksumkey;
171 u32 conflen = kctx->gk5e->conflen;
143 172
144 dprintk("RPC: gss_wrap_kerberos\n"); 173 dprintk("RPC: %s\n", __func__);
145 174
146 now = get_seconds(); 175 now = get_seconds();
147 176
148 blocksize = crypto_blkcipher_blocksize(kctx->enc); 177 blocksize = crypto_blkcipher_blocksize(kctx->enc);
149 gss_krb5_add_padding(buf, offset, blocksize); 178 gss_krb5_add_padding(buf, offset, blocksize);
150 BUG_ON((buf->len - offset) % blocksize); 179 BUG_ON((buf->len - offset) % blocksize);
151 plainlen = blocksize + buf->len - offset; 180 plainlen = conflen + buf->len - offset;
152 181
153 headlen = g_token_size(&kctx->mech_used, 24 + plainlen) - 182 headlen = g_token_size(&kctx->mech_used,
154 (buf->len - offset); 183 GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength + plainlen) -
184 (buf->len - offset);
155 185
156 ptr = buf->head[0].iov_base + offset; 186 ptr = buf->head[0].iov_base + offset;
157 /* shift data to make room for header. */ 187 /* shift data to make room for header. */
188 xdr_extend_head(buf, offset, headlen);
189
158 /* XXX Would be cleverer to encrypt while copying. */ 190 /* XXX Would be cleverer to encrypt while copying. */
159 /* XXX bounds checking, slack, etc. */
160 memmove(ptr + headlen, ptr, buf->head[0].iov_len - offset);
161 buf->head[0].iov_len += headlen;
162 buf->len += headlen;
163 BUG_ON((buf->len - offset - headlen) % blocksize); 191 BUG_ON((buf->len - offset - headlen) % blocksize);
164 192
165 g_make_token_header(&kctx->mech_used, 193 g_make_token_header(&kctx->mech_used,
166 GSS_KRB5_TOK_HDR_LEN + 8 + plainlen, &ptr); 194 GSS_KRB5_TOK_HDR_LEN +
195 kctx->gk5e->cksumlength + plainlen, &ptr);
167 196
168 197
169 /* ptr now at header described in rfc 1964, section 1.2.1: */ 198 /* ptr now at header described in rfc 1964, section 1.2.1: */
170 ptr[0] = (unsigned char) ((KG_TOK_WRAP_MSG >> 8) & 0xff); 199 ptr[0] = (unsigned char) ((KG_TOK_WRAP_MSG >> 8) & 0xff);
171 ptr[1] = (unsigned char) (KG_TOK_WRAP_MSG & 0xff); 200 ptr[1] = (unsigned char) (KG_TOK_WRAP_MSG & 0xff);
172 201
173 msg_start = ptr + 24; 202 msg_start = ptr + GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength;
174 203
175 *(__be16 *)(ptr + 2) = htons(SGN_ALG_DES_MAC_MD5); 204 *(__be16 *)(ptr + 2) = cpu_to_le16(kctx->gk5e->signalg);
176 memset(ptr + 4, 0xff, 4); 205 memset(ptr + 4, 0xff, 4);
177 *(__be16 *)(ptr + 4) = htons(SEAL_ALG_DES); 206 *(__be16 *)(ptr + 4) = cpu_to_le16(kctx->gk5e->sealalg);
178 207
179 make_confounder(msg_start, blocksize); 208 gss_krb5_make_confounder(msg_start, conflen);
209
210 if (kctx->gk5e->keyed_cksum)
211 cksumkey = kctx->cksum;
212 else
213 cksumkey = NULL;
180 214
181 /* XXXJBF: UGH!: */ 215 /* XXXJBF: UGH!: */
182 tmp_pages = buf->pages; 216 tmp_pages = buf->pages;
183 buf->pages = pages; 217 buf->pages = pages;
184 if (make_checksum("md5", ptr, 8, buf, 218 if (make_checksum(kctx, ptr, 8, buf, offset + headlen - conflen,
185 offset + headlen - blocksize, &md5cksum)) 219 cksumkey, KG_USAGE_SEAL, &md5cksum))
186 return GSS_S_FAILURE; 220 return GSS_S_FAILURE;
187 buf->pages = tmp_pages; 221 buf->pages = tmp_pages;
188 222
189 if (krb5_encrypt(kctx->seq, NULL, md5cksum.data, 223 memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data, md5cksum.len);
190 md5cksum.data, md5cksum.len))
191 return GSS_S_FAILURE;
192 memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data + md5cksum.len - 8, 8);
193 224
194 spin_lock(&krb5_seq_lock); 225 spin_lock(&krb5_seq_lock);
195 seq_send = kctx->seq_send++; 226 seq_send = kctx->seq_send++;
@@ -197,25 +228,42 @@ gss_wrap_kerberos(struct gss_ctx *ctx, int offset,
197 228
198 /* XXX would probably be more efficient to compute checksum 229 /* XXX would probably be more efficient to compute checksum
199 * and encrypt at the same time: */ 230 * and encrypt at the same time: */
200 if ((krb5_make_seq_num(kctx->seq, kctx->initiate ? 0 : 0xff, 231 if ((krb5_make_seq_num(kctx, kctx->seq, kctx->initiate ? 0 : 0xff,
201 seq_send, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8))) 232 seq_send, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8)))
202 return GSS_S_FAILURE; 233 return GSS_S_FAILURE;
203 234
204 if (gss_encrypt_xdr_buf(kctx->enc, buf, offset + headlen - blocksize, 235 if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) {
205 pages)) 236 struct crypto_blkcipher *cipher;
206 return GSS_S_FAILURE; 237 int err;
238 cipher = crypto_alloc_blkcipher(kctx->gk5e->encrypt_name, 0,
239 CRYPTO_ALG_ASYNC);
240 if (IS_ERR(cipher))
241 return GSS_S_FAILURE;
242
243 krb5_rc4_setup_enc_key(kctx, cipher, seq_send);
244
245 err = gss_encrypt_xdr_buf(cipher, buf,
246 offset + headlen - conflen, pages);
247 crypto_free_blkcipher(cipher);
248 if (err)
249 return GSS_S_FAILURE;
250 } else {
251 if (gss_encrypt_xdr_buf(kctx->enc, buf,
252 offset + headlen - conflen, pages))
253 return GSS_S_FAILURE;
254 }
207 255
208 return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE; 256 return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
209} 257}
210 258
211u32 259static u32
212gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf) 260gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
213{ 261{
214 struct krb5_ctx *kctx = ctx->internal_ctx_id;
215 int signalg; 262 int signalg;
216 int sealalg; 263 int sealalg;
217 char cksumdata[16]; 264 char cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
218 struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata}; 265 struct xdr_netobj md5cksum = {.len = sizeof(cksumdata),
266 .data = cksumdata};
219 s32 now; 267 s32 now;
220 int direction; 268 int direction;
221 s32 seqnum; 269 s32 seqnum;
@@ -224,6 +272,9 @@ gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf)
224 void *data_start, *orig_start; 272 void *data_start, *orig_start;
225 int data_len; 273 int data_len;
226 int blocksize; 274 int blocksize;
275 u32 conflen = kctx->gk5e->conflen;
276 int crypt_offset;
277 u8 *cksumkey;
227 278
228 dprintk("RPC: gss_unwrap_kerberos\n"); 279 dprintk("RPC: gss_unwrap_kerberos\n");
229 280
@@ -241,29 +292,65 @@ gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf)
241 /* get the sign and seal algorithms */ 292 /* get the sign and seal algorithms */
242 293
243 signalg = ptr[2] + (ptr[3] << 8); 294 signalg = ptr[2] + (ptr[3] << 8);
244 if (signalg != SGN_ALG_DES_MAC_MD5) 295 if (signalg != kctx->gk5e->signalg)
245 return GSS_S_DEFECTIVE_TOKEN; 296 return GSS_S_DEFECTIVE_TOKEN;
246 297
247 sealalg = ptr[4] + (ptr[5] << 8); 298 sealalg = ptr[4] + (ptr[5] << 8);
248 if (sealalg != SEAL_ALG_DES) 299 if (sealalg != kctx->gk5e->sealalg)
249 return GSS_S_DEFECTIVE_TOKEN; 300 return GSS_S_DEFECTIVE_TOKEN;
250 301
251 if ((ptr[6] != 0xff) || (ptr[7] != 0xff)) 302 if ((ptr[6] != 0xff) || (ptr[7] != 0xff))
252 return GSS_S_DEFECTIVE_TOKEN; 303 return GSS_S_DEFECTIVE_TOKEN;
253 304
254 if (gss_decrypt_xdr_buf(kctx->enc, buf, 305 /*
255 ptr + GSS_KRB5_TOK_HDR_LEN + 8 - (unsigned char *)buf->head[0].iov_base)) 306 * Data starts after token header and checksum. ptr points
256 return GSS_S_DEFECTIVE_TOKEN; 307 * to the beginning of the token header
308 */
309 crypt_offset = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) -
310 (unsigned char *)buf->head[0].iov_base;
311
312 /*
313 * Need plaintext seqnum to derive encryption key for arcfour-hmac
314 */
315 if (krb5_get_seq_num(kctx, ptr + GSS_KRB5_TOK_HDR_LEN,
316 ptr + 8, &direction, &seqnum))
317 return GSS_S_BAD_SIG;
257 318
258 if (make_checksum("md5", ptr, 8, buf, 319 if ((kctx->initiate && direction != 0xff) ||
259 ptr + GSS_KRB5_TOK_HDR_LEN + 8 - (unsigned char *)buf->head[0].iov_base, &md5cksum)) 320 (!kctx->initiate && direction != 0))
260 return GSS_S_FAILURE; 321 return GSS_S_BAD_SIG;
322
323 if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) {
324 struct crypto_blkcipher *cipher;
325 int err;
326
327 cipher = crypto_alloc_blkcipher(kctx->gk5e->encrypt_name, 0,
328 CRYPTO_ALG_ASYNC);
329 if (IS_ERR(cipher))
330 return GSS_S_FAILURE;
331
332 krb5_rc4_setup_enc_key(kctx, cipher, seqnum);
261 333
262 if (krb5_encrypt(kctx->seq, NULL, md5cksum.data, 334 err = gss_decrypt_xdr_buf(cipher, buf, crypt_offset);
263 md5cksum.data, md5cksum.len)) 335 crypto_free_blkcipher(cipher);
336 if (err)
337 return GSS_S_DEFECTIVE_TOKEN;
338 } else {
339 if (gss_decrypt_xdr_buf(kctx->enc, buf, crypt_offset))
340 return GSS_S_DEFECTIVE_TOKEN;
341 }
342
343 if (kctx->gk5e->keyed_cksum)
344 cksumkey = kctx->cksum;
345 else
346 cksumkey = NULL;
347
348 if (make_checksum(kctx, ptr, 8, buf, crypt_offset,
349 cksumkey, KG_USAGE_SEAL, &md5cksum))
264 return GSS_S_FAILURE; 350 return GSS_S_FAILURE;
265 351
266 if (memcmp(md5cksum.data + 8, ptr + GSS_KRB5_TOK_HDR_LEN, 8)) 352 if (memcmp(md5cksum.data, ptr + GSS_KRB5_TOK_HDR_LEN,
353 kctx->gk5e->cksumlength))
267 return GSS_S_BAD_SIG; 354 return GSS_S_BAD_SIG;
268 355
269 /* it got through unscathed. Make sure the context is unexpired */ 356 /* it got through unscathed. Make sure the context is unexpired */
@@ -275,19 +362,12 @@ gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf)
275 362
276 /* do sequencing checks */ 363 /* do sequencing checks */
277 364
278 if (krb5_get_seq_num(kctx->seq, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8,
279 &direction, &seqnum))
280 return GSS_S_BAD_SIG;
281
282 if ((kctx->initiate && direction != 0xff) ||
283 (!kctx->initiate && direction != 0))
284 return GSS_S_BAD_SIG;
285
286 /* Copy the data back to the right position. XXX: Would probably be 365 /* Copy the data back to the right position. XXX: Would probably be
287 * better to copy and encrypt at the same time. */ 366 * better to copy and encrypt at the same time. */
288 367
289 blocksize = crypto_blkcipher_blocksize(kctx->enc); 368 blocksize = crypto_blkcipher_blocksize(kctx->enc);
290 data_start = ptr + GSS_KRB5_TOK_HDR_LEN + 8 + blocksize; 369 data_start = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) +
370 conflen;
291 orig_start = buf->head[0].iov_base + offset; 371 orig_start = buf->head[0].iov_base + offset;
292 data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start; 372 data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start;
293 memmove(orig_start, data_start, data_len); 373 memmove(orig_start, data_start, data_len);
@@ -299,3 +379,209 @@ gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf)
299 379
300 return GSS_S_COMPLETE; 380 return GSS_S_COMPLETE;
301} 381}
382
383/*
384 * We cannot currently handle tokens with rotated data. We need a
385 * generalized routine to rotate the data in place. It is anticipated
386 * that we won't encounter rotated data in the general case.
387 */
388static u32
389rotate_left(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf, u16 rrc)
390{
391 unsigned int realrrc = rrc % (buf->len - offset - GSS_KRB5_TOK_HDR_LEN);
392
393 if (realrrc == 0)
394 return 0;
395
396 dprintk("%s: cannot process token with rotated data: "
397 "rrc %u, realrrc %u\n", __func__, rrc, realrrc);
398 return 1;
399}
400
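For a flat buffer the rotation itself would be simple; the difficulty is that an xdr_buf scatters the data across head, pages, and tail. A hypothetical sketch of the flat case only (rotate_left_flat is an illustration, not a proposed kernel helper; undoing a right-rotation by rrc is a left-rotation by rrc over the region following the token header):

	#include <stdlib.h>
	#include <string.h>

	static int rotate_left_flat(unsigned char *data, size_t len, size_t shift)
	{
		unsigned char *tmp;

		shift %= len;
		if (shift == 0)
			return 0;
		tmp = malloc(shift);
		if (tmp == NULL)
			return -1;
		memcpy(tmp, data, shift);		  /* save the prefix */
		memmove(data, data + shift, len - shift); /* slide the rest down */
		memcpy(data + len - shift, tmp, shift);	  /* re-append prefix */
		free(tmp);
		return 0;
	}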
401static u32
402gss_wrap_kerberos_v2(struct krb5_ctx *kctx, u32 offset,
403 struct xdr_buf *buf, struct page **pages)
404{
405 int blocksize;
406 u8 *ptr, *plainhdr;
407 s32 now;
408 u8 flags = 0x00;
409 __be16 *be16ptr, ec = 0;
410 __be64 *be64ptr;
411 u32 err;
412
413 dprintk("RPC: %s\n", __func__);
414
415 if (kctx->gk5e->encrypt_v2 == NULL)
416 return GSS_S_FAILURE;
417
418 /* make room for gss token header */
419 if (xdr_extend_head(buf, offset, GSS_KRB5_TOK_HDR_LEN))
420 return GSS_S_FAILURE;
421
422 /* construct gss token header */
423 ptr = plainhdr = buf->head[0].iov_base + offset;
424 *ptr++ = (unsigned char) ((KG2_TOK_WRAP>>8) & 0xff);
425 *ptr++ = (unsigned char) (KG2_TOK_WRAP & 0xff);
426
427 if ((kctx->flags & KRB5_CTX_FLAG_INITIATOR) == 0)
428 flags |= KG2_TOKEN_FLAG_SENTBYACCEPTOR;
429 if ((kctx->flags & KRB5_CTX_FLAG_ACCEPTOR_SUBKEY) != 0)
430 flags |= KG2_TOKEN_FLAG_ACCEPTORSUBKEY;
431 /* We always do confidentiality in wrap tokens */
432 flags |= KG2_TOKEN_FLAG_SEALED;
433
434 *ptr++ = flags;
435 *ptr++ = 0xff;
436 be16ptr = (__be16 *)ptr;
437
438 blocksize = crypto_blkcipher_blocksize(kctx->acceptor_enc);
439 *be16ptr++ = cpu_to_be16(ec);
440 /* "inner" token header always uses 0 for RRC */
441 *be16ptr++ = cpu_to_be16(0);
442
443 be64ptr = (__be64 *)be16ptr;
444 spin_lock(&krb5_seq_lock);
445 *be64ptr = cpu_to_be64(kctx->seq_send64++);
446 spin_unlock(&krb5_seq_lock);
447
448 err = (*kctx->gk5e->encrypt_v2)(kctx, offset, buf, ec, pages);
449 if (err)
450 return err;
451
452 now = get_seconds();
453 return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
454}
455
456static u32
457gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
458{
459 s32 now;
460 u64 seqnum;
461 u8 *ptr;
462 u8 flags = 0x00;
463 u16 ec, rrc;
464 int err;
465 u32 headskip, tailskip;
466 u8 decrypted_hdr[GSS_KRB5_TOK_HDR_LEN];
467 unsigned int movelen;
468
469
470 dprintk("RPC: %s\n", __func__);
471
472 if (kctx->gk5e->decrypt_v2 == NULL)
473 return GSS_S_FAILURE;
474
475 ptr = buf->head[0].iov_base + offset;
476
477 if (be16_to_cpu(*((__be16 *)ptr)) != KG2_TOK_WRAP)
478 return GSS_S_DEFECTIVE_TOKEN;
479
480 flags = ptr[2];
481 if ((!kctx->initiate && (flags & KG2_TOKEN_FLAG_SENTBYACCEPTOR)) ||
482 (kctx->initiate && !(flags & KG2_TOKEN_FLAG_SENTBYACCEPTOR)))
483 return GSS_S_BAD_SIG;
484
485 if ((flags & KG2_TOKEN_FLAG_SEALED) == 0) {
486 dprintk("%s: token missing expected sealed flag\n", __func__);
487 return GSS_S_DEFECTIVE_TOKEN;
488 }
489
490 if (ptr[3] != 0xff)
491 return GSS_S_DEFECTIVE_TOKEN;
492
493 ec = be16_to_cpup((__be16 *)(ptr + 4));
494 rrc = be16_to_cpup((__be16 *)(ptr + 6));
495
496 seqnum = be64_to_cpup((__be64 *)(ptr + 8));
497
498 if (rrc != 0) {
499 err = rotate_left(kctx, offset, buf, rrc);
500 if (err)
501 return GSS_S_FAILURE;
502 }
503
504 err = (*kctx->gk5e->decrypt_v2)(kctx, offset, buf,
505 &headskip, &tailskip);
506 if (err)
507 return GSS_S_FAILURE;
508
509 /*
510 * Retrieve the decrypted gss token header and verify
511 * it against the original
512 */
513 err = read_bytes_from_xdr_buf(buf,
514 buf->len - GSS_KRB5_TOK_HDR_LEN - tailskip,
515 decrypted_hdr, GSS_KRB5_TOK_HDR_LEN);
516 if (err) {
517 dprintk("%s: error %u getting decrypted_hdr\n", __func__, err);
518 return GSS_S_FAILURE;
519 }
520 if (memcmp(ptr, decrypted_hdr, 6)
521 || memcmp(ptr + 8, decrypted_hdr + 8, 8)) {
522 dprintk("%s: token hdr, plaintext hdr mismatch!\n", __func__);
523 return GSS_S_FAILURE;
524 }
525
526 /* do sequencing checks */
527
528 /* it got through unscathed. Make sure the context is unexpired */
529 now = get_seconds();
530 if (now > kctx->endtime)
531 return GSS_S_CONTEXT_EXPIRED;
532
533 /*
534 * Move the head data back to the right position in xdr_buf.
535 * We ignore any "ec" data since it might be in the head or
536 * the tail, and we really don't need to deal with it.
537 * Note that buf->head[0].iov_len may indicate the available
538 * head buffer space rather than that actually occupied.
539 */
540 movelen = min_t(unsigned int, buf->head[0].iov_len, buf->len);
541 movelen -= offset + GSS_KRB5_TOK_HDR_LEN + headskip;
542 BUG_ON(offset + GSS_KRB5_TOK_HDR_LEN + headskip + movelen >
543 buf->head[0].iov_len);
544 memmove(ptr, ptr + GSS_KRB5_TOK_HDR_LEN + headskip, movelen);
545 buf->head[0].iov_len -= GSS_KRB5_TOK_HDR_LEN + headskip;
546 buf->len -= GSS_KRB5_TOK_HDR_LEN + headskip;
547
548 return GSS_S_COMPLETE;
549}
550
551u32
552gss_wrap_kerberos(struct gss_ctx *gctx, int offset,
553 struct xdr_buf *buf, struct page **pages)
554{
555 struct krb5_ctx *kctx = gctx->internal_ctx_id;
556
557 switch (kctx->enctype) {
558 default:
559 BUG();
560 case ENCTYPE_DES_CBC_RAW:
561 case ENCTYPE_DES3_CBC_RAW:
562 case ENCTYPE_ARCFOUR_HMAC:
563 return gss_wrap_kerberos_v1(kctx, offset, buf, pages);
564 case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
565 case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
566 return gss_wrap_kerberos_v2(kctx, offset, buf, pages);
567 }
568}
569
570u32
571gss_unwrap_kerberos(struct gss_ctx *gctx, int offset, struct xdr_buf *buf)
572{
573 struct krb5_ctx *kctx = gctx->internal_ctx_id;
574
575 switch (kctx->enctype) {
576 default:
577 BUG();
578 case ENCTYPE_DES_CBC_RAW:
579 case ENCTYPE_DES3_CBC_RAW:
580 case ENCTYPE_ARCFOUR_HMAC:
581 return gss_unwrap_kerberos_v1(kctx, offset, buf);
582 case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
583 case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
584 return gss_unwrap_kerberos_v2(kctx, offset, buf);
585 }
586}
587
diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c
index 76e4c6f4ac3c..2689de39dc78 100644
--- a/net/sunrpc/auth_gss/gss_mech_switch.c
+++ b/net/sunrpc/auth_gss/gss_mech_switch.c
@@ -249,14 +249,15 @@ EXPORT_SYMBOL_GPL(gss_mech_put);
249int 249int
250gss_import_sec_context(const void *input_token, size_t bufsize, 250gss_import_sec_context(const void *input_token, size_t bufsize,
251 struct gss_api_mech *mech, 251 struct gss_api_mech *mech,
252 struct gss_ctx **ctx_id) 252 struct gss_ctx **ctx_id,
253 gfp_t gfp_mask)
253{ 254{
254 if (!(*ctx_id = kzalloc(sizeof(**ctx_id), GFP_KERNEL))) 255 if (!(*ctx_id = kzalloc(sizeof(**ctx_id), gfp_mask)))
255 return -ENOMEM; 256 return -ENOMEM;
256 (*ctx_id)->mech_type = gss_mech_get(mech); 257 (*ctx_id)->mech_type = gss_mech_get(mech);
257 258
258 return mech->gm_ops 259 return mech->gm_ops
259 ->gss_import_sec_context(input_token, bufsize, *ctx_id); 260 ->gss_import_sec_context(input_token, bufsize, *ctx_id, gfp_mask);
260} 261}
261 262
262/* gss_get_mic: compute a mic over message and return mic_token. */ 263/* gss_get_mic: compute a mic over message and return mic_token. */
@@ -285,6 +286,20 @@ gss_verify_mic(struct gss_ctx *context_handle,
285 mic_token); 286 mic_token);
286} 287}
287 288
289/*
290 * This function is called from both the client and server code.
291 * Each makes guarantees about how much "slack" space is available
292 * for the underlying function in "buf"'s head and tail while
293 * performing the wrap.
294 *
295 * The client and server code allocate RPC_MAX_AUTH_SIZE extra
296 * space in both the head and tail which is available for use by
297 * the wrap function.
298 *
299 * Underlying functions should verify they do not use more than
300 * RPC_MAX_AUTH_SIZE of extra space in either the head or tail
301 * when performing the wrap.
302 */
288u32 303u32
289gss_wrap(struct gss_ctx *ctx_id, 304gss_wrap(struct gss_ctx *ctx_id,
290 int offset, 305 int offset,
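A sketch of the contract the new comment states, with illustrative assertions wrapped around the mech dispatch (the before/after variables are hypothetical and the buf/inpages parameter names are assumed; RPC_MAX_AUTH_SIZE and the gm_ops call shape are taken from this file):

	size_t head_before = buf->head[0].iov_len;
	size_t tail_before = buf->tail[0].iov_len;
	u32 maj_stat;

	/* callers guarantee RPC_MAX_AUTH_SIZE of slack in head and tail */
	maj_stat = ctx_id->mech_type->gm_ops->gss_wrap(ctx_id, offset,
						       buf, inpages);

	/* a conforming mech must not consume more slack than that */
	WARN_ON(buf->head[0].iov_len - head_before > RPC_MAX_AUTH_SIZE);
	WARN_ON(buf->tail[0].iov_len - tail_before > RPC_MAX_AUTH_SIZE);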
diff --git a/net/sunrpc/auth_gss/gss_spkm3_mech.c b/net/sunrpc/auth_gss/gss_spkm3_mech.c
index 035e1dd6af1b..dc3f1f5ed865 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_mech.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_mech.c
@@ -84,13 +84,14 @@ simple_get_netobj(const void *p, const void *end, struct xdr_netobj *res)
84 84
85static int 85static int
86gss_import_sec_context_spkm3(const void *p, size_t len, 86gss_import_sec_context_spkm3(const void *p, size_t len,
87 struct gss_ctx *ctx_id) 87 struct gss_ctx *ctx_id,
88 gfp_t gfp_mask)
88{ 89{
89 const void *end = (const void *)((const char *)p + len); 90 const void *end = (const void *)((const char *)p + len);
90 struct spkm3_ctx *ctx; 91 struct spkm3_ctx *ctx;
91 int version; 92 int version;
92 93
93 if (!(ctx = kzalloc(sizeof(*ctx), GFP_NOFS))) 94 if (!(ctx = kzalloc(sizeof(*ctx), gfp_mask)))
94 goto out_err; 95 goto out_err;
95 96
96 p = simple_get_bytes(p, end, &version, sizeof(version)); 97 p = simple_get_bytes(p, end, &version, sizeof(version));
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index b81e790ef9f4..cc385b3a59c2 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -494,7 +494,7 @@ static int rsc_parse(struct cache_detail *cd,
494 len = qword_get(&mesg, buf, mlen); 494 len = qword_get(&mesg, buf, mlen);
495 if (len < 0) 495 if (len < 0)
496 goto out; 496 goto out;
497 status = gss_import_sec_context(buf, len, gm, &rsci.mechctx); 497 status = gss_import_sec_context(buf, len, gm, &rsci.mechctx, GFP_KERNEL);
498 if (status) 498 if (status)
499 goto out; 499 goto out;
500 500
@@ -1315,6 +1315,14 @@ svcauth_gss_wrap_resp_priv(struct svc_rqst *rqstp)
1315 inpages = resbuf->pages; 1315 inpages = resbuf->pages;
1316 /* XXX: Would be better to write some xdr helper functions for 1316 /* XXX: Would be better to write some xdr helper functions for
1317 * nfs{2,3,4}xdr.c that place the data right, instead of copying: */ 1317 * nfs{2,3,4}xdr.c that place the data right, instead of copying: */
1318
1319 /*
1320 * If there is currently tail data, make sure there is
1321 * room for the head, tail, and 2 * RPC_MAX_AUTH_SIZE in
1322 * the page, and move the current tail data such that
1323 * there is RPC_MAX_AUTH_SIZE slack space available in
1324 * both the head and tail.
1325 */
1318 if (resbuf->tail[0].iov_base) { 1326 if (resbuf->tail[0].iov_base) {
1319 BUG_ON(resbuf->tail[0].iov_base >= resbuf->head[0].iov_base 1327 BUG_ON(resbuf->tail[0].iov_base >= resbuf->head[0].iov_base
1320 + PAGE_SIZE); 1328 + PAGE_SIZE);
@@ -1327,6 +1335,13 @@ svcauth_gss_wrap_resp_priv(struct svc_rqst *rqstp)
1327 resbuf->tail[0].iov_len); 1335 resbuf->tail[0].iov_len);
1328 resbuf->tail[0].iov_base += RPC_MAX_AUTH_SIZE; 1336 resbuf->tail[0].iov_base += RPC_MAX_AUTH_SIZE;
1329 } 1337 }
1338 /*
1339 * If there is no current tail data, make sure there is
1340 * room for the head data, and 2 * RPC_MAX_AUTH_SIZE in the
1341 * allotted page, and set up tail information such that there
1342 * is RPC_MAX_AUTH_SIZE slack space available in both the
1343 * head and tail.
1344 */
1330 if (resbuf->tail[0].iov_base == NULL) { 1345 if (resbuf->tail[0].iov_base == NULL) {
1331 if (resbuf->head[0].iov_len + 2*RPC_MAX_AUTH_SIZE > PAGE_SIZE) 1346 if (resbuf->head[0].iov_len + 2*RPC_MAX_AUTH_SIZE > PAGE_SIZE)
1332 return -ENOMEM; 1347 return -ENOMEM;
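The single-page layout both comments aim for, sketched (tail data, when present, is slid up so that RPC_MAX_AUTH_SIZE of slack follows both the head and the tail):

	+-----------+-------------------+-----------+-------------------+
	| head data | RPC_MAX_AUTH_SIZE | tail data | RPC_MAX_AUTH_SIZE |
	|           |       slack       |  (moved)  |       slack       |
	+-----------+-------------------+-----------+-------------------+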
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 19c9983d5360..8c7b5433022a 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -556,26 +556,16 @@ static const struct rpc_call_ops rpc_default_ops = {
556 */ 556 */
557struct rpc_task *rpc_run_task(const struct rpc_task_setup *task_setup_data) 557struct rpc_task *rpc_run_task(const struct rpc_task_setup *task_setup_data)
558{ 558{
559 struct rpc_task *task, *ret; 559 struct rpc_task *task;
560 560
561 task = rpc_new_task(task_setup_data); 561 task = rpc_new_task(task_setup_data);
562 if (task == NULL) { 562 if (IS_ERR(task))
563 rpc_release_calldata(task_setup_data->callback_ops,
564 task_setup_data->callback_data);
565 ret = ERR_PTR(-ENOMEM);
566 goto out; 563 goto out;
567 }
568 564
569 if (task->tk_status != 0) {
570 ret = ERR_PTR(task->tk_status);
571 rpc_put_task(task);
572 goto out;
573 }
574 atomic_inc(&task->tk_count); 565 atomic_inc(&task->tk_count);
575 rpc_execute(task); 566 rpc_execute(task);
576 ret = task;
577out: 567out:
578 return ret; 568 return task;
579} 569}
580EXPORT_SYMBOL_GPL(rpc_run_task); 570EXPORT_SYMBOL_GPL(rpc_run_task);
581 571
@@ -657,9 +647,8 @@ struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req,
657 * Create an rpc_task to send the data 647 * Create an rpc_task to send the data
658 */ 648 */
659 task = rpc_new_task(&task_setup_data); 649 task = rpc_new_task(&task_setup_data);
660 if (!task) { 650 if (IS_ERR(task)) {
661 xprt_free_bc_request(req); 651 xprt_free_bc_request(req);
662 task = ERR_PTR(-ENOMEM);
663 goto out; 652 goto out;
664 } 653 }
665 task->tk_rqstp = req; 654 task->tk_rqstp = req;
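rpc_run_task() and rpc_run_bc_task() lose their NULL checks because rpc_new_task() (see the sched.c hunk below) now reports failure as an ERR_PTR-encoded errno. For reference, the generic idiom, using a hypothetical widget type:

#include <linux/err.h>
#include <linux/slab.h>

struct widget { int id; };	/* illustrative */

struct widget *widget_create(gfp_t gfp)
{
	struct widget *w = kzalloc(sizeof(*w), gfp);

	if (!w)
		return ERR_PTR(-ENOMEM);	/* errno rides in the pointer */
	return w;
}

	/* caller side */
	struct widget *w = widget_create(GFP_KERNEL);
	if (IS_ERR(w))
		return PTR_ERR(w);		/* recover the errno */

The benefit visible in the hunk above: distinct failure causes (allocation vs. a bad tk_status) reach the caller without a separate out-parameter or a sentinel NULL.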
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index aae6907fd546..4a843b883b89 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -25,7 +25,6 @@
25 25
26#ifdef RPC_DEBUG 26#ifdef RPC_DEBUG
27#define RPCDBG_FACILITY RPCDBG_SCHED 27#define RPCDBG_FACILITY RPCDBG_SCHED
28#define RPC_TASK_MAGIC_ID 0xf00baa
29#endif 28#endif
30 29
31/* 30/*
@@ -237,7 +236,6 @@ static void rpc_task_set_debuginfo(struct rpc_task *task)
237{ 236{
238 static atomic_t rpc_pid; 237 static atomic_t rpc_pid;
239 238
240 task->tk_magic = RPC_TASK_MAGIC_ID;
241 task->tk_pid = atomic_inc_return(&rpc_pid); 239 task->tk_pid = atomic_inc_return(&rpc_pid);
242} 240}
243#else 241#else
@@ -360,9 +358,6 @@ static void __rpc_do_wake_up_task(struct rpc_wait_queue *queue, struct rpc_task
360 dprintk("RPC: %5u __rpc_wake_up_task (now %lu)\n", 358 dprintk("RPC: %5u __rpc_wake_up_task (now %lu)\n",
361 task->tk_pid, jiffies); 359 task->tk_pid, jiffies);
362 360
363#ifdef RPC_DEBUG
364 BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID);
365#endif
366 /* Has the task been executed yet? If not, we cannot wake it up! */ 361 /* Has the task been executed yet? If not, we cannot wake it up! */
367 if (!RPC_IS_ACTIVATED(task)) { 362 if (!RPC_IS_ACTIVATED(task)) {
368 printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task); 363 printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
@@ -834,7 +829,7 @@ static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *ta
834 } 829 }
835 830
836 /* starting timestamp */ 831 /* starting timestamp */
837 task->tk_start = jiffies; 832 task->tk_start = ktime_get();
838 833
839 dprintk("RPC: new task initialized, procpid %u\n", 834 dprintk("RPC: new task initialized, procpid %u\n",
840 task_pid_nr(current)); 835 task_pid_nr(current));
@@ -856,16 +851,23 @@ struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)
856 851
857 if (task == NULL) { 852 if (task == NULL) {
858 task = rpc_alloc_task(); 853 task = rpc_alloc_task();
859 if (task == NULL) 854 if (task == NULL) {
860 goto out; 855 rpc_release_calldata(setup_data->callback_ops,
856 setup_data->callback_data);
857 return ERR_PTR(-ENOMEM);
858 }
861 flags = RPC_TASK_DYNAMIC; 859 flags = RPC_TASK_DYNAMIC;
862 } 860 }
863 861
864 rpc_init_task(task, setup_data); 862 rpc_init_task(task, setup_data);
863 if (task->tk_status < 0) {
864 int err = task->tk_status;
865 rpc_put_task(task);
866 return ERR_PTR(err);
867 }
865 868
866 task->tk_flags |= flags; 869 task->tk_flags |= flags;
867 dprintk("RPC: allocated task %p\n", task); 870 dprintk("RPC: allocated task %p\n", task);
868out:
869 return task; 871 return task;
870} 872}
871 873
@@ -909,9 +911,6 @@ EXPORT_SYMBOL_GPL(rpc_put_task);
909 911
910static void rpc_release_task(struct rpc_task *task) 912static void rpc_release_task(struct rpc_task *task)
911{ 913{
912#ifdef RPC_DEBUG
913 BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID);
914#endif
915 dprintk("RPC: %5u release task\n", task->tk_pid); 914 dprintk("RPC: %5u release task\n", task->tk_pid);
916 915
917 if (!list_empty(&task->tk_task)) { 916 if (!list_empty(&task->tk_task)) {
@@ -923,9 +922,6 @@ static void rpc_release_task(struct rpc_task *task)
923 } 922 }
924 BUG_ON (RPC_IS_QUEUED(task)); 923 BUG_ON (RPC_IS_QUEUED(task));
925 924
926#ifdef RPC_DEBUG
927 task->tk_magic = 0;
928#endif
929 /* Wake up anyone who is waiting for task completion */ 925 /* Wake up anyone who is waiting for task completion */
930 rpc_mark_complete_task(task); 926 rpc_mark_complete_task(task);
931 927
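The sched.c side of that change: rpc_new_task() now owns every failure path, releasing the caller's calldata when the slab allocation fails and routing an rpc_init_task() error through rpc_put_task() so teardown stays on the normal path. The shape of such a constructor, with illustrative names (alloc_thing, thing_init, thing_put are stand-ins, not kernel APIs):

/* Sketch: a constructor that cleans up everything it was handed,
 * so callers only ever check IS_ERR(). */
struct thing *thing_new(struct thing_callbacks *cb, void *data)
{
	struct thing *t = alloc_thing();

	if (!t) {
		release_callbacks(cb, data);	/* constructor owns these now */
		return ERR_PTR(-ENOMEM);
	}
	thing_init(t, cb, data);
	if (t->status < 0) {
		int err = t->status;

		thing_put(t);			/* normal teardown path */
		return ERR_PTR(err);
	}
	return t;
}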
diff --git a/net/sunrpc/stats.c b/net/sunrpc/stats.c
index 5785d2037f45..ea1046f3f9a3 100644
--- a/net/sunrpc/stats.c
+++ b/net/sunrpc/stats.c
@@ -144,7 +144,7 @@ void rpc_count_iostats(struct rpc_task *task)
144 struct rpc_rqst *req = task->tk_rqstp; 144 struct rpc_rqst *req = task->tk_rqstp;
145 struct rpc_iostats *stats; 145 struct rpc_iostats *stats;
146 struct rpc_iostats *op_metrics; 146 struct rpc_iostats *op_metrics;
147 long rtt, execute, queue; 147 ktime_t delta;
148 148
149 if (!task->tk_client || !task->tk_client->cl_metrics || !req) 149 if (!task->tk_client || !task->tk_client->cl_metrics || !req)
150 return; 150 return;
@@ -156,23 +156,16 @@ void rpc_count_iostats(struct rpc_task *task)
156 op_metrics->om_ntrans += req->rq_ntrans; 156 op_metrics->om_ntrans += req->rq_ntrans;
157 op_metrics->om_timeouts += task->tk_timeouts; 157 op_metrics->om_timeouts += task->tk_timeouts;
158 158
159 op_metrics->om_bytes_sent += task->tk_bytes_sent; 159 op_metrics->om_bytes_sent += req->rq_xmit_bytes_sent;
160 op_metrics->om_bytes_recv += req->rq_reply_bytes_recvd; 160 op_metrics->om_bytes_recv += req->rq_reply_bytes_recvd;
161 161
162 queue = (long)req->rq_xtime - task->tk_start; 162 delta = ktime_sub(req->rq_xtime, task->tk_start);
163 if (queue < 0) 163 op_metrics->om_queue = ktime_add(op_metrics->om_queue, delta);
164 queue = -queue;
165 op_metrics->om_queue += queue;
166 164
167 rtt = task->tk_rtt; 165 op_metrics->om_rtt = ktime_add(op_metrics->om_rtt, req->rq_rtt);
168 if (rtt < 0)
169 rtt = -rtt;
170 op_metrics->om_rtt += rtt;
171 166
172 execute = (long)jiffies - task->tk_start; 167 delta = ktime_sub(ktime_get(), task->tk_start);
173 if (execute < 0) 168 op_metrics->om_execute = ktime_add(op_metrics->om_execute, delta);
174 execute = -execute;
175 op_metrics->om_execute += execute;
176} 169}
177 170
178static void _print_name(struct seq_file *seq, unsigned int op, 171static void _print_name(struct seq_file *seq, unsigned int op,
@@ -186,8 +179,6 @@ static void _print_name(struct seq_file *seq, unsigned int op,
186 seq_printf(seq, "\t%12u: ", op); 179 seq_printf(seq, "\t%12u: ", op);
187} 180}
188 181
189#define MILLISECS_PER_JIFFY (1000 / HZ)
190
191void rpc_print_iostats(struct seq_file *seq, struct rpc_clnt *clnt) 182void rpc_print_iostats(struct seq_file *seq, struct rpc_clnt *clnt)
192{ 183{
193 struct rpc_iostats *stats = clnt->cl_metrics; 184 struct rpc_iostats *stats = clnt->cl_metrics;
@@ -214,9 +205,9 @@ void rpc_print_iostats(struct seq_file *seq, struct rpc_clnt *clnt)
214 metrics->om_timeouts, 205 metrics->om_timeouts,
215 metrics->om_bytes_sent, 206 metrics->om_bytes_sent,
216 metrics->om_bytes_recv, 207 metrics->om_bytes_recv,
217 metrics->om_queue * MILLISECS_PER_JIFFY, 208 ktime_to_ms(metrics->om_queue),
218 metrics->om_rtt * MILLISECS_PER_JIFFY, 209 ktime_to_ms(metrics->om_rtt),
219 metrics->om_execute * MILLISECS_PER_JIFFY); 210 ktime_to_ms(metrics->om_execute));
220 } 211 }
221} 212}
222EXPORT_SYMBOL_GPL(rpc_print_iostats); 213EXPORT_SYMBOL_GPL(rpc_print_iostats);
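With task->tk_start and req->rq_xtime now ktime_t, the metrics code drops the signed-jiffies arithmetic (and its absolute-value fixups) in favor of the ktime helpers, converting to milliseconds only at print time via the newly introduced ktime_to_ms(). The helpers in isolation, as a sketch with an illustrative accumulator:

#include <linux/ktime.h>
#include <linux/printk.h>

static ktime_t om_total;		/* illustrative accumulator */

static void measure_once(void)
{
	ktime_t start = ktime_get();	/* monotonic, ns resolution */

	/* ... the operation being timed ... */

	om_total = ktime_add(om_total, ktime_sub(ktime_get(), start));
	pr_info("cumulative: %lld ms\n", ktime_to_ms(om_total));
}

This also removes the HZ dependence: the old MILLISECS_PER_JIFFY macro computed 1000 / HZ in integer math, which is exact only when HZ divides 1000.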
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 2763fde88499..a1f82a87d34d 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -762,6 +762,7 @@ int write_bytes_to_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, un
762 __write_bytes_to_xdr_buf(&subbuf, obj, len); 762 __write_bytes_to_xdr_buf(&subbuf, obj, len);
763 return 0; 763 return 0;
764} 764}
765EXPORT_SYMBOL_GPL(write_bytes_to_xdr_buf);
765 766
766int 767int
767xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj) 768xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj)
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 42f09ade0044..65fe2e4e7cbf 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -43,6 +43,7 @@
43#include <linux/interrupt.h> 43#include <linux/interrupt.h>
44#include <linux/workqueue.h> 44#include <linux/workqueue.h>
45#include <linux/net.h> 45#include <linux/net.h>
46#include <linux/ktime.h>
46 47
47#include <linux/sunrpc/clnt.h> 48#include <linux/sunrpc/clnt.h>
48#include <linux/sunrpc/metrics.h> 49#include <linux/sunrpc/metrics.h>
@@ -62,7 +63,6 @@
62 * Local functions 63 * Local functions
63 */ 64 */
64static void xprt_request_init(struct rpc_task *, struct rpc_xprt *); 65static void xprt_request_init(struct rpc_task *, struct rpc_xprt *);
65static inline void do_xprt_reserve(struct rpc_task *);
66static void xprt_connect_status(struct rpc_task *task); 66static void xprt_connect_status(struct rpc_task *task);
67static int __xprt_get_cong(struct rpc_xprt *, struct rpc_task *); 67static int __xprt_get_cong(struct rpc_xprt *, struct rpc_task *);
68 68
@@ -711,12 +711,16 @@ void xprt_connect(struct rpc_task *task)
711 if (task->tk_rqstp) 711 if (task->tk_rqstp)
712 task->tk_rqstp->rq_bytes_sent = 0; 712 task->tk_rqstp->rq_bytes_sent = 0;
713 713
714 task->tk_timeout = xprt->connect_timeout; 714 task->tk_timeout = task->tk_rqstp->rq_timeout;
715 rpc_sleep_on(&xprt->pending, task, xprt_connect_status); 715 rpc_sleep_on(&xprt->pending, task, xprt_connect_status);
716
717 if (test_bit(XPRT_CLOSING, &xprt->state))
718 return;
719 if (xprt_test_and_set_connecting(xprt))
720 return;
716 xprt->stat.connect_start = jiffies; 721 xprt->stat.connect_start = jiffies;
717 xprt->ops->connect(task); 722 xprt->ops->connect(task);
718 } 723 }
719 return;
720} 724}
721 725
722static void xprt_connect_status(struct rpc_task *task) 726static void xprt_connect_status(struct rpc_task *task)
@@ -771,25 +775,19 @@ struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
771} 775}
772EXPORT_SYMBOL_GPL(xprt_lookup_rqst); 776EXPORT_SYMBOL_GPL(xprt_lookup_rqst);
773 777
774/** 778static void xprt_update_rtt(struct rpc_task *task)
775 * xprt_update_rtt - update an RPC client's RTT state after receiving a reply
776 * @task: RPC request that recently completed
777 *
778 */
779void xprt_update_rtt(struct rpc_task *task)
780{ 779{
781 struct rpc_rqst *req = task->tk_rqstp; 780 struct rpc_rqst *req = task->tk_rqstp;
782 struct rpc_rtt *rtt = task->tk_client->cl_rtt; 781 struct rpc_rtt *rtt = task->tk_client->cl_rtt;
783 unsigned timer = task->tk_msg.rpc_proc->p_timer; 782 unsigned timer = task->tk_msg.rpc_proc->p_timer;
783 long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt));
784 784
785 if (timer) { 785 if (timer) {
786 if (req->rq_ntrans == 1) 786 if (req->rq_ntrans == 1)
787 rpc_update_rtt(rtt, timer, 787 rpc_update_rtt(rtt, timer, m);
788 (long)jiffies - req->rq_xtime);
789 rpc_set_timeo(rtt, timer, req->rq_ntrans - 1); 788 rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
790 } 789 }
791} 790}
792EXPORT_SYMBOL_GPL(xprt_update_rtt);
793 791
794/** 792/**
795 * xprt_complete_rqst - called when reply processing is complete 793 * xprt_complete_rqst - called when reply processing is complete
@@ -807,7 +805,9 @@ void xprt_complete_rqst(struct rpc_task *task, int copied)
807 task->tk_pid, ntohl(req->rq_xid), copied); 805 task->tk_pid, ntohl(req->rq_xid), copied);
808 806
809 xprt->stat.recvs++; 807 xprt->stat.recvs++;
810 task->tk_rtt = (long)jiffies - req->rq_xtime; 808 req->rq_rtt = ktime_sub(ktime_get(), req->rq_xtime);
809 if (xprt->ops->timer != NULL)
810 xprt_update_rtt(task);
811 811
812 list_del_init(&req->rq_list); 812 list_del_init(&req->rq_list);
813 req->rq_private_buf.len = copied; 813 req->rq_private_buf.len = copied;
@@ -906,7 +906,7 @@ void xprt_transmit(struct rpc_task *task)
906 return; 906 return;
907 907
908 req->rq_connect_cookie = xprt->connect_cookie; 908 req->rq_connect_cookie = xprt->connect_cookie;
909 req->rq_xtime = jiffies; 909 req->rq_xtime = ktime_get();
910 status = xprt->ops->send_request(task); 910 status = xprt->ops->send_request(task);
911 if (status != 0) { 911 if (status != 0) {
912 task->tk_status = status; 912 task->tk_status = status;
@@ -935,7 +935,7 @@ void xprt_transmit(struct rpc_task *task)
935 spin_unlock_bh(&xprt->transport_lock); 935 spin_unlock_bh(&xprt->transport_lock);
936} 936}
937 937
938static inline void do_xprt_reserve(struct rpc_task *task) 938static void xprt_alloc_slot(struct rpc_task *task)
939{ 939{
940 struct rpc_xprt *xprt = task->tk_xprt; 940 struct rpc_xprt *xprt = task->tk_xprt;
941 941
@@ -955,6 +955,16 @@ static inline void do_xprt_reserve(struct rpc_task *task)
955 rpc_sleep_on(&xprt->backlog, task, NULL); 955 rpc_sleep_on(&xprt->backlog, task, NULL);
956} 956}
957 957
958static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
959{
960 memset(req, 0, sizeof(*req)); /* mark unused */
961
962 spin_lock(&xprt->reserve_lock);
963 list_add(&req->rq_list, &xprt->free);
964 rpc_wake_up_next(&xprt->backlog);
965 spin_unlock(&xprt->reserve_lock);
966}
967
958/** 968/**
959 * xprt_reserve - allocate an RPC request slot 969 * xprt_reserve - allocate an RPC request slot
960 * @task: RPC task requesting a slot allocation 970 * @task: RPC task requesting a slot allocation
@@ -968,7 +978,7 @@ void xprt_reserve(struct rpc_task *task)
968 978
969 task->tk_status = -EIO; 979 task->tk_status = -EIO;
970 spin_lock(&xprt->reserve_lock); 980 spin_lock(&xprt->reserve_lock);
971 do_xprt_reserve(task); 981 xprt_alloc_slot(task);
972 spin_unlock(&xprt->reserve_lock); 982 spin_unlock(&xprt->reserve_lock);
973} 983}
974 984
@@ -1006,14 +1016,10 @@ void xprt_release(struct rpc_task *task)
1006{ 1016{
1007 struct rpc_xprt *xprt; 1017 struct rpc_xprt *xprt;
1008 struct rpc_rqst *req; 1018 struct rpc_rqst *req;
1009 int is_bc_request;
1010 1019
1011 if (!(req = task->tk_rqstp)) 1020 if (!(req = task->tk_rqstp))
1012 return; 1021 return;
1013 1022
1014 /* Preallocated backchannel request? */
1015 is_bc_request = bc_prealloc(req);
1016
1017 xprt = req->rq_xprt; 1023 xprt = req->rq_xprt;
1018 rpc_count_iostats(task); 1024 rpc_count_iostats(task);
1019 spin_lock_bh(&xprt->transport_lock); 1025 spin_lock_bh(&xprt->transport_lock);
@@ -1027,21 +1033,16 @@ void xprt_release(struct rpc_task *task)
1027 mod_timer(&xprt->timer, 1033 mod_timer(&xprt->timer,
1028 xprt->last_used + xprt->idle_timeout); 1034 xprt->last_used + xprt->idle_timeout);
1029 spin_unlock_bh(&xprt->transport_lock); 1035 spin_unlock_bh(&xprt->transport_lock);
1030 if (!bc_prealloc(req)) 1036 if (req->rq_buffer)
1031 xprt->ops->buf_free(req->rq_buffer); 1037 xprt->ops->buf_free(req->rq_buffer);
1032 task->tk_rqstp = NULL; 1038 task->tk_rqstp = NULL;
1033 if (req->rq_release_snd_buf) 1039 if (req->rq_release_snd_buf)
1034 req->rq_release_snd_buf(req); 1040 req->rq_release_snd_buf(req);
1035 1041
1036 dprintk("RPC: %5u release request %p\n", task->tk_pid, req); 1042 dprintk("RPC: %5u release request %p\n", task->tk_pid, req);
1037 if (likely(!is_bc_request)) { 1043 if (likely(!bc_prealloc(req)))
1038 memset(req, 0, sizeof(*req)); /* mark unused */ 1044 xprt_free_slot(xprt, req);
1039 1045 else
1040 spin_lock(&xprt->reserve_lock);
1041 list_add(&req->rq_list, &xprt->free);
1042 rpc_wake_up_next(&xprt->backlog);
1043 spin_unlock(&xprt->reserve_lock);
1044 } else
1045 xprt_free_bc_request(req); 1046 xprt_free_bc_request(req);
1046} 1047}
1047 1048
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index 187257b1d880..a85e866a77f7 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -305,7 +305,6 @@ xprt_setup_rdma(struct xprt_create *args)
305 /* 60 second timeout, no retries */ 305 /* 60 second timeout, no retries */
306 xprt->timeout = &xprt_rdma_default_timeout; 306 xprt->timeout = &xprt_rdma_default_timeout;
307 xprt->bind_timeout = (60U * HZ); 307 xprt->bind_timeout = (60U * HZ);
308 xprt->connect_timeout = (60U * HZ);
309 xprt->reestablish_timeout = (5U * HZ); 308 xprt->reestablish_timeout = (5U * HZ);
310 xprt->idle_timeout = (5U * 60 * HZ); 309 xprt->idle_timeout = (5U * 60 * HZ);
311 310
@@ -449,21 +448,19 @@ xprt_rdma_connect(struct rpc_task *task)
449 struct rpc_xprt *xprt = (struct rpc_xprt *)task->tk_xprt; 448 struct rpc_xprt *xprt = (struct rpc_xprt *)task->tk_xprt;
450 struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); 449 struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
451 450
452 if (!xprt_test_and_set_connecting(xprt)) { 451 if (r_xprt->rx_ep.rep_connected != 0) {
453 if (r_xprt->rx_ep.rep_connected != 0) { 452 /* Reconnect */
454 /* Reconnect */ 453 schedule_delayed_work(&r_xprt->rdma_connect,
455 schedule_delayed_work(&r_xprt->rdma_connect, 454 xprt->reestablish_timeout);
456 xprt->reestablish_timeout); 455 xprt->reestablish_timeout <<= 1;
457 xprt->reestablish_timeout <<= 1; 456 if (xprt->reestablish_timeout > (30 * HZ))
458 if (xprt->reestablish_timeout > (30 * HZ)) 457 xprt->reestablish_timeout = (30 * HZ);
459 xprt->reestablish_timeout = (30 * HZ); 458 else if (xprt->reestablish_timeout < (5 * HZ))
460 else if (xprt->reestablish_timeout < (5 * HZ)) 459 xprt->reestablish_timeout = (5 * HZ);
461 xprt->reestablish_timeout = (5 * HZ); 460 } else {
462 } else { 461 schedule_delayed_work(&r_xprt->rdma_connect, 0);
463 schedule_delayed_work(&r_xprt->rdma_connect, 0); 462 if (!RPC_IS_ASYNC(task))
464 if (!RPC_IS_ASYNC(task)) 463 flush_scheduled_work();
465 flush_scheduled_work();
466 }
467 } 464 }
468} 465}
469 466
@@ -677,7 +674,7 @@ xprt_rdma_send_request(struct rpc_task *task)
677 if (rpcrdma_ep_post(&r_xprt->rx_ia, &r_xprt->rx_ep, req)) 674 if (rpcrdma_ep_post(&r_xprt->rx_ia, &r_xprt->rx_ep, req))
678 goto drop_connection; 675 goto drop_connection;
679 676
680 task->tk_bytes_sent += rqst->rq_snd_buf.len; 677 rqst->rq_xmit_bytes_sent += rqst->rq_snd_buf.len;
681 rqst->rq_bytes_sent = 0; 678 rqst->rq_bytes_sent = 0;
682 return 0; 679 return 0;
683 680
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 9847c30b5001..02fc7f04dd17 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -138,20 +138,6 @@ static ctl_table sunrpc_table[] = {
138#endif 138#endif
139 139
140/* 140/*
141 * Time out for an RPC UDP socket connect. UDP socket connects are
142 * synchronous, but we set a timeout anyway in case of resource
143 * exhaustion on the local host.
144 */
145#define XS_UDP_CONN_TO (5U * HZ)
146
147/*
148 * Wait duration for an RPC TCP connection to be established. Solaris
149 * NFS over TCP uses 60 seconds, for example, which is in line with how
150 * long a server takes to reboot.
151 */
152#define XS_TCP_CONN_TO (60U * HZ)
153
154/*
155 * Wait duration for a reply from the RPC portmapper. 141 * Wait duration for a reply from the RPC portmapper.
156 */ 142 */
157#define XS_BIND_TO (60U * HZ) 143#define XS_BIND_TO (60U * HZ)
@@ -542,7 +528,7 @@ static int xs_udp_send_request(struct rpc_task *task)
542 xdr->len - req->rq_bytes_sent, status); 528 xdr->len - req->rq_bytes_sent, status);
543 529
544 if (status >= 0) { 530 if (status >= 0) {
545 task->tk_bytes_sent += status; 531 req->rq_xmit_bytes_sent += status;
546 if (status >= req->rq_slen) 532 if (status >= req->rq_slen)
547 return 0; 533 return 0;
548 /* Still some bytes left; set up for a retry later. */ 534 /* Still some bytes left; set up for a retry later. */
@@ -638,7 +624,7 @@ static int xs_tcp_send_request(struct rpc_task *task)
638 /* If we've sent the entire packet, immediately 624 /* If we've sent the entire packet, immediately
639 * reset the count of bytes sent. */ 625 * reset the count of bytes sent. */
640 req->rq_bytes_sent += status; 626 req->rq_bytes_sent += status;
641 task->tk_bytes_sent += status; 627 req->rq_xmit_bytes_sent += status;
642 if (likely(req->rq_bytes_sent >= req->rq_slen)) { 628 if (likely(req->rq_bytes_sent >= req->rq_slen)) {
643 req->rq_bytes_sent = 0; 629 req->rq_bytes_sent = 0;
644 return 0; 630 return 0;
@@ -858,7 +844,6 @@ static void xs_udp_data_ready(struct sock *sk, int len)
858 dst_confirm(skb_dst(skb)); 844 dst_confirm(skb_dst(skb));
859 845
860 xprt_adjust_cwnd(task, copied); 846 xprt_adjust_cwnd(task, copied);
861 xprt_update_rtt(task);
862 xprt_complete_rqst(task, copied); 847 xprt_complete_rqst(task, copied);
863 848
864 out_unlock: 849 out_unlock:
@@ -2016,9 +2001,6 @@ static void xs_connect(struct rpc_task *task)
2016 struct rpc_xprt *xprt = task->tk_xprt; 2001 struct rpc_xprt *xprt = task->tk_xprt;
2017 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 2002 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2018 2003
2019 if (xprt_test_and_set_connecting(xprt))
2020 return;
2021
2022 if (transport->sock != NULL && !RPC_IS_SOFTCONN(task)) { 2004 if (transport->sock != NULL && !RPC_IS_SOFTCONN(task)) {
2023 dprintk("RPC: xs_connect delayed xprt %p for %lu " 2005 dprintk("RPC: xs_connect delayed xprt %p for %lu "
2024 "seconds\n", 2006 "seconds\n",
@@ -2038,16 +2020,6 @@ static void xs_connect(struct rpc_task *task)
2038 } 2020 }
2039} 2021}
2040 2022
2041static void xs_tcp_connect(struct rpc_task *task)
2042{
2043 struct rpc_xprt *xprt = task->tk_xprt;
2044
2045 /* Exit if we need to wait for socket shutdown to complete */
2046 if (test_bit(XPRT_CLOSING, &xprt->state))
2047 return;
2048 xs_connect(task);
2049}
2050
2051/** 2023/**
 2052 * xs_udp_print_stats - display UDP socket-specific stats 2024 * xs_udp_print_stats - display UDP socket-specific stats
2053 * @xprt: rpc_xprt struct containing statistics 2025 * @xprt: rpc_xprt struct containing statistics
@@ -2246,7 +2218,7 @@ static struct rpc_xprt_ops xs_tcp_ops = {
2246 .release_xprt = xs_tcp_release_xprt, 2218 .release_xprt = xs_tcp_release_xprt,
2247 .rpcbind = rpcb_getport_async, 2219 .rpcbind = rpcb_getport_async,
2248 .set_port = xs_set_port, 2220 .set_port = xs_set_port,
2249 .connect = xs_tcp_connect, 2221 .connect = xs_connect,
2250 .buf_alloc = rpc_malloc, 2222 .buf_alloc = rpc_malloc,
2251 .buf_free = rpc_free, 2223 .buf_free = rpc_free,
2252 .send_request = xs_tcp_send_request, 2224 .send_request = xs_tcp_send_request,
@@ -2337,7 +2309,6 @@ static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
2337 xprt->max_payload = (1U << 16) - (MAX_HEADER << 3); 2309 xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);
2338 2310
2339 xprt->bind_timeout = XS_BIND_TO; 2311 xprt->bind_timeout = XS_BIND_TO;
2340 xprt->connect_timeout = XS_UDP_CONN_TO;
2341 xprt->reestablish_timeout = XS_UDP_REEST_TO; 2312 xprt->reestablish_timeout = XS_UDP_REEST_TO;
2342 xprt->idle_timeout = XS_IDLE_DISC_TO; 2313 xprt->idle_timeout = XS_IDLE_DISC_TO;
2343 2314
@@ -2412,7 +2383,6 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
2412 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE; 2383 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
2413 2384
2414 xprt->bind_timeout = XS_BIND_TO; 2385 xprt->bind_timeout = XS_BIND_TO;
2415 xprt->connect_timeout = XS_TCP_CONN_TO;
2416 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; 2386 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
2417 xprt->idle_timeout = XS_IDLE_DISC_TO; 2387 xprt->idle_timeout = XS_IDLE_DISC_TO;
2418 2388
@@ -2472,9 +2442,6 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
2472 struct sock_xprt *transport; 2442 struct sock_xprt *transport;
2473 struct svc_sock *bc_sock; 2443 struct svc_sock *bc_sock;
2474 2444
2475 if (!args->bc_xprt)
2476 ERR_PTR(-EINVAL);
2477
2478 xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries); 2445 xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries);
2479 if (IS_ERR(xprt)) 2446 if (IS_ERR(xprt))
2480 return xprt; 2447 return xprt;
@@ -2488,7 +2455,6 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
2488 /* backchannel */ 2455 /* backchannel */
2489 xprt_set_bound(xprt); 2456 xprt_set_bound(xprt);
2490 xprt->bind_timeout = 0; 2457 xprt->bind_timeout = 0;
2491 xprt->connect_timeout = 0;
2492 xprt->reestablish_timeout = 0; 2458 xprt->reestablish_timeout = 0;
2493 xprt->idle_timeout = 0; 2459 xprt->idle_timeout = 0;
2494 2460