author	Linus Torvalds <torvalds@linux-foundation.org>	2008-04-24 14:45:00 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-04-24 14:45:00 -0400
commit	10c993a6b5418cb1026775765ba4c70ffb70853d (patch)
tree	717deba79b938c2f3f786ff6fe908d30582f06f8 /net
parent	c328d54cd4ad120d76284e46dcca6c6cf996154a (diff)
parent	ca456252db0521e5e88024fa2b67535e9739e030 (diff)
Merge branch 'for-linus' of git://linux-nfs.org/~bfields/linux
* 'for-linus' of git://linux-nfs.org/~bfields/linux: (52 commits)
  knfsd: clear both setuid and setgid whenever a chown is done
  knfsd: get rid of imode variable in nfsd_setattr
  SUNRPC: Use unsigned loop and array index in svc_init_buffer()
  SUNRPC: Use unsigned index when looping over arrays
  SUNRPC: Update RPC server's TCP record marker decoder
  SUNRPC: RPC server still uses 2.4 method for disabling TCP Nagle
  NLM: don't let lockd exit on unexpected svc_recv errors (try #2)
  NFS: don't let nfs_callback_svc exit on unexpected svc_recv errors (try #2)
  Use a zero sized array for raw field in struct fid
  nfsd: use static memory for callback program and stats
  SUNRPC: remove svc_create_thread()
  nfsd: fix comment
  lockd: Fix stale nlmsvc_unlink_block comment
  NFSD: Strip __KERNEL__ testing from unexported header files.
  sunrpc: make token header values less confusing
  gss_krb5: consistently use unsigned for seqnum
  NFSD: Remove NFSv4 dependency on NFSv3
  SUNRPC: Remove PROC_FS dependency
  NFSD: Use "depends on" for PROC_FS dependency
  nfsd: move most of fh_verify to separate function
  ...
Diffstat (limited to 'net')
-rw-r--r--	net/sunrpc/auth_gss/gss_generic_token.c	4
-rw-r--r--	net/sunrpc/auth_gss/gss_krb5_crypto.c	6
-rw-r--r--	net/sunrpc/auth_gss/gss_krb5_seal.c	9
-rw-r--r--	net/sunrpc/auth_gss/gss_krb5_seqnum.c	4
-rw-r--r--	net/sunrpc/auth_gss/gss_krb5_unseal.c	2
-rw-r--r--	net/sunrpc/auth_gss/gss_krb5_wrap.c	8
-rw-r--r--	net/sunrpc/auth_gss/gss_spkm3_seal.c	4
-rw-r--r--	net/sunrpc/auth_gss/svcauth_gss.c	9
-rw-r--r--	net/sunrpc/cache.c	1
-rw-r--r--	net/sunrpc/svc.c	25
-rw-r--r--	net/sunrpc/svc_xprt.c	30
-rw-r--r--	net/sunrpc/svcauth_unix.c	118
-rw-r--r--	net/sunrpc/svcsock.c	29
-rw-r--r--	net/sunrpc/xprtrdma/svc_rdma_transport.c	2
14 files changed, 154 insertions, 97 deletions
diff --git a/net/sunrpc/auth_gss/gss_generic_token.c b/net/sunrpc/auth_gss/gss_generic_token.c
index ea8c92ecdae5..d83b881685fe 100644
--- a/net/sunrpc/auth_gss/gss_generic_token.c
+++ b/net/sunrpc/auth_gss/gss_generic_token.c
@@ -148,7 +148,7 @@ int
 g_token_size(struct xdr_netobj *mech, unsigned int body_size)
 {
 	/* set body_size to sequence contents size */
-	body_size += 4 + (int) mech->len;	/* NEED overflow check */
+	body_size += 2 + (int) mech->len;	/* NEED overflow check */
 	return(1 + der_length_size(body_size) + body_size);
 }
 
@@ -161,7 +161,7 @@ void
 g_make_token_header(struct xdr_netobj *mech, int body_size, unsigned char **buf)
 {
 	*(*buf)++ = 0x60;
-	der_write_length(buf, 4 + mech->len + body_size);
+	der_write_length(buf, 2 + mech->len + body_size);
 	*(*buf)++ = 0x06;
 	*(*buf)++ = (unsigned char) mech->len;
 	TWRITE_STR(*buf, mech->data, ((int) mech->len));
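Illustrative aside, not part of the patch: the generic GSS-API token wraps its body in a DER framing header of one 0x60 byte, a DER length field, a 2-byte OID tag and length, and the mechanism OID; everything else (token ID, signature algorithm, checksum, and so on) is counted by the caller in body_size, which is why the constants move from 4/22 to 2/24. A small stand-alone model of that arithmetic, with der_length_size() re-implemented locally:

#include <stdio.h>

/* DER length field: short form below 128, otherwise one length-of-length
 * byte followed by the big-endian length bytes. */
static int der_length_size(int length)
{
	if (length < 128)
		return 1;
	else if (length < 0x100)
		return 2;
	else if (length < 0x10000)
		return 3;
	else if (length < 0x1000000)
		return 4;
	return 5;
}

/* Mirrors g_token_size() after the change: 2 bytes of OID tag/length,
 * the mech OID itself, then the caller-supplied body. */
static int token_size(int mech_oid_len, int body_size)
{
	int contents = 2 + mech_oid_len + body_size;

	return 1 + der_length_size(contents) + contents;
}

int main(void)
{
	/* e.g. the 9-byte krb5 mech OID and the 24-byte MIC body used above */
	printf("krb5 MIC token: %d bytes\n", token_size(9, 24));
	return 0;
}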
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index 0dd792338fa9..1d52308ca324 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -66,8 +66,8 @@ krb5_encrypt(
 		goto out;
 
 	if (crypto_blkcipher_ivsize(tfm) > 16) {
-		dprintk("RPC: gss_k5encrypt: tfm iv size to large %d\n",
+		dprintk("RPC: gss_k5encrypt: tfm iv size too large %d\n",
 			crypto_blkcipher_ivsize(tfm));
 		goto out;
 	}
 
@@ -102,7 +102,7 @@ krb5_decrypt(
 		goto out;
 
 	if (crypto_blkcipher_ivsize(tfm) > 16) {
-		dprintk("RPC: gss_k5decrypt: tfm iv size to large %d\n",
+		dprintk("RPC: gss_k5decrypt: tfm iv size too large %d\n",
 			crypto_blkcipher_ivsize(tfm));
 		goto out;
 	}
diff --git a/net/sunrpc/auth_gss/gss_krb5_seal.c b/net/sunrpc/auth_gss/gss_krb5_seal.c
index dedcbd6108f4..5f1d36dfbcf7 100644
--- a/net/sunrpc/auth_gss/gss_krb5_seal.c
+++ b/net/sunrpc/auth_gss/gss_krb5_seal.c
@@ -87,10 +87,10 @@ gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text,
 
 	now = get_seconds();
 
-	token->len = g_token_size(&ctx->mech_used, 22);
+	token->len = g_token_size(&ctx->mech_used, 24);
 
 	ptr = token->data;
-	g_make_token_header(&ctx->mech_used, 22, &ptr);
+	g_make_token_header(&ctx->mech_used, 24, &ptr);
 
 	*ptr++ = (unsigned char) ((KG_TOK_MIC_MSG>>8)&0xff);
 	*ptr++ = (unsigned char) (KG_TOK_MIC_MSG&0xff);
@@ -109,15 +109,14 @@ gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text,
 			  md5cksum.data, md5cksum.len))
 		return GSS_S_FAILURE;
 
-	memcpy(krb5_hdr + 16, md5cksum.data + md5cksum.len - KRB5_CKSUM_LENGTH,
-	       KRB5_CKSUM_LENGTH);
+	memcpy(krb5_hdr + 16, md5cksum.data + md5cksum.len - 8, 8);
 
 	spin_lock(&krb5_seq_lock);
 	seq_send = ctx->seq_send++;
 	spin_unlock(&krb5_seq_lock);
 
 	if (krb5_make_seq_num(ctx->seq, ctx->initiate ? 0 : 0xff,
-			      ctx->seq_send, krb5_hdr + 16, krb5_hdr + 8))
+			      seq_send, krb5_hdr + 16, krb5_hdr + 8))
 		return GSS_S_FAILURE;
 
 	return (ctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
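Illustrative aside, not part of the patch: the hunk above also closes a small race by passing the seq_send value snapshotted while krb5_seq_lock was held, instead of re-reading ctx->seq_send afterwards. A minimal user-space sketch of that snapshot-under-lock pattern, with a pthread mutex standing in for the kernel spinlock:

#include <pthread.h>
#include <stdint.h>

static pthread_mutex_t seq_lock = PTHREAD_MUTEX_INITIALIZER;
static uint32_t seq_send;

uint32_t next_seq(void)
{
	uint32_t snapshot;

	pthread_mutex_lock(&seq_lock);
	snapshot = seq_send++;		/* value reserved for this caller */
	pthread_mutex_unlock(&seq_lock);

	/* use the snapshot from here on; the shared counter may already
	 * have been bumped again by another thread */
	return snapshot;
}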
diff --git a/net/sunrpc/auth_gss/gss_krb5_seqnum.c b/net/sunrpc/auth_gss/gss_krb5_seqnum.c
index 43f3421f1e6a..f160be6c1a46 100644
--- a/net/sunrpc/auth_gss/gss_krb5_seqnum.c
+++ b/net/sunrpc/auth_gss/gss_krb5_seqnum.c
@@ -43,7 +43,7 @@
 s32
 krb5_make_seq_num(struct crypto_blkcipher *key,
 		int direction,
-		s32 seqnum,
+		u32 seqnum,
 		unsigned char *cksum, unsigned char *buf)
 {
 	unsigned char plain[8];
@@ -65,7 +65,7 @@ s32
 krb5_get_seq_num(struct crypto_blkcipher *key,
 		unsigned char *cksum,
 		unsigned char *buf,
-		int *direction, s32 * seqnum)
+		int *direction, u32 *seqnum)
 {
 	s32 code;
 	unsigned char plain[8];
diff --git a/net/sunrpc/auth_gss/gss_krb5_unseal.c b/net/sunrpc/auth_gss/gss_krb5_unseal.c
index e30a993466bc..d91a5d004803 100644
--- a/net/sunrpc/auth_gss/gss_krb5_unseal.c
+++ b/net/sunrpc/auth_gss/gss_krb5_unseal.c
@@ -82,7 +82,7 @@ gss_verify_mic_kerberos(struct gss_ctx *gss_ctx,
 	struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata};
 	s32 now;
 	int direction;
-	s32 seqnum;
+	u32 seqnum;
 	unsigned char *ptr = (unsigned char *)read_token->data;
 	int bodysize;
 
diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c
index 3bdc527ee64a..b00b1b426301 100644
--- a/net/sunrpc/auth_gss/gss_krb5_wrap.c
+++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c
@@ -137,7 +137,7 @@ gss_wrap_kerberos(struct gss_ctx *ctx, int offset,
 	BUG_ON((buf->len - offset) % blocksize);
 	plainlen = blocksize + buf->len - offset;
 
-	headlen = g_token_size(&kctx->mech_used, 22 + plainlen) -
+	headlen = g_token_size(&kctx->mech_used, 24 + plainlen) -
 		(buf->len - offset);
 
 	ptr = buf->head[0].iov_base + offset;
@@ -149,7 +149,7 @@ gss_wrap_kerberos(struct gss_ctx *ctx, int offset,
 	buf->len += headlen;
 	BUG_ON((buf->len - offset - headlen) % blocksize);
 
-	g_make_token_header(&kctx->mech_used, 22 + plainlen, &ptr);
+	g_make_token_header(&kctx->mech_used, 24 + plainlen, &ptr);
 
 
 	*ptr++ = (unsigned char) ((KG_TOK_WRAP_MSG>>8)&0xff);
@@ -176,9 +176,7 @@ gss_wrap_kerberos(struct gss_ctx *ctx, int offset,
 	if (krb5_encrypt(kctx->seq, NULL, md5cksum.data,
 			  md5cksum.data, md5cksum.len))
 		return GSS_S_FAILURE;
-	memcpy(krb5_hdr + 16,
-	       md5cksum.data + md5cksum.len - KRB5_CKSUM_LENGTH,
-	       KRB5_CKSUM_LENGTH);
+	memcpy(krb5_hdr + 16, md5cksum.data + md5cksum.len - 8, 8);
 
 	spin_lock(&krb5_seq_lock);
 	seq_send = kctx->seq_send++;
diff --git a/net/sunrpc/auth_gss/gss_spkm3_seal.c b/net/sunrpc/auth_gss/gss_spkm3_seal.c
index abf17ce2e3b1..c832712f8d55 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_seal.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_seal.c
@@ -107,10 +107,10 @@ spkm3_make_token(struct spkm3_ctx *ctx,
 		tokenlen = 10 + ctxelen + 1 + md5elen + 1;
 
 		/* Create token header using generic routines */
-		token->len = g_token_size(&ctx->mech_used, tokenlen);
+		token->len = g_token_size(&ctx->mech_used, tokenlen + 2);
 
 		ptr = token->data;
-		g_make_token_header(&ctx->mech_used, tokenlen, &ptr);
+		g_make_token_header(&ctx->mech_used, tokenlen + 2, &ptr);
 
 		spkm3_make_mic_token(&ptr, tokenlen, &mic_hdr, &md5cksum, md5elen, md5zbit);
 	} else if (toktype == SPKM_WRAP_TOK) { /* Not Supported */
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 481f984e9a22..5905d56737d6 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -1146,7 +1146,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp)
 	case RPC_GSS_SVC_INTEGRITY:
 		if (unwrap_integ_data(&rqstp->rq_arg,
 				gc->gc_seq, rsci->mechctx))
-			goto auth_err;
+			goto garbage_args;
 		/* placeholders for length and seq. number: */
 		svc_putnl(resv, 0);
 		svc_putnl(resv, 0);
@@ -1154,7 +1154,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp)
 	case RPC_GSS_SVC_PRIVACY:
 		if (unwrap_priv_data(rqstp, &rqstp->rq_arg,
 				gc->gc_seq, rsci->mechctx))
-			goto auth_err;
+			goto garbage_args;
 		/* placeholders for length and seq. number: */
 		svc_putnl(resv, 0);
 		svc_putnl(resv, 0);
@@ -1169,6 +1169,11 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp)
 	ret = SVC_OK;
 	goto out;
 	}
+garbage_args:
+	/* Restore write pointer to its original value: */
+	xdr_ressize_check(rqstp, reject_stat);
+	ret = SVC_GARBAGE;
+	goto out;
 auth_err:
 	/* Restore write pointer to its original value: */
 	xdr_ressize_check(rqstp, reject_stat);
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index b5f2786251b9..d75530ff2a6d 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -571,7 +571,6 @@ static int cache_defer_req(struct cache_req *req, struct cache_head *item)
 		return -ETIMEDOUT;
 
 	dreq->item = item;
-	dreq->recv_time = get_seconds();
 
 	spin_lock(&cache_defer_lock);
 
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 090af78d68b5..d74c2d269539 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -510,8 +510,7 @@ EXPORT_SYMBOL(svc_destroy);
 static int
 svc_init_buffer(struct svc_rqst *rqstp, unsigned int size)
 {
-	int pages;
-	int arghi;
+	unsigned int pages, arghi;
 
 	pages = size / PAGE_SIZE + 1; /* extra page as we hold both request and reply.
 				      * We assume one is at most one page
@@ -525,7 +524,7 @@ svc_init_buffer(struct svc_rqst *rqstp, unsigned int size)
 		rqstp->rq_pages[arghi++] = p;
 		pages--;
 	}
-	return ! pages;
+	return pages == 0;
 }
 
 /*
@@ -534,8 +533,9 @@ svc_init_buffer(struct svc_rqst *rqstp, unsigned int size)
 static void
 svc_release_buffer(struct svc_rqst *rqstp)
 {
-	int i;
-	for (i=0; i<ARRAY_SIZE(rqstp->rq_pages); i++)
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(rqstp->rq_pages); i++)
 		if (rqstp->rq_pages[i])
 			put_page(rqstp->rq_pages[i]);
 }
@@ -590,7 +590,7 @@ __svc_create_thread(svc_thread_fn func, struct svc_serv *serv,
 	struct svc_rqst *rqstp;
 	int error = -ENOMEM;
 	int have_oldmask = 0;
-	cpumask_t oldmask;
+	cpumask_t uninitialized_var(oldmask);
 
 	rqstp = svc_prepare_thread(serv, pool);
 	if (IS_ERR(rqstp)) {
@@ -619,16 +619,6 @@ out_thread:
 }
 
 /*
- * Create a thread in the default pool. Caller must hold BKL.
- */
-int
-svc_create_thread(svc_thread_fn func, struct svc_serv *serv)
-{
-	return __svc_create_thread(func, serv, &serv->sv_pools[0]);
-}
-EXPORT_SYMBOL(svc_create_thread);
-
-/*
  * Choose a pool in which to create a new thread, for svc_set_num_threads
  */
 static inline struct svc_pool *
@@ -921,8 +911,7 @@ svc_process(struct svc_rqst *rqstp)
 	case SVC_OK:
 		break;
 	case SVC_GARBAGE:
-		rpc_stat = rpc_garbage_args;
-		goto err_bad;
+		goto err_garbage;
 	case SVC_SYSERR:
 		rpc_stat = rpc_system_err;
 		goto err_bad;
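Illustrative aside, not part of the patch: together with the garbage_args label added in svcauth_gss.c above, the err_garbage path means a request that authenticates but whose body fails to unwrap is answered with GARBAGE_ARGS instead of an authentication error. A rough sketch of that mapping, with locally defined stand-in constants (the accept-status numbers follow the ONC RPC accept_stat enum):

/* stand-ins for illustration, not the kernel's definitions */
enum svc_auth_result { SVC_OK, SVC_GARBAGE, SVC_DENIED, SVC_SYSERR };
enum rpc_accept_stat { RPC_SUCCESS = 0, RPC_GARBAGE_ARGS = 4, RPC_SYSTEM_ERR = 5 };

static enum rpc_accept_stat map_auth_result(enum svc_auth_result r)
{
	switch (r) {
	case SVC_OK:
		return RPC_SUCCESS;
	case SVC_GARBAGE:
		/* authenticated, but the argument data could not be decoded */
		return RPC_GARBAGE_ARGS;
	default:
		return RPC_SYSTEM_ERR;
	}
}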
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 332eb47539e1..d8e8d79a8451 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -18,6 +18,7 @@
 #include <linux/skbuff.h>
 #include <linux/file.h>
 #include <linux/freezer.h>
+#include <linux/kthread.h>
 #include <net/sock.h>
 #include <net/checksum.h>
 #include <net/ip.h>
@@ -586,8 +587,12 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
 		while (rqstp->rq_pages[i] == NULL) {
 			struct page *p = alloc_page(GFP_KERNEL);
 			if (!p) {
-				int j = msecs_to_jiffies(500);
-				schedule_timeout_uninterruptible(j);
+				set_current_state(TASK_INTERRUPTIBLE);
+				if (signalled() || kthread_should_stop()) {
+					set_current_state(TASK_RUNNING);
+					return -EINTR;
+				}
+				schedule_timeout(msecs_to_jiffies(500));
 			}
 			rqstp->rq_pages[i] = p;
 		}
@@ -607,7 +612,7 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
 
 	try_to_freeze();
 	cond_resched();
-	if (signalled())
+	if (signalled() || kthread_should_stop())
 		return -EINTR;
 
 	spin_lock_bh(&pool->sp_lock);
@@ -626,6 +631,20 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
 		 * to bring down the daemons ...
 		 */
 		set_current_state(TASK_INTERRUPTIBLE);
+
+		/*
+		 * checking kthread_should_stop() here allows us to avoid
+		 * locking and signalling when stopping kthreads that call
+		 * svc_recv. If the thread has already been woken up, then
+		 * we can exit here without sleeping. If not, then it
+		 * it'll be woken up quickly during the schedule_timeout
+		 */
+		if (kthread_should_stop()) {
+			set_current_state(TASK_RUNNING);
+			spin_unlock_bh(&pool->sp_lock);
+			return -EINTR;
+		}
+
 		add_wait_queue(&rqstp->rq_wait, &wait);
 		spin_unlock_bh(&pool->sp_lock);
 
@@ -641,7 +660,10 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
 			svc_thread_dequeue(pool, rqstp);
 			spin_unlock_bh(&pool->sp_lock);
 			dprintk("svc: server %p, no data yet\n", rqstp);
-			return signalled()? -EINTR : -EAGAIN;
+			if (signalled() || kthread_should_stop())
+				return -EINTR;
+			else
+				return -EAGAIN;
 		}
 	}
 	spin_unlock_bh(&pool->sp_lock);
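Illustrative aside, not part of the patch: the hunks above make svc_recv() safe to call from kthreads by testing kthread_should_stop() at every point where the thread might otherwise sleep, so the creator can stop it without delivering a signal. A minimal user-space model of that stop-flag pattern, with a C11 atomic standing in for kthread_should_stop():

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool should_stop;

/* stand-in for kthread_stop() called by the controlling thread */
void request_stop(void)
{
	atomic_store(&should_stop, true);
}

/* one pass of a service loop; returns -1 to exit, 0 to retry, 1 when
 * a request was handled */
int service_loop_iteration(bool work_available)
{
	if (atomic_load(&should_stop))
		return -1;			/* -EINTR in the real code */

	if (!work_available) {
		/* the real code would sleep here; re-check the flag right
		 * before and after sleeping */
		if (atomic_load(&should_stop))
			return -1;
		return 0;			/* -EAGAIN: try again later */
	}

	/* ... handle one request ... */
	return 1;
}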
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index 3c64051e4555..3f30ee6006ae 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -11,7 +11,8 @@
 #include <linux/hash.h>
 #include <linux/string.h>
 #include <net/sock.h>
-
+#include <net/ipv6.h>
+#include <linux/kernel.h>
 #define RPCDBG_FACILITY RPCDBG_AUTH
 
 
@@ -85,7 +86,7 @@ static void svcauth_unix_domain_release(struct auth_domain *dom)
 struct ip_map {
 	struct cache_head h;
 	char m_class[8]; /* e.g. "nfsd" */
-	struct in_addr m_addr;
+	struct in6_addr m_addr;
 	struct unix_domain *m_client;
 	int m_add_change;
 };
@@ -113,12 +114,19 @@ static inline int hash_ip(__be32 ip)
 	return (hash ^ (hash>>8)) & 0xff;
 }
 #endif
+static inline int hash_ip6(struct in6_addr ip)
+{
+	return (hash_ip(ip.s6_addr32[0]) ^
+		hash_ip(ip.s6_addr32[1]) ^
+		hash_ip(ip.s6_addr32[2]) ^
+		hash_ip(ip.s6_addr32[3]));
+}
 static int ip_map_match(struct cache_head *corig, struct cache_head *cnew)
 {
 	struct ip_map *orig = container_of(corig, struct ip_map, h);
 	struct ip_map *new = container_of(cnew, struct ip_map, h);
 	return strcmp(orig->m_class, new->m_class) == 0
-		&& orig->m_addr.s_addr == new->m_addr.s_addr;
+		&& ipv6_addr_equal(&orig->m_addr, &new->m_addr);
 }
 static void ip_map_init(struct cache_head *cnew, struct cache_head *citem)
 {
@@ -126,7 +134,7 @@ static void ip_map_init(struct cache_head *cnew, struct cache_head *citem)
 	struct ip_map *item = container_of(citem, struct ip_map, h);
 
 	strcpy(new->m_class, item->m_class);
-	new->m_addr.s_addr = item->m_addr.s_addr;
+	ipv6_addr_copy(&new->m_addr, &item->m_addr);
 }
 static void update(struct cache_head *cnew, struct cache_head *citem)
 {
@@ -150,22 +158,24 @@ static void ip_map_request(struct cache_detail *cd,
 			   struct cache_head *h,
 			   char **bpp, int *blen)
 {
-	char text_addr[20];
+	char text_addr[40];
 	struct ip_map *im = container_of(h, struct ip_map, h);
-	__be32 addr = im->m_addr.s_addr;
-
-	snprintf(text_addr, 20, "%u.%u.%u.%u",
-		 ntohl(addr) >> 24 & 0xff,
-		 ntohl(addr) >> 16 & 0xff,
-		 ntohl(addr) >>  8 & 0xff,
-		 ntohl(addr) >>  0 & 0xff);
 
+	if (ipv6_addr_v4mapped(&(im->m_addr))) {
+		snprintf(text_addr, 20, NIPQUAD_FMT,
+			ntohl(im->m_addr.s6_addr32[3]) >> 24 & 0xff,
+			ntohl(im->m_addr.s6_addr32[3]) >> 16 & 0xff,
+			ntohl(im->m_addr.s6_addr32[3]) >>  8 & 0xff,
+			ntohl(im->m_addr.s6_addr32[3]) >>  0 & 0xff);
+	} else {
+		snprintf(text_addr, 40, NIP6_FMT, NIP6(im->m_addr));
+	}
 	qword_add(bpp, blen, im->m_class);
 	qword_add(bpp, blen, text_addr);
 	(*bpp)[-1] = '\n';
 }
 
-static struct ip_map *ip_map_lookup(char *class, struct in_addr addr);
+static struct ip_map *ip_map_lookup(char *class, struct in6_addr *addr);
 static int ip_map_update(struct ip_map *ipm, struct unix_domain *udom, time_t expiry);
 
 static int ip_map_parse(struct cache_detail *cd,
@@ -176,10 +186,10 @@ static int ip_map_parse(struct cache_detail *cd,
 	 * for scratch: */
 	char *buf = mesg;
 	int len;
-	int b1,b2,b3,b4;
+	int b1, b2, b3, b4, b5, b6, b7, b8;
 	char c;
 	char class[8];
-	struct in_addr addr;
+	struct in6_addr addr;
 	int err;
 
 	struct ip_map *ipmp;
@@ -198,7 +208,23 @@ static int ip_map_parse(struct cache_detail *cd,
 	len = qword_get(&mesg, buf, mlen);
 	if (len <= 0) return -EINVAL;
 
-	if (sscanf(buf, "%u.%u.%u.%u%c", &b1, &b2, &b3, &b4, &c) != 4)
+	if (sscanf(buf, NIPQUAD_FMT "%c", &b1, &b2, &b3, &b4, &c) == 4) {
+		addr.s6_addr32[0] = 0;
+		addr.s6_addr32[1] = 0;
+		addr.s6_addr32[2] = htonl(0xffff);
+		addr.s6_addr32[3] =
+			htonl((((((b1<<8)|b2)<<8)|b3)<<8)|b4);
+	} else if (sscanf(buf, NIP6_FMT "%c",
+			&b1, &b2, &b3, &b4, &b5, &b6, &b7, &b8, &c) == 8) {
+		addr.s6_addr16[0] = htons(b1);
+		addr.s6_addr16[1] = htons(b2);
+		addr.s6_addr16[2] = htons(b3);
+		addr.s6_addr16[3] = htons(b4);
+		addr.s6_addr16[4] = htons(b5);
+		addr.s6_addr16[5] = htons(b6);
+		addr.s6_addr16[6] = htons(b7);
+		addr.s6_addr16[7] = htons(b8);
+	} else
 		return -EINVAL;
 
 	expiry = get_expiry(&mesg);
@@ -216,10 +242,7 @@ static int ip_map_parse(struct cache_detail *cd,
 	} else
 		dom = NULL;
 
-	addr.s_addr =
-		htonl((((((b1<<8)|b2)<<8)|b3)<<8)|b4);
-
-	ipmp = ip_map_lookup(class,addr);
+	ipmp = ip_map_lookup(class, &addr);
 	if (ipmp) {
 		err = ip_map_update(ipmp,
 			container_of(dom, struct unix_domain, h),
@@ -239,7 +262,7 @@ static int ip_map_show(struct seq_file *m,
 		       struct cache_head *h)
 {
 	struct ip_map *im;
-	struct in_addr addr;
+	struct in6_addr addr;
 	char *dom = "-no-domain-";
 
 	if (h == NULL) {
@@ -248,20 +271,24 @@ static int ip_map_show(struct seq_file *m,
 	}
 	im = container_of(h, struct ip_map, h);
 	/* class addr domain */
-	addr = im->m_addr;
+	ipv6_addr_copy(&addr, &im->m_addr);
 
 	if (test_bit(CACHE_VALID, &h->flags) &&
 	    !test_bit(CACHE_NEGATIVE, &h->flags))
 		dom = im->m_client->h.name;
 
-	seq_printf(m, "%s %d.%d.%d.%d %s\n",
-		im->m_class,
-		ntohl(addr.s_addr) >> 24 & 0xff,
-		ntohl(addr.s_addr) >> 16 & 0xff,
-		ntohl(addr.s_addr) >>  8 & 0xff,
-		ntohl(addr.s_addr) >>  0 & 0xff,
-		dom
-		);
+	if (ipv6_addr_v4mapped(&addr)) {
+		seq_printf(m, "%s" NIPQUAD_FMT "%s\n",
+			im->m_class,
+			ntohl(addr.s6_addr32[3]) >> 24 & 0xff,
+			ntohl(addr.s6_addr32[3]) >> 16 & 0xff,
+			ntohl(addr.s6_addr32[3]) >>  8 & 0xff,
+			ntohl(addr.s6_addr32[3]) >>  0 & 0xff,
+			dom);
+	} else {
+		seq_printf(m, "%s" NIP6_FMT "%s\n",
+			im->m_class, NIP6(addr), dom);
+	}
 	return 0;
 }
 
@@ -281,16 +308,16 @@ struct cache_detail ip_map_cache = {
 	.alloc = ip_map_alloc,
 };
 
-static struct ip_map *ip_map_lookup(char *class, struct in_addr addr)
+static struct ip_map *ip_map_lookup(char *class, struct in6_addr *addr)
 {
 	struct ip_map ip;
 	struct cache_head *ch;
 
 	strcpy(ip.m_class, class);
-	ip.m_addr = addr;
+	ipv6_addr_copy(&ip.m_addr, addr);
 	ch = sunrpc_cache_lookup(&ip_map_cache, &ip.h,
 				 hash_str(class, IP_HASHBITS) ^
-				 hash_ip(addr.s_addr));
+				 hash_ip6(*addr));
 
 	if (ch)
 		return container_of(ch, struct ip_map, h);
@@ -319,14 +346,14 @@ static int ip_map_update(struct ip_map *ipm, struct unix_domain *udom, time_t ex
 	ch = sunrpc_cache_update(&ip_map_cache,
 				 &ip.h, &ipm->h,
 				 hash_str(ipm->m_class, IP_HASHBITS) ^
-				 hash_ip(ipm->m_addr.s_addr));
+				 hash_ip6(ipm->m_addr));
 	if (!ch)
 		return -ENOMEM;
 	cache_put(ch, &ip_map_cache);
 	return 0;
 }
 
-int auth_unix_add_addr(struct in_addr addr, struct auth_domain *dom)
+int auth_unix_add_addr(struct in6_addr *addr, struct auth_domain *dom)
 {
 	struct unix_domain *udom;
 	struct ip_map *ipmp;
@@ -355,7 +382,7 @@ int auth_unix_forget_old(struct auth_domain *dom)
 }
 EXPORT_SYMBOL(auth_unix_forget_old);
 
-struct auth_domain *auth_unix_lookup(struct in_addr addr)
+struct auth_domain *auth_unix_lookup(struct in6_addr *addr)
 {
 	struct ip_map *ipm;
 	struct auth_domain *rv;
@@ -650,9 +677,24 @@ static int unix_gid_find(uid_t uid, struct group_info **gip,
 int
 svcauth_unix_set_client(struct svc_rqst *rqstp)
 {
-	struct sockaddr_in *sin = svc_addr_in(rqstp);
+	struct sockaddr_in *sin;
+	struct sockaddr_in6 *sin6, sin6_storage;
 	struct ip_map *ipm;
 
+	switch (rqstp->rq_addr.ss_family) {
+	case AF_INET:
+		sin = svc_addr_in(rqstp);
+		sin6 = &sin6_storage;
+		ipv6_addr_set(&sin6->sin6_addr, 0, 0,
+				htonl(0x0000FFFF), sin->sin_addr.s_addr);
+		break;
+	case AF_INET6:
+		sin6 = svc_addr_in6(rqstp);
+		break;
+	default:
+		BUG();
+	}
+
 	rqstp->rq_client = NULL;
 	if (rqstp->rq_proc == 0)
 		return SVC_OK;
@@ -660,7 +702,7 @@ svcauth_unix_set_client(struct svc_rqst *rqstp)
 	ipm = ip_map_cached_get(rqstp);
 	if (ipm == NULL)
 		ipm = ip_map_lookup(rqstp->rq_server->sv_program->pg_class,
-				    sin->sin_addr);
+				    &sin6->sin6_addr);
 
 	if (ipm == NULL)
 		return SVC_DENIED;
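Illustrative aside, not part of the patch: the ip_map cache is now keyed by struct in6_addr, and AF_INET callers are folded in by building an IPv4-mapped IPv6 address (::ffff:a.b.c.d), which is what the ipv6_addr_set(..., htonl(0x0000FFFF), ...) call above produces. A small user-space sketch of that mapping:

#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>

/* Place the 32-bit IPv4 address in the low bytes of an IPv6 address,
 * with 0xffff in bytes 10-11 and zeroes above (RFC 4291, 2.5.5.2). */
static void map_v4_to_v6(struct in_addr v4, struct in6_addr *v6)
{
	memset(v6, 0, sizeof(*v6));
	v6->s6_addr[10] = 0xff;
	v6->s6_addr[11] = 0xff;
	memcpy(&v6->s6_addr[12], &v4.s_addr, 4);
}

int main(void)
{
	struct in_addr v4;
	struct in6_addr v6;
	char buf[INET6_ADDRSTRLEN];

	inet_pton(AF_INET, "192.0.2.1", &v4);
	map_v4_to_v6(v4, &v6);
	printf("%s\n", inet_ntop(AF_INET6, &v6, buf, sizeof(buf)));
	/* prints ::ffff:192.0.2.1 */
	return 0;
}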
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index c475977de05a..3e65719f1ef6 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -38,6 +38,7 @@
 #include <net/checksum.h>
 #include <net/ip.h>
 #include <net/ipv6.h>
+#include <net/tcp.h>
 #include <net/tcp_states.h>
 #include <asm/uaccess.h>
 #include <asm/ioctls.h>
@@ -45,6 +46,7 @@
 #include <linux/sunrpc/types.h>
 #include <linux/sunrpc/clnt.h>
 #include <linux/sunrpc/xdr.h>
+#include <linux/sunrpc/msg_prot.h>
 #include <linux/sunrpc/svcsock.h>
 #include <linux/sunrpc/stats.h>
 
@@ -822,8 +824,8 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
 	 * the next four bytes. Otherwise try to gobble up as much as
 	 * possible up to the complete record length.
 	 */
-	if (svsk->sk_tcplen < 4) {
-		unsigned long want = 4 - svsk->sk_tcplen;
+	if (svsk->sk_tcplen < sizeof(rpc_fraghdr)) {
+		int want = sizeof(rpc_fraghdr) - svsk->sk_tcplen;
 		struct kvec iov;
 
 		iov.iov_base = ((char *) &svsk->sk_reclen) + svsk->sk_tcplen;
@@ -833,32 +835,31 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
 		svsk->sk_tcplen += len;
 
 		if (len < want) {
-			dprintk("svc: short recvfrom while reading record length (%d of %lu)\n",
-				len, want);
+			dprintk("svc: short recvfrom while reading record "
+				"length (%d of %d)\n", len, want);
 			svc_xprt_received(&svsk->sk_xprt);
 			return -EAGAIN; /* record header not complete */
 		}
 
 		svsk->sk_reclen = ntohl(svsk->sk_reclen);
-		if (!(svsk->sk_reclen & 0x80000000)) {
+		if (!(svsk->sk_reclen & RPC_LAST_STREAM_FRAGMENT)) {
 			/* FIXME: technically, a record can be fragmented,
 			 * and non-terminal fragments will not have the top
 			 * bit set in the fragment length header.
 			 * But apparently no known nfs clients send fragmented
 			 * records. */
 			if (net_ratelimit())
-				printk(KERN_NOTICE "RPC: bad TCP reclen 0x%08lx"
-				       " (non-terminal)\n",
-				       (unsigned long) svsk->sk_reclen);
+				printk(KERN_NOTICE "RPC: multiple fragments "
+					"per record not supported\n");
 			goto err_delete;
 		}
-		svsk->sk_reclen &= 0x7fffffff;
+		svsk->sk_reclen &= RPC_FRAGMENT_SIZE_MASK;
 		dprintk("svc: TCP record, %d bytes\n", svsk->sk_reclen);
 		if (svsk->sk_reclen > serv->sv_max_mesg) {
 			if (net_ratelimit())
-				printk(KERN_NOTICE "RPC: bad TCP reclen 0x%08lx"
-				       " (large)\n",
-				       (unsigned long) svsk->sk_reclen);
+				printk(KERN_NOTICE "RPC: "
+				       "fragment too large: 0x%08lx\n",
+				       (unsigned long)svsk->sk_reclen);
 			goto err_delete;
 		}
 	}
@@ -1045,7 +1046,6 @@ void svc_cleanup_xprt_sock(void)
 static void svc_tcp_init(struct svc_sock *svsk, struct svc_serv *serv)
 {
 	struct sock *sk = svsk->sk_sk;
-	struct tcp_sock *tp = tcp_sk(sk);
 
 	svc_xprt_init(&svc_tcp_class, &svsk->sk_xprt, serv);
 	set_bit(XPT_CACHE_AUTH, &svsk->sk_xprt.xpt_flags);
@@ -1063,7 +1063,7 @@ static void svc_tcp_init(struct svc_sock *svsk, struct svc_serv *serv)
 		svsk->sk_reclen = 0;
 		svsk->sk_tcplen = 0;
 
-		tp->nonagle = 1; /* disable Nagle's algorithm */
+		tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF;
 
 		/* initialise setting must have enough space to
 		 * receive and respond to one request.
@@ -1101,6 +1101,7 @@ void svc_sock_update_bufs(struct svc_serv *serv)
 	}
 	spin_unlock_bh(&serv->sv_lock);
 }
+EXPORT_SYMBOL(svc_sock_update_bufs);
 
 /*
  * Initialize socket for RPC use and create svc_sock struct
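Illustrative aside, not part of the patch: the record-marker decoder above now uses the named constants from msg_prot.h. On TCP, each RPC message is preceded by a 4-byte big-endian fragment header whose top bit marks the last fragment of a record and whose low 31 bits give the fragment length. A minimal parser sketch with locally defined constants:

#include <arpa/inet.h>
#include <stdbool.h>
#include <stdint.h>

/* local stand-ins for the kernel's record-marking constants */
#define LAST_FRAGMENT	0x80000000u
#define SIZE_MASK	0x7fffffffu

struct frag_info {
	bool last;		/* last fragment of this record? */
	uint32_t len;		/* fragment length in bytes */
};

static struct frag_info parse_fraghdr(uint32_t hdr_on_wire)
{
	uint32_t hdr = ntohl(hdr_on_wire);	/* wire format is big-endian */
	struct frag_info fi = {
		.last = (hdr & LAST_FRAGMENT) != 0,
		.len  = hdr & SIZE_MASK,
	};
	return fi;
}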
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 16fd3f6718ff..af408fc12634 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -1036,6 +1036,8 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
 			wait_event(xprt->sc_send_wait,
 				   atomic_read(&xprt->sc_sq_count) <
 				   xprt->sc_sq_depth);
+			if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
+				return 0;
 			continue;
 		}
 		/* Bumped used SQ WR count and post */
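Illustrative aside, not part of the patch: the added XPT_CLOSE test lets a sender that is waiting for send-queue space give up once the transport is being torn down instead of looping forever. A rough user-space model of that wake-and-recheck pattern:

#include <stdatomic.h>
#include <stdbool.h>

struct fake_xprt {
	atomic_bool closed;	/* stands in for the XPT_CLOSE bit */
	atomic_int sq_count;	/* outstanding send work requests */
	int sq_depth;		/* send queue capacity */
};

/* returns 1 when the caller may post a send, 0 when it should give up */
int wait_for_sq_space(struct fake_xprt *x)
{
	while (atomic_load(&x->sq_count) >= x->sq_depth) {
		/* the real code sleeps in wait_event() here */
		if (atomic_load(&x->closed))
			return 0;	/* transport went away while waiting */
	}
	return 1;
}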