Diffstat (limited to 'net/sunrpc')
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_crypto.c |  2
-rw-r--r--  net/sunrpc/auth_gss/svcauth_gss.c     |  2
-rw-r--r--  net/sunrpc/cache.c                    | 54
-rw-r--r--  net/sunrpc/svc.c                      |  3
-rw-r--r--  net/sunrpc/svc_xprt.c                 | 97
-rw-r--r--  net/sunrpc/svcauth.c                  |  1
-rw-r--r--  net/sunrpc/svcauth_unix.c             | 17
-rw-r--r--  net/sunrpc/svcsock.c                  | 20
-rw-r--r--  net/sunrpc/xprt.c                     |  5
-rw-r--r--  net/sunrpc/xprtsock.c                 | 34
10 files changed, 141 insertions(+), 94 deletions(-)
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index 75ee993ea057..9576f35ab701 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -137,7 +137,7 @@ arcfour_hmac_md5_usage_to_salt(unsigned int usage, u8 salt[4])
 		ms_usage = 13;
 		break;
 	default:
-		return EINVAL;;
+		return -EINVAL;
 	}
 	salt[0] = (ms_usage >> 0) & 0xff;
 	salt[1] = (ms_usage >> 8) & 0xff;
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index dec2a6fc7c12..bcdae78fdfc6 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -67,7 +67,6 @@ static int netobj_equal(struct xdr_netobj *a, struct xdr_netobj *b)
 
 #define	RSI_HASHBITS	6
 #define	RSI_HASHMAX	(1<<RSI_HASHBITS)
-#define	RSI_HASHMASK	(RSI_HASHMAX-1)
 
 struct rsi {
 	struct cache_head	h;
@@ -319,7 +318,6 @@ static struct rsi *rsi_update(struct rsi *new, struct rsi *old)
 
 #define	RSC_HASHBITS	10
 #define	RSC_HASHMAX	(1<<RSC_HASHBITS)
-#define	RSC_HASHMASK	(RSC_HASHMAX-1)
 
 #define GSS_SEQ_WIN	128
 
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index e433e7580e27..72ad836e4fe0 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -37,7 +37,7 @@
 
 #define	RPCDBG_FACILITY	RPCDBG_CACHE
 
-static void cache_defer_req(struct cache_req *req, struct cache_head *item);
+static bool cache_defer_req(struct cache_req *req, struct cache_head *item);
 static void cache_revisit_request(struct cache_head *item);
 
 static void cache_init(struct cache_head *h)
@@ -128,6 +128,7 @@ static void cache_fresh_locked(struct cache_head *head, time_t expiry)
 {
 	head->expiry_time = expiry;
 	head->last_refresh = seconds_since_boot();
+	smp_wmb(); /* paired with smp_rmb() in cache_is_valid() */
 	set_bit(CACHE_VALID, &head->flags);
 }
 
@@ -208,11 +209,36 @@ static inline int cache_is_valid(struct cache_detail *detail, struct cache_head
 		/* entry is valid */
 		if (test_bit(CACHE_NEGATIVE, &h->flags))
 			return -ENOENT;
-		else
+		else {
+			/*
+			 * In combination with write barrier in
+			 * sunrpc_cache_update, ensures that anyone
+			 * using the cache entry after this sees the
+			 * updated contents:
+			 */
+			smp_rmb();
 			return 0;
+		}
 	}
 }
 
+static int try_to_negate_entry(struct cache_detail *detail, struct cache_head *h)
+{
+	int rv;
+
+	write_lock(&detail->hash_lock);
+	rv = cache_is_valid(detail, h);
+	if (rv != -EAGAIN) {
+		write_unlock(&detail->hash_lock);
+		return rv;
+	}
+	set_bit(CACHE_NEGATIVE, &h->flags);
+	cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY);
+	write_unlock(&detail->hash_lock);
+	cache_fresh_unlocked(h, detail);
+	return -ENOENT;
+}
+
 /*
  * This is the generic cache management routine for all
  * the authentication caches.
@@ -251,14 +277,8 @@ int cache_check(struct cache_detail *detail,
 		case -EINVAL:
 			clear_bit(CACHE_PENDING, &h->flags);
 			cache_revisit_request(h);
-			if (rv == -EAGAIN) {
-				set_bit(CACHE_NEGATIVE, &h->flags);
-				cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY);
-				cache_fresh_unlocked(h, detail);
-				rv = -ENOENT;
-			}
+			rv = try_to_negate_entry(detail, h);
 			break;
-
 		case -EAGAIN:
 			clear_bit(CACHE_PENDING, &h->flags);
 			cache_revisit_request(h);
@@ -268,9 +288,11 @@ int cache_check(struct cache_detail *detail,
 	}
 
 	if (rv == -EAGAIN) {
-		cache_defer_req(rqstp, h);
-		if (!test_bit(CACHE_PENDING, &h->flags)) {
-			/* Request is not deferred */
+		if (!cache_defer_req(rqstp, h)) {
+			/*
+			 * Request was not deferred; handle it as best
+			 * we can ourselves:
+			 */
 			rv = cache_is_valid(detail, h);
 			if (rv == -EAGAIN)
 				rv = -ETIMEDOUT;
@@ -618,18 +640,19 @@ static void cache_limit_defers(void)
 		discard->revisit(discard, 1);
 }
 
-static void cache_defer_req(struct cache_req *req, struct cache_head *item)
+/* Return true if and only if a deferred request is queued. */
+static bool cache_defer_req(struct cache_req *req, struct cache_head *item)
 {
 	struct cache_deferred_req *dreq;
 
 	if (req->thread_wait) {
 		cache_wait_req(req, item);
 		if (!test_bit(CACHE_PENDING, &item->flags))
-			return;
+			return false;
 	}
 	dreq = req->defer(req);
 	if (dreq == NULL)
-		return;
+		return false;
 	setup_deferral(dreq, item, 1);
 	if (!test_bit(CACHE_PENDING, &item->flags))
 		/* Bit could have been cleared before we managed to
@@ -638,6 +661,7 @@ static void cache_defer_req(struct cache_req *req, struct cache_head *item)
 		cache_revisit_request(item);
 
 	cache_limit_defers();
+	return true;
 }
 
 static void cache_revisit_request(struct cache_head *item)
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 0e659c665a8d..08e05a8ce025 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -1001,6 +1001,7 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
 	rqstp->rq_splice_ok = 1;
 	/* Will be turned off only when NFSv4 Sessions are used */
 	rqstp->rq_usedeferral = 1;
+	rqstp->rq_dropme = false;
 
 	/* Setup reply header */
 	rqstp->rq_xprt->xpt_ops->xpo_prep_reply_hdr(rqstp);
@@ -1102,7 +1103,7 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
 	*statp = procp->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp);
 
 	/* Encode reply */
-	if (*statp == rpc_drop_reply) {
+	if (rqstp->rq_dropme) {
 		if (procp->pc_release)
 			procp->pc_release(rqstp, NULL, rqstp->rq_resp);
 		goto dropit;
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 3f2c5559ca1a..ab86b7927f84 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -13,6 +13,7 @@
 #include <linux/sunrpc/stats.h>
 #include <linux/sunrpc/svc_xprt.h>
 #include <linux/sunrpc/svcsock.h>
+#include <linux/sunrpc/xprt.h>
 
 #define RPCDBG_FACILITY	RPCDBG_SVCXPRT
 
@@ -128,6 +129,9 @@ static void svc_xprt_free(struct kref *kref)
 	if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags))
 		svcauth_unix_info_release(xprt);
 	put_net(xprt->xpt_net);
+	/* See comment on corresponding get in xs_setup_bc_tcp(): */
+	if (xprt->xpt_bc_xprt)
+		xprt_put(xprt->xpt_bc_xprt);
 	xprt->xpt_ops->xpo_free(xprt);
 	module_put(owner);
 }
@@ -303,6 +307,15 @@ static void svc_thread_dequeue(struct svc_pool *pool, struct svc_rqst *rqstp)
 	list_del(&rqstp->rq_list);
 }
 
+static bool svc_xprt_has_something_to_do(struct svc_xprt *xprt)
+{
+	if (xprt->xpt_flags & ((1<<XPT_CONN)|(1<<XPT_CLOSE)))
+		return true;
+	if (xprt->xpt_flags & ((1<<XPT_DATA)|(1<<XPT_DEFERRED)))
+		return xprt->xpt_ops->xpo_has_wspace(xprt);
+	return false;
+}
+
 /*
  * Queue up a transport with data pending. If there are idle nfsd
  * processes, wake 'em up.
@@ -315,8 +328,7 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
 	struct svc_rqst *rqstp;
 	int cpu;
 
-	if (!(xprt->xpt_flags &
-	      ((1<<XPT_CONN)|(1<<XPT_DATA)|(1<<XPT_CLOSE)|(1<<XPT_DEFERRED))))
+	if (!svc_xprt_has_something_to_do(xprt))
 		return;
 
 	cpu = get_cpu();
@@ -343,28 +355,7 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
 		dprintk("svc: transport %p busy, not enqueued\n", xprt);
 		goto out_unlock;
 	}
-	BUG_ON(xprt->xpt_pool != NULL);
-	xprt->xpt_pool = pool;
-
-	/* Handle pending connection */
-	if (test_bit(XPT_CONN, &xprt->xpt_flags))
-		goto process;
-
-	/* Handle close in-progress */
-	if (test_bit(XPT_CLOSE, &xprt->xpt_flags))
-		goto process;
-
-	/* Check if we have space to reply to a request */
-	if (!xprt->xpt_ops->xpo_has_wspace(xprt)) {
-		/* Don't enqueue while not enough space for reply */
-		dprintk("svc: no write space, transport %p not enqueued\n",
-			xprt);
-		xprt->xpt_pool = NULL;
-		clear_bit(XPT_BUSY, &xprt->xpt_flags);
-		goto out_unlock;
-	}
 
- process:
 	if (!list_empty(&pool->sp_threads)) {
 		rqstp = list_entry(pool->sp_threads.next,
 				   struct svc_rqst,
@@ -381,13 +372,11 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
 		rqstp->rq_reserved = serv->sv_max_mesg;
 		atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
 		pool->sp_stats.threads_woken++;
-		BUG_ON(xprt->xpt_pool != pool);
 		wake_up(&rqstp->rq_wait);
 	} else {
 		dprintk("svc: transport %p put into queue\n", xprt);
 		list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
 		pool->sp_stats.sockets_queued++;
-		BUG_ON(xprt->xpt_pool != pool);
 	}
 
 out_unlock:
@@ -426,7 +415,6 @@ static struct svc_xprt *svc_xprt_dequeue(struct svc_pool *pool)
 void svc_xprt_received(struct svc_xprt *xprt)
 {
 	BUG_ON(!test_bit(XPT_BUSY, &xprt->xpt_flags));
-	xprt->xpt_pool = NULL;
 	/* As soon as we clear busy, the xprt could be closed and
 	 * 'put', so we need a reference to call svc_xprt_enqueue with:
 	 */
@@ -722,7 +710,10 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
 	if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
 		dprintk("svc_recv: found XPT_CLOSE\n");
 		svc_delete_xprt(xprt);
-	} else if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
+		/* Leave XPT_BUSY set on the dead xprt: */
+		goto out;
+	}
+	if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
 		struct svc_xprt *newxpt;
 		newxpt = xprt->xpt_ops->xpo_accept(xprt);
 		if (newxpt) {
@@ -747,28 +738,23 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
 			spin_unlock_bh(&serv->sv_lock);
 			svc_xprt_received(newxpt);
 		}
-		svc_xprt_received(xprt);
-	} else {
+	} else if (xprt->xpt_ops->xpo_has_wspace(xprt)) {
 		dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n",
 			rqstp, pool->sp_id, xprt,
 			atomic_read(&xprt->xpt_ref.refcount));
 		rqstp->rq_deferred = svc_deferred_dequeue(xprt);
-		if (rqstp->rq_deferred) {
-			svc_xprt_received(xprt);
+		if (rqstp->rq_deferred)
 			len = svc_deferred_recv(rqstp);
-		} else {
+		else
 			len = xprt->xpt_ops->xpo_recvfrom(rqstp);
-			svc_xprt_received(xprt);
-		}
 		dprintk("svc: got len=%d\n", len);
 	}
+	svc_xprt_received(xprt);
 
 	/* No data, incomplete (TCP) read, or accept() */
-	if (len == 0 || len == -EAGAIN) {
-		rqstp->rq_res.len = 0;
-		svc_xprt_release(rqstp);
-		return -EAGAIN;
-	}
+	if (len == 0 || len == -EAGAIN)
+		goto out;
+
 	clear_bit(XPT_OLD, &xprt->xpt_flags);
 
 	rqstp->rq_secure = svc_port_is_privileged(svc_addr(rqstp));
@@ -777,6 +763,10 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
 	if (serv->sv_stats)
 		serv->sv_stats->netcnt++;
 	return len;
+out:
+	rqstp->rq_res.len = 0;
+	svc_xprt_release(rqstp);
+	return -EAGAIN;
 }
 EXPORT_SYMBOL_GPL(svc_recv);
 
@@ -935,7 +925,12 @@ void svc_close_xprt(struct svc_xprt *xprt)
 	if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags))
 		/* someone else will have to effect the close */
 		return;
-
+	/*
+	 * We expect svc_close_xprt() to work even when no threads are
+	 * running (e.g., while configuring the server before starting
+	 * any threads), so if the transport isn't busy, we delete
+	 * it ourself:
+	 */
 	svc_delete_xprt(xprt);
 }
 EXPORT_SYMBOL_GPL(svc_close_xprt);
@@ -945,16 +940,16 @@ void svc_close_all(struct list_head *xprt_list)
 	struct svc_xprt *xprt;
 	struct svc_xprt *tmp;
 
+	/*
+	 * The server is shutting down, and no more threads are running.
+	 * svc_xprt_enqueue() might still be running, but at worst it
+	 * will re-add the xprt to sp_sockets, which will soon get
+	 * freed.  So we don't bother with any more locking, and don't
+	 * leave the close to the (nonexistent) server threads:
+	 */
 	list_for_each_entry_safe(xprt, tmp, xprt_list, xpt_list) {
 		set_bit(XPT_CLOSE, &xprt->xpt_flags);
-		if (test_bit(XPT_BUSY, &xprt->xpt_flags)) {
-			/* Waiting to be processed, but no threads left,
-			 * So just remove it from the waiting list
-			 */
-			list_del_init(&xprt->xpt_ready);
-			clear_bit(XPT_BUSY, &xprt->xpt_flags);
-		}
-		svc_close_xprt(xprt);
+		svc_delete_xprt(xprt);
 	}
 }
 
@@ -1028,6 +1023,7 @@ static struct cache_deferred_req *svc_defer(struct cache_req *req)
 	}
 	svc_xprt_get(rqstp->rq_xprt);
 	dr->xprt = rqstp->rq_xprt;
+	rqstp->rq_dropme = true;
 
 	dr->handle.revisit = svc_revisit;
 	return &dr->handle;
@@ -1065,14 +1061,13 @@ static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt)
 	if (!test_bit(XPT_DEFERRED, &xprt->xpt_flags))
 		return NULL;
 	spin_lock(&xprt->xpt_lock);
-	clear_bit(XPT_DEFERRED, &xprt->xpt_flags);
 	if (!list_empty(&xprt->xpt_deferred)) {
 		dr = list_entry(xprt->xpt_deferred.next,
 				struct svc_deferred_req,
 				handle.recent);
 		list_del_init(&dr->handle.recent);
-		set_bit(XPT_DEFERRED, &xprt->xpt_flags);
-	}
+	} else
+		clear_bit(XPT_DEFERRED, &xprt->xpt_flags);
 	spin_unlock(&xprt->xpt_lock);
 	return dr;
 }
diff --git a/net/sunrpc/svcauth.c b/net/sunrpc/svcauth.c
index 4e9393c24687..7963569fc04f 100644
--- a/net/sunrpc/svcauth.c
+++ b/net/sunrpc/svcauth.c
@@ -118,7 +118,6 @@ EXPORT_SYMBOL_GPL(svc_auth_unregister);
 
 #define	DN_HASHBITS	6
 #define	DN_HASHMAX	(1<<DN_HASHBITS)
-#define	DN_HASHMASK	(DN_HASHMAX-1)
 
 static struct hlist_head	auth_domain_table[DN_HASHMAX];
 static spinlock_t	auth_domain_lock =
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index 560677d187f1..30916b06c12b 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -30,7 +30,9 @@
 
 struct unix_domain {
 	struct auth_domain	h;
+#ifdef CONFIG_NFSD_DEPRECATED
 	int	addr_changes;
+#endif /* CONFIG_NFSD_DEPRECATED */
 	/* other stuff later */
 };
 
@@ -64,7 +66,9 @@ struct auth_domain *unix_domain_find(char *name)
 			return NULL;
 		}
 		new->h.flavour = &svcauth_unix;
+#ifdef CONFIG_NFSD_DEPRECATED
 		new->addr_changes = 0;
+#endif /* CONFIG_NFSD_DEPRECATED */
 		rv = auth_domain_lookup(name, &new->h);
 	}
 }
@@ -85,14 +89,15 @@ static void svcauth_unix_domain_release(struct auth_domain *dom)
  */
 #define	IP_HASHBITS	8
 #define	IP_HASHMAX	(1<<IP_HASHBITS)
-#define	IP_HASHMASK	(IP_HASHMAX-1)
 
 struct ip_map {
 	struct cache_head	h;
 	char			m_class[8]; /* e.g. "nfsd" */
 	struct in6_addr		m_addr;
 	struct unix_domain	*m_client;
+#ifdef CONFIG_NFSD_DEPRECATED
 	int			m_add_change;
+#endif /* CONFIG_NFSD_DEPRECATED */
 };
 
 static void ip_map_put(struct kref *kref)
@@ -146,7 +151,9 @@ static void update(struct cache_head *cnew, struct cache_head *citem)
 
 	kref_get(&item->m_client->h.ref);
 	new->m_client = item->m_client;
+#ifdef CONFIG_NFSD_DEPRECATED
 	new->m_add_change = item->m_add_change;
+#endif /* CONFIG_NFSD_DEPRECATED */
 }
 static struct cache_head *ip_map_alloc(void)
 {
@@ -331,6 +338,7 @@ static int __ip_map_update(struct cache_detail *cd, struct ip_map *ipm,
 	ip.h.flags = 0;
 	if (!udom)
 		set_bit(CACHE_NEGATIVE, &ip.h.flags);
+#ifdef CONFIG_NFSD_DEPRECATED
 	else {
 		ip.m_add_change = udom->addr_changes;
 		/* if this is from the legacy set_client system call,
@@ -339,6 +347,7 @@ static int __ip_map_update(struct cache_detail *cd, struct ip_map *ipm,
 		if (expiry == NEVER)
 			ip.m_add_change++;
 	}
+#endif /* CONFIG_NFSD_DEPRECATED */
 	ip.h.expiry_time = expiry;
 	ch = sunrpc_cache_update(cd, &ip.h, &ipm->h,
 				 hash_str(ipm->m_class, IP_HASHBITS) ^
@@ -358,6 +367,7 @@ static inline int ip_map_update(struct net *net, struct ip_map *ipm,
 	return __ip_map_update(sn->ip_map_cache, ipm, udom, expiry);
 }
 
+#ifdef CONFIG_NFSD_DEPRECATED
 int auth_unix_add_addr(struct net *net, struct in6_addr *addr, struct auth_domain *dom)
 {
 	struct unix_domain *udom;
@@ -402,8 +412,7 @@ struct auth_domain *auth_unix_lookup(struct net *net, struct in6_addr *addr)
 		return NULL;
 
 	if ((ipm->m_client->addr_changes - ipm->m_add_change) >0) {
-		if (test_and_set_bit(CACHE_NEGATIVE, &ipm->h.flags) == 0)
-			auth_domain_put(&ipm->m_client->h);
+		sunrpc_invalidate(&ipm->h, sn->ip_map_cache);
 		rv = NULL;
 	} else {
 		rv = &ipm->m_client->h;
@@ -413,6 +422,7 @@ struct auth_domain *auth_unix_lookup(struct net *net, struct in6_addr *addr)
 	return rv;
 }
 EXPORT_SYMBOL_GPL(auth_unix_lookup);
+#endif /* CONFIG_NFSD_DEPRECATED */
 
 void svcauth_unix_purge(void)
 {
@@ -497,7 +507,6 @@ svcauth_unix_info_release(struct svc_xprt *xpt)
  */
 #define	GID_HASHBITS	8
 #define	GID_HASHMAX	(1<<GID_HASHBITS)
-#define	GID_HASHMASK	(GID_HASHMAX - 1)
 
 struct unix_gid {
 	struct cache_head	h;
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index d265aa700bb3..7bd3bbba4710 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -331,19 +331,21 @@ int svc_sock_names(struct svc_serv *serv, char *buf, const size_t buflen,
 			len = onelen;
 			break;
 		}
-		if (toclose && strcmp(toclose, buf + len) == 0)
+		if (toclose && strcmp(toclose, buf + len) == 0) {
 			closesk = svsk;
-		else
+			svc_xprt_get(&closesk->sk_xprt);
+		} else
 			len += onelen;
 	}
 	spin_unlock_bh(&serv->sv_lock);
 
-	if (closesk)
+	if (closesk) {
 		/* Should unregister with portmap, but you cannot
 		 * unregister just one protocol...
 		 */
 		svc_close_xprt(&closesk->sk_xprt);
-	else if (toclose)
+		svc_xprt_put(&closesk->sk_xprt);
+	} else if (toclose)
 		return -ENOENT;
 	return len;
 }
@@ -992,15 +994,17 @@ static int svc_process_calldir(struct svc_sock *svsk, struct svc_rqst *rqstp,
 		vec[0] = rqstp->rq_arg.head[0];
 	} else {
 		/* REPLY */
-		if (svsk->sk_bc_xprt)
-			req = xprt_lookup_rqst(svsk->sk_bc_xprt, xid);
+		struct rpc_xprt *bc_xprt = svsk->sk_xprt.xpt_bc_xprt;
+
+		if (bc_xprt)
+			req = xprt_lookup_rqst(bc_xprt, xid);
 
 		if (!req) {
 			printk(KERN_NOTICE
 				"%s: Got unrecognized reply: "
-				"calldir 0x%x sk_bc_xprt %p xid %08x\n",
+				"calldir 0x%x xpt_bc_xprt %p xid %08x\n",
 				__func__, ntohl(calldir),
-				svsk->sk_bc_xprt, xid);
+				bc_xprt, xid);
 			vec[0] = rqstp->rq_arg.head[0];
 			goto out;
 		}
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 4c8f18aff7c3..856274d7e85c 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -965,6 +965,7 @@ struct rpc_xprt *xprt_alloc(struct net *net, int size, int max_req)
 	xprt = kzalloc(size, GFP_KERNEL);
 	if (xprt == NULL)
 		goto out;
+	kref_init(&xprt->kref);
 
 	xprt->max_reqs = max_req;
 	xprt->slot = kcalloc(max_req, sizeof(struct rpc_rqst), GFP_KERNEL);
@@ -1101,8 +1102,10 @@ found:
 				-PTR_ERR(xprt));
 		return xprt;
 	}
+	if (test_and_set_bit(XPRT_INITIALIZED, &xprt->state))
+		/* ->setup returned a pre-initialized xprt: */
+		return xprt;
 
-	kref_init(&xprt->kref);
 	spin_lock_init(&xprt->transport_lock);
 	spin_lock_init(&xprt->reserve_lock);
 
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 96549df836ee..c431f5a57960 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -2359,6 +2359,15 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
 	struct svc_sock *bc_sock;
 	struct rpc_xprt *ret;
 
+	if (args->bc_xprt->xpt_bc_xprt) {
+		/*
+		 * This server connection already has a backchannel
+		 * export; we can't create a new one, as we wouldn't be
+		 * able to match replies based on xid any more.  So,
+		 * reuse the already-existing one:
+		 */
+		return args->bc_xprt->xpt_bc_xprt;
+	}
 	xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries);
 	if (IS_ERR(xprt))
 		return xprt;
@@ -2375,16 +2384,6 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
 	xprt->reestablish_timeout = 0;
 	xprt->idle_timeout = 0;
 
-	/*
-	 * The backchannel uses the same socket connection as the
-	 * forechannel
-	 */
-	xprt->bc_xprt = args->bc_xprt;
-	bc_sock = container_of(args->bc_xprt, struct svc_sock, sk_xprt);
-	bc_sock->sk_bc_xprt = xprt;
-	transport->sock = bc_sock->sk_sock;
-	transport->inet = bc_sock->sk_sk;
-
 	xprt->ops = &bc_tcp_ops;
 
 	switch (addr->sa_family) {
@@ -2407,6 +2406,20 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
 			xprt->address_strings[RPC_DISPLAY_PROTO]);
 
 	/*
+	 * Once we've associated a backchannel xprt with a connection,
+	 * we want to keep it around as long as long as the connection
+	 * lasts, in case we need to start using it for a backchannel
+	 * again; this reference won't be dropped until bc_xprt is
+	 * destroyed.
+	 */
+	xprt_get(xprt);
+	args->bc_xprt->xpt_bc_xprt = xprt;
+	xprt->bc_xprt = args->bc_xprt;
+	bc_sock = container_of(args->bc_xprt, struct svc_sock, sk_xprt);
+	transport->sock = bc_sock->sk_sock;
+	transport->inet = bc_sock->sk_sk;
+
+	/*
 	 * Since we don't want connections for the backchannel, we set
 	 * the xprt status to connected
 	 */
@@ -2415,6 +2428,7 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
 
 	if (try_module_get(THIS_MODULE))
 		return xprt;
+	xprt_put(xprt);
 	ret = ERR_PTR(-EINVAL);
 out_err:
 	xprt_free(xprt);