author	Glenn Elliott <gelliott@cs.unc.edu>	2012-03-04 19:47:13 -0500
committer	Glenn Elliott <gelliott@cs.unc.edu>	2012-03-04 19:47:13 -0500
commit	c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree	ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /net/sunrpc/svc_xprt.c
parent	ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent	6a00f206debf8a5c8899055726ad127dbeeed098 (diff)
Merge branch 'mpi-master' into wip-k-fmlp

Conflicts:
	litmus/sched_cedf.c
Diffstat (limited to 'net/sunrpc/svc_xprt.c')
-rw-r--r--	net/sunrpc/svc_xprt.c	166
1 file changed, 92 insertions, 74 deletions
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index cbc084939dd8..ab86b7927f84 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -5,7 +5,6 @@
  */
 
 #include <linux/sched.h>
-#include <linux/smp_lock.h>
 #include <linux/errno.h>
 #include <linux/freezer.h>
 #include <linux/kthread.h>
@@ -14,6 +13,7 @@
 #include <linux/sunrpc/stats.h>
 #include <linux/sunrpc/svc_xprt.h>
 #include <linux/sunrpc/svcsock.h>
+#include <linux/sunrpc/xprt.h>
 
 #define RPCDBG_FACILITY	RPCDBG_SVCXPRT
 
@@ -100,16 +100,14 @@ EXPORT_SYMBOL_GPL(svc_unreg_xprt_class);
  */
 int svc_print_xprts(char *buf, int maxlen)
 {
-	struct list_head *le;
+	struct svc_xprt_class *xcl;
 	char tmpstr[80];
 	int len = 0;
 	buf[0] = '\0';
 
 	spin_lock(&svc_xprt_class_lock);
-	list_for_each(le, &svc_xprt_class_list) {
+	list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) {
 		int slen;
-		struct svc_xprt_class *xcl =
-			list_entry(le, struct svc_xprt_class, xcl_list);
 
 		sprintf(tmpstr, "%s %d\n", xcl->xcl_name, xcl->xcl_max_payload);
 		slen = strlen(tmpstr);
@@ -128,9 +126,12 @@ static void svc_xprt_free(struct kref *kref)
 	struct svc_xprt *xprt =
 		container_of(kref, struct svc_xprt, xpt_ref);
 	struct module *owner = xprt->xpt_class->xcl_owner;
-	if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags) &&
-	    xprt->xpt_auth_cache != NULL)
-		svcauth_unix_info_release(xprt->xpt_auth_cache);
+	if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags))
+		svcauth_unix_info_release(xprt);
+	put_net(xprt->xpt_net);
+	/* See comment on corresponding get in xs_setup_bc_tcp(): */
+	if (xprt->xpt_bc_xprt)
+		xprt_put(xprt->xpt_bc_xprt);
 	xprt->xpt_ops->xpo_free(xprt);
 	module_put(owner);
 }
@@ -156,15 +157,18 @@ void svc_xprt_init(struct svc_xprt_class *xcl, struct svc_xprt *xprt,
 	INIT_LIST_HEAD(&xprt->xpt_list);
 	INIT_LIST_HEAD(&xprt->xpt_ready);
 	INIT_LIST_HEAD(&xprt->xpt_deferred);
+	INIT_LIST_HEAD(&xprt->xpt_users);
 	mutex_init(&xprt->xpt_mutex);
 	spin_lock_init(&xprt->xpt_lock);
 	set_bit(XPT_BUSY, &xprt->xpt_flags);
 	rpc_init_wait_queue(&xprt->xpt_bc_pending, "xpt_bc_pending");
+	xprt->xpt_net = get_net(&init_net);
 }
 EXPORT_SYMBOL_GPL(svc_xprt_init);
 
 static struct svc_xprt *__svc_xpo_create(struct svc_xprt_class *xcl,
 					 struct svc_serv *serv,
+					 struct net *net,
 					 const int family,
 					 const unsigned short port,
 					 int flags)
@@ -199,12 +203,12 @@ static struct svc_xprt *__svc_xpo_create(struct svc_xprt_class *xcl,
 		return ERR_PTR(-EAFNOSUPPORT);
 	}
 
-	return xcl->xcl_ops->xpo_create(serv, sap, len, flags);
+	return xcl->xcl_ops->xpo_create(serv, net, sap, len, flags);
 }
 
 int svc_create_xprt(struct svc_serv *serv, const char *xprt_name,
-		    const int family, const unsigned short port,
-		    int flags)
+		    struct net *net, const int family,
+		    const unsigned short port, int flags)
 {
 	struct svc_xprt_class *xcl;
 
@@ -212,6 +216,7 @@ int svc_create_xprt(struct svc_serv *serv, const char *xprt_name,
 	spin_lock(&svc_xprt_class_lock);
 	list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) {
 		struct svc_xprt *newxprt;
+		unsigned short newport;
 
 		if (strcmp(xprt_name, xcl->xcl_name))
 			continue;
@@ -220,7 +225,7 @@ int svc_create_xprt(struct svc_serv *serv, const char *xprt_name,
 			goto err;
 
 		spin_unlock(&svc_xprt_class_lock);
-		newxprt = __svc_xpo_create(xcl, serv, family, port, flags);
+		newxprt = __svc_xpo_create(xcl, serv, net, family, port, flags);
 		if (IS_ERR(newxprt)) {
 			module_put(xcl->xcl_owner);
 			return PTR_ERR(newxprt);
@@ -230,8 +235,9 @@ int svc_create_xprt(struct svc_serv *serv, const char *xprt_name,
 		spin_lock_bh(&serv->sv_lock);
 		list_add(&newxprt->xpt_list, &serv->sv_permsocks);
 		spin_unlock_bh(&serv->sv_lock);
+		newport = svc_xprt_local_port(newxprt);
 		clear_bit(XPT_BUSY, &newxprt->xpt_flags);
-		return svc_xprt_local_port(newxprt);
+		return newport;
 	}
  err:
 	spin_unlock(&svc_xprt_class_lock);
@@ -301,6 +307,15 @@ static void svc_thread_dequeue(struct svc_pool *pool, struct svc_rqst *rqstp)
 	list_del(&rqstp->rq_list);
 }
 
+static bool svc_xprt_has_something_to_do(struct svc_xprt *xprt)
+{
+	if (xprt->xpt_flags & ((1<<XPT_CONN)|(1<<XPT_CLOSE)))
+		return true;
+	if (xprt->xpt_flags & ((1<<XPT_DATA)|(1<<XPT_DEFERRED)))
+		return xprt->xpt_ops->xpo_has_wspace(xprt);
+	return false;
+}
+
 /*
  * Queue up a transport with data pending. If there are idle nfsd
  * processes, wake 'em up.
@@ -313,8 +328,7 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
 	struct svc_rqst *rqstp;
 	int cpu;
 
-	if (!(xprt->xpt_flags &
-	      ((1<<XPT_CONN)|(1<<XPT_DATA)|(1<<XPT_CLOSE)|(1<<XPT_DEFERRED))))
+	if (!svc_xprt_has_something_to_do(xprt))
 		return;
 
 	cpu = get_cpu();
@@ -329,12 +343,6 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
329 "svc_xprt_enqueue: " 343 "svc_xprt_enqueue: "
330 "threads and transports both waiting??\n"); 344 "threads and transports both waiting??\n");
331 345
332 if (test_bit(XPT_DEAD, &xprt->xpt_flags)) {
333 /* Don't enqueue dead transports */
334 dprintk("svc: transport %p is dead, not enqueued\n", xprt);
335 goto out_unlock;
336 }
337
338 pool->sp_stats.packets++; 346 pool->sp_stats.packets++;
339 347
340 /* Mark transport as busy. It will remain in this state until 348 /* Mark transport as busy. It will remain in this state until
@@ -347,28 +355,7 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
 		dprintk("svc: transport %p busy, not enqueued\n", xprt);
 		goto out_unlock;
 	}
-	BUG_ON(xprt->xpt_pool != NULL);
-	xprt->xpt_pool = pool;
-
-	/* Handle pending connection */
-	if (test_bit(XPT_CONN, &xprt->xpt_flags))
-		goto process;
-
-	/* Handle close in-progress */
-	if (test_bit(XPT_CLOSE, &xprt->xpt_flags))
-		goto process;
-
-	/* Check if we have space to reply to a request */
-	if (!xprt->xpt_ops->xpo_has_wspace(xprt)) {
-		/* Don't enqueue while not enough space for reply */
-		dprintk("svc: no write space, transport %p not enqueued\n",
-			xprt);
-		xprt->xpt_pool = NULL;
-		clear_bit(XPT_BUSY, &xprt->xpt_flags);
-		goto out_unlock;
-	}
 
- process:
 	if (!list_empty(&pool->sp_threads)) {
 		rqstp = list_entry(pool->sp_threads.next,
 				   struct svc_rqst,
@@ -385,13 +372,11 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
 		rqstp->rq_reserved = serv->sv_max_mesg;
 		atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
 		pool->sp_stats.threads_woken++;
-		BUG_ON(xprt->xpt_pool != pool);
 		wake_up(&rqstp->rq_wait);
 	} else {
 		dprintk("svc: transport %p put into queue\n", xprt);
 		list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
 		pool->sp_stats.sockets_queued++;
-		BUG_ON(xprt->xpt_pool != pool);
 	}
 
 out_unlock:
@@ -430,9 +415,13 @@ static struct svc_xprt *svc_xprt_dequeue(struct svc_pool *pool)
 void svc_xprt_received(struct svc_xprt *xprt)
 {
 	BUG_ON(!test_bit(XPT_BUSY, &xprt->xpt_flags));
-	xprt->xpt_pool = NULL;
+	/* As soon as we clear busy, the xprt could be closed and
+	 * 'put', so we need a reference to call svc_xprt_enqueue with:
+	 */
+	svc_xprt_get(xprt);
 	clear_bit(XPT_BUSY, &xprt->xpt_flags);
 	svc_xprt_enqueue(xprt);
+	svc_xprt_put(xprt);
 }
 EXPORT_SYMBOL_GPL(svc_xprt_received);
 
@@ -651,6 +640,11 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
 	if (signalled() || kthread_should_stop())
 		return -EINTR;
 
+	/* Normally we will wait up to 5 seconds for any required
+	 * cache information to be provided.
+	 */
+	rqstp->rq_chandle.thread_wait = 5*HZ;
+
 	spin_lock_bh(&pool->sp_lock);
 	xprt = svc_xprt_dequeue(pool);
 	if (xprt) {
@@ -658,6 +652,12 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
 		svc_xprt_get(xprt);
 		rqstp->rq_reserved = serv->sv_max_mesg;
 		atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
+
+		/* As there is a shortage of threads and this request
+		 * had to be queued, don't allow the thread to wait so
+		 * long for cache updates.
+		 */
+		rqstp->rq_chandle.thread_wait = 1*HZ;
 	} else {
 		/* No data pending. Go to sleep */
 		svc_thread_enqueue(pool, rqstp);
@@ -710,7 +710,10 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
 	if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
 		dprintk("svc_recv: found XPT_CLOSE\n");
 		svc_delete_xprt(xprt);
-	} else if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
+		/* Leave XPT_BUSY set on the dead xprt: */
+		goto out;
+	}
+	if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
 		struct svc_xprt *newxpt;
 		newxpt = xprt->xpt_ops->xpo_accept(xprt);
 		if (newxpt) {
@@ -735,28 +738,23 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
 			spin_unlock_bh(&serv->sv_lock);
 			svc_xprt_received(newxpt);
 		}
-		svc_xprt_received(xprt);
-	} else {
+	} else if (xprt->xpt_ops->xpo_has_wspace(xprt)) {
 		dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n",
 			rqstp, pool->sp_id, xprt,
 			atomic_read(&xprt->xpt_ref.refcount));
 		rqstp->rq_deferred = svc_deferred_dequeue(xprt);
-		if (rqstp->rq_deferred) {
-			svc_xprt_received(xprt);
+		if (rqstp->rq_deferred)
 			len = svc_deferred_recv(rqstp);
-		} else {
+		else
 			len = xprt->xpt_ops->xpo_recvfrom(rqstp);
-			svc_xprt_received(xprt);
-		}
 		dprintk("svc: got len=%d\n", len);
 	}
+	svc_xprt_received(xprt);
 
 	/* No data, incomplete (TCP) read, or accept() */
-	if (len == 0 || len == -EAGAIN) {
-		rqstp->rq_res.len = 0;
-		svc_xprt_release(rqstp);
-		return -EAGAIN;
-	}
+	if (len == 0 || len == -EAGAIN)
+		goto out;
+
 	clear_bit(XPT_OLD, &xprt->xpt_flags);
 
 	rqstp->rq_secure = svc_port_is_privileged(svc_addr(rqstp));
@@ -765,6 +763,10 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
 	if (serv->sv_stats)
 		serv->sv_stats->netcnt++;
 	return len;
+out:
+	rqstp->rq_res.len = 0;
+	svc_xprt_release(rqstp);
+	return -EAGAIN;
 }
 EXPORT_SYMBOL_GPL(svc_recv);
 
@@ -868,6 +870,19 @@ static void svc_age_temp_xprts(unsigned long closure)
 	mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ);
 }
 
+static void call_xpt_users(struct svc_xprt *xprt)
+{
+	struct svc_xpt_user *u;
+
+	spin_lock(&xprt->xpt_lock);
+	while (!list_empty(&xprt->xpt_users)) {
+		u = list_first_entry(&xprt->xpt_users, struct svc_xpt_user, list);
+		list_del(&u->list);
+		u->callback(u);
+	}
+	spin_unlock(&xprt->xpt_lock);
+}
+
 /*
  * Remove a dead transport
  */
@@ -878,7 +893,7 @@ void svc_delete_xprt(struct svc_xprt *xprt)
 
 	/* Only do this once */
 	if (test_and_set_bit(XPT_DEAD, &xprt->xpt_flags))
-		return;
+		BUG();
 
 	dprintk("svc: svc_delete_xprt(%p)\n", xprt);
 	xprt->xpt_ops->xpo_detach(xprt);
@@ -900,6 +915,7 @@ void svc_delete_xprt(struct svc_xprt *xprt)
 	while ((dr = svc_deferred_dequeue(xprt)) != NULL)
 		kfree(dr);
 
+	call_xpt_users(xprt);
 	svc_xprt_put(xprt);
 }
 
@@ -909,11 +925,13 @@ void svc_close_xprt(struct svc_xprt *xprt)
 	if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags))
 		/* someone else will have to effect the close */
 		return;
-
-	svc_xprt_get(xprt);
+	/*
+	 * We expect svc_close_xprt() to work even when no threads are
+	 * running (e.g., while configuring the server before starting
+	 * any threads), so if the transport isn't busy, we delete
+	 * it ourself:
+	 */
 	svc_delete_xprt(xprt);
-	clear_bit(XPT_BUSY, &xprt->xpt_flags);
-	svc_xprt_put(xprt);
 }
 EXPORT_SYMBOL_GPL(svc_close_xprt);
 
@@ -922,16 +940,16 @@ void svc_close_all(struct list_head *xprt_list)
 	struct svc_xprt *xprt;
 	struct svc_xprt *tmp;
 
+	/*
+	 * The server is shutting down, and no more threads are running.
+	 * svc_xprt_enqueue() might still be running, but at worst it
+	 * will re-add the xprt to sp_sockets, which will soon get
+	 * freed.  So we don't bother with any more locking, and don't
+	 * leave the close to the (nonexistent) server threads:
+	 */
 	list_for_each_entry_safe(xprt, tmp, xprt_list, xpt_list) {
 		set_bit(XPT_CLOSE, &xprt->xpt_flags);
-		if (test_bit(XPT_BUSY, &xprt->xpt_flags)) {
-			/* Waiting to be processed, but no threads left,
-			 * So just remove it from the waiting list
-			 */
-			list_del_init(&xprt->xpt_ready);
-			clear_bit(XPT_BUSY, &xprt->xpt_flags);
-		}
-		svc_close_xprt(xprt);
+		svc_delete_xprt(xprt);
 	}
 }
 
@@ -1005,6 +1023,7 @@ static struct cache_deferred_req *svc_defer(struct cache_req *req)
 	}
 	svc_xprt_get(rqstp->rq_xprt);
 	dr->xprt = rqstp->rq_xprt;
+	rqstp->rq_dropme = true;
 
 	dr->handle.revisit = svc_revisit;
 	return &dr->handle;
@@ -1042,14 +1061,13 @@ static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt)
 	if (!test_bit(XPT_DEFERRED, &xprt->xpt_flags))
 		return NULL;
 	spin_lock(&xprt->xpt_lock);
-	clear_bit(XPT_DEFERRED, &xprt->xpt_flags);
 	if (!list_empty(&xprt->xpt_deferred)) {
 		dr = list_entry(xprt->xpt_deferred.next,
 				struct svc_deferred_req,
 				handle.recent);
 		list_del_init(&dr->handle.recent);
-		set_bit(XPT_DEFERRED, &xprt->xpt_flags);
-	}
+	} else
+		clear_bit(XPT_DEFERRED, &xprt->xpt_flags);
 	spin_unlock(&xprt->xpt_lock);
 	return dr;
 }
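
Usage notes (sketches, not part of the commit):

svc_create_xprt() and the xpo_create() method now take a struct net *. A
caller sketch, assuming the server still lives in the initial network
namespace (the same assumption svc_xprt_init() makes with
get_net(&init_net)); "tcp", PF_INET and port 2049 are illustrative, not
taken from this diff:

	int port = svc_create_xprt(serv, "tcp", &init_net, PF_INET, 2049,
				   SVC_SOCK_DEFAULTS);
	if (port < 0)
		return port;	/* negative errno; otherwise the bound port */

svc_delete_xprt() now drains the new xpt_users list through
call_xpt_users(), invoking each entry's callback exactly once as the
transport dies. A minimal sketch of a consumer, assuming struct
svc_xpt_user carries the 'list' and 'callback' members that
call_xpt_users() dereferences; my_conn_watch, my_conn_lost() and
my_watch_xprt() are hypothetical names:

	struct my_conn_watch {
		struct svc_xpt_user	xu;	/* handed back to the callback */
		int			id;	/* example payload */
	};

	static void my_conn_lost(struct svc_xpt_user *xu)
	{
		/* Runs from svc_delete_xprt() via call_xpt_users() with
		 * xpt_lock held, so no sleeping here. */
		struct my_conn_watch *w = container_of(xu, struct my_conn_watch, xu);

		printk(KERN_INFO "watch %d: transport is gone\n", w->id);
		kfree(w);	/* kfree() is safe in atomic context */
	}

	static int my_watch_xprt(struct svc_xprt *xprt, int id)
	{
		struct my_conn_watch *w = kzalloc(sizeof(*w), GFP_KERNEL);

		if (!w)
			return -ENOMEM;
		w->id = id;
		w->xu.callback = my_conn_lost;

		/* xpt_users is protected by xpt_lock, mirroring call_xpt_users() */
		spin_lock(&xprt->xpt_lock);
		list_add(&w->xu.list, &xprt->xpt_users);
		spin_unlock(&xprt->xpt_lock);
		return 0;
	}

Because call_xpt_users() holds xpt_lock while walking the list, callbacks
must not block or re-take that lock; anything heavier belongs in a
workqueue.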