author    Trond Myklebust <Trond.Myklebust@netapp.com>  2011-12-01 14:16:17 -0500
committer Trond Myklebust <Trond.Myklebust@netapp.com>  2011-12-01 14:16:17 -0500
commit    c25573b5134294c0be82bfaecc6d08136835b271 (patch)
tree      e3ac5119ad559cc32d391e8384d83a6ac2c06371 /net
parent    7fdcf13b292e8b2e38e42de24be2503e37b2cf97 (diff)
SUNRPC: Ensure we always bump the backlog queue in xprt_free_slot
Whenever we free a slot, we know that the resulting xprt->num_reqs
will be less than xprt->max_reqs, so we know that we can release at
least one backlogged rpc_task.

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
Cc: stable@vger.kernel.org [>=3.1]
Diffstat (limited to 'net')
-rw-r--r--  net/sunrpc/xprt.c  10
1 file changed, 4 insertions(+), 6 deletions(-)
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index f4385e45a5fc..c64c0ef519b5 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -995,13 +995,11 @@ out_init_req:
 
 static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
 {
-	if (xprt_dynamic_free_slot(xprt, req))
-		return;
-
-	memset(req, 0, sizeof(*req));	/* mark unused */
-
 	spin_lock(&xprt->reserve_lock);
-	list_add(&req->rq_list, &xprt->free);
+	if (!xprt_dynamic_free_slot(xprt, req)) {
+		memset(req, 0, sizeof(*req));	/* mark unused */
+		list_add(&req->rq_list, &xprt->free);
+	}
 	rpc_wake_up_next(&xprt->backlog);
 	spin_unlock(&xprt->reserve_lock);
 }
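
To make the fix concrete, here is a minimal user-space C sketch of the
before/after control flow. Everything in it (struct mock_xprt,
dynamic_free_slot(), wake_next()) is an invented stand-in rather than the
kernel's API, and the reserve_lock spinlock is omitted; the only thing
modelled is whether the backlog wakeup runs when a dynamically allocated
slot is freed.

/*
 * Simplified model of the patch above; compile with any C99 compiler.
 * Not kernel code: the struct and helpers are illustrative stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

struct mock_xprt {
	int backlog;	/* tasks sleeping while they wait for a slot */
};

/* Stand-in for xprt_dynamic_free_slot(): returns true when the slot
 * was dynamically allocated and has been handed back to the allocator. */
static bool dynamic_free_slot(struct mock_xprt *x, bool dynamic)
{
	(void)x;
	return dynamic;
}

/* Stand-in for rpc_wake_up_next(&xprt->backlog). */
static void wake_next(struct mock_xprt *x)
{
	if (x->backlog > 0)
		printf("woke one task, backlog now %d\n", --x->backlog);
}

/* Pre-patch shape: a dynamic slot takes the early return, so the
 * wakeup is skipped and a backlogged task can sleep forever even
 * though a request slot has just been released. */
static void free_slot_old(struct mock_xprt *x, bool dynamic)
{
	if (dynamic_free_slot(x, dynamic))
		return;			/* bug: no wakeup on this path */
	/* ... slot goes back on the free list ... */
	wake_next(x);
}

/* Patched shape: only the free-list handling is conditional; the
 * wakeup runs on every call, matching the commit's reasoning that
 * freeing any slot guarantees room for one more task. */
static void free_slot_new(struct mock_xprt *x, bool dynamic)
{
	if (!dynamic_free_slot(x, dynamic)) {
		/* ... slot goes back on the free list ... */
	}
	wake_next(x);
}

int main(void)
{
	struct mock_xprt x = { .backlog = 2 };

	free_slot_old(&x, true);	/* prints nothing: task left stuck */
	free_slot_new(&x, true);	/* wakes one backlogged task */
	return 0;
}

In the old shape the early return skips the wakeup whenever
xprt_dynamic_free_slot() succeeds, so a task queued on xprt->backlog can
wait indefinitely even though a slot just became available; the patch
makes only the free-list handling conditional and leaves the wakeup
unconditional.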