path: root/net/sunrpc/xprt.c
author    Trond Myklebust <Trond.Myklebust@netapp.com>  2011-07-17 18:11:30 -0400
committer Trond Myklebust <Trond.Myklebust@netapp.com>  2011-07-17 18:11:30 -0400
commit    d9ba131d8f58c0d2ff5029e7002ab43f913b36f9 (patch)
tree      f2ed7330c72077bf84954b989cbe1ff47522a115 /net/sunrpc/xprt.c
parent    21de0a955f3af29fa1100d96f66e6adade89e77a (diff)
SUNRPC: Support dynamic slot allocation for TCP connections
Allow the number of available slots to grow with the TCP window size.

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
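The growth is bounded: the new xprt_dynamic_alloc_slot() uses atomic_add_unless() so the slot count can rise only as far as xprt->max_reqs, and xprt_dynamic_free_slot() lets it fall back no further than the preallocated floor xprt->min_reqs. The sketch below is a minimal userspace analogue of that grow-to-a-cap / shrink-to-a-floor pattern, not kernel code: C11 atomics stand in for atomic_t and atomic_add_unless(), calloc()/free() for kzalloc()/kfree(), and every name (slot_pool, pool_alloc_slot, pool_free_slot) is invented for illustration.

/*
 * Illustrative userspace analogue only -- NOT part of the patch below.
 * C11 atomics stand in for the kernel's atomic_t/atomic_add_unless(),
 * and calloc()/free() stand in for kzalloc()/kfree().
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct slot_pool {
	atomic_uint num_slots;		/* slots currently allocated */
	unsigned int min_slots;		/* preallocated floor, never shrink below */
	unsigned int max_slots;		/* hard cap on dynamic growth */
};

/* Counterpart of atomic_add_unless(): add delta unless counter == unless. */
static bool add_unless(atomic_uint *v, int delta, unsigned int unless)
{
	unsigned int cur = atomic_load(v);

	do {
		if (cur == unless)
			return false;
	} while (!atomic_compare_exchange_weak(v, &cur, cur + delta));
	return true;
}

/* Grow the pool by one slot unless it is already at the cap. */
static void *pool_alloc_slot(struct slot_pool *pool, size_t size)
{
	void *slot;

	if (!add_unless(&pool->num_slots, 1, pool->max_slots))
		return NULL;			/* at max: caller must wait */
	slot = calloc(1, size);
	if (slot == NULL)
		atomic_fetch_sub(&pool->num_slots, 1);
	return slot;
}

/* Release a dynamically added slot, but never shrink below the floor. */
static bool pool_free_slot(struct slot_pool *pool, void *slot)
{
	if (add_unless(&pool->num_slots, -1, pool->min_slots)) {
		free(slot);
		return true;
	}
	return false;			/* keep it: recycle onto the free list instead */
}

int main(void)
{
	struct slot_pool pool = { .min_slots = 2, .max_slots = 4 };
	void *extra;

	atomic_init(&pool.num_slots, pool.min_slots);

	extra = pool_alloc_slot(&pool, 64);	/* grows 2 -> 3 */
	printf("slots after grow: %u\n", atomic_load(&pool.num_slots));
	printf("freed extra slot: %s\n",
	       extra && pool_free_slot(&pool, extra) ? "yes" : "no");
	return 0;
}

As in the patch proper, hitting the cap and running out of memory are reported differently (xprt_dynamic_alloc_slot() returns ERR_PTR(-EAGAIN) or ERR_PTR(-ENOMEM)), so xprt_alloc_slot() can either sleep on the backlog queue until a slot is released or retry after a short delay.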
Diffstat (limited to 'net/sunrpc/xprt.c')
-rw-r--r-- net/sunrpc/xprt.c | 70
1 file changed, 59 insertions, 11 deletions
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index ea7b3c16cddd..be85cf04a479 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -935,25 +935,66 @@ void xprt_transmit(struct rpc_task *task)
 	spin_unlock_bh(&xprt->transport_lock);
 }
 
+static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt, gfp_t gfp_flags)
+{
+	struct rpc_rqst *req = ERR_PTR(-EAGAIN);
+
+	if (!atomic_add_unless(&xprt->num_reqs, 1, xprt->max_reqs))
+		goto out;
+	req = kzalloc(sizeof(struct rpc_rqst), gfp_flags);
+	if (req != NULL)
+		goto out;
+	atomic_dec(&xprt->num_reqs);
+	req = ERR_PTR(-ENOMEM);
+out:
+	return req;
+}
+
+static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
+{
+	if (atomic_add_unless(&xprt->num_reqs, -1, xprt->min_reqs)) {
+		kfree(req);
+		return true;
+	}
+	return false;
+}
+
 static void xprt_alloc_slot(struct rpc_task *task)
 {
 	struct rpc_xprt *xprt = task->tk_xprt;
+	struct rpc_rqst *req;
 
-	task->tk_status = 0;
 	if (!list_empty(&xprt->free)) {
-		struct rpc_rqst *req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
-		list_del_init(&req->rq_list);
-		task->tk_rqstp = req;
-		xprt_request_init(task, xprt);
-		return;
+		req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
+		list_del(&req->rq_list);
+		goto out_init_req;
+	}
+	req = xprt_dynamic_alloc_slot(xprt, GFP_NOWAIT);
+	if (!IS_ERR(req))
+		goto out_init_req;
+	switch (PTR_ERR(req)) {
+	case -ENOMEM:
+		rpc_delay(task, HZ >> 2);
+		dprintk("RPC: dynamic allocation of request slot "
+				"failed! Retrying\n");
+		break;
+	case -EAGAIN:
+		rpc_sleep_on(&xprt->backlog, task, NULL);
+		dprintk("RPC: waiting for request slot\n");
 	}
-	dprintk("RPC: waiting for request slot\n");
 	task->tk_status = -EAGAIN;
-	rpc_sleep_on(&xprt->backlog, task, NULL);
+	return;
+out_init_req:
+	task->tk_status = 0;
+	task->tk_rqstp = req;
+	xprt_request_init(task, xprt);
 }
 
 static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
 {
+	if (xprt_dynamic_free_slot(xprt, req))
+		return;
+
 	memset(req, 0, sizeof(*req));	/* mark unused */
 
 	spin_lock(&xprt->reserve_lock);
@@ -972,7 +1013,9 @@ static void xprt_free_all_slots(struct rpc_xprt *xprt)
 	}
 }
 
-struct rpc_xprt *xprt_alloc(struct net *net, int size, int num_prealloc)
+struct rpc_xprt *xprt_alloc(struct net *net, size_t size,
+		unsigned int num_prealloc,
+		unsigned int max_alloc)
 {
 	struct rpc_xprt *xprt;
 	struct rpc_rqst *req;
@@ -992,7 +1035,12 @@ struct rpc_xprt *xprt_alloc(struct net *net, int size, int num_prealloc)
 	}
 	if (i < num_prealloc)
 		goto out_free;
-	xprt->max_reqs = num_prealloc;
+	if (max_alloc > num_prealloc)
+		xprt->max_reqs = max_alloc;
+	else
+		xprt->max_reqs = num_prealloc;
+	xprt->min_reqs = num_prealloc;
+	atomic_set(&xprt->num_reqs, num_prealloc);
 
 	return xprt;
 
@@ -1036,7 +1084,6 @@ void xprt_reserve(struct rpc_task *task)
 	if (!xprt_lock_write(xprt, task))
 		return;
 
-	task->tk_status = -EIO;
 	spin_lock(&xprt->reserve_lock);
 	xprt_alloc_slot(task);
 	spin_unlock(&xprt->reserve_lock);
@@ -1057,6 +1104,7 @@ static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
 {
 	struct rpc_rqst *req = task->tk_rqstp;
 
+	INIT_LIST_HEAD(&req->rq_list);
 	req->rq_timeout = task->tk_client->cl_timeout->to_initval;
 	req->rq_task = task;
 	req->rq_xprt = xprt;