author		Chuck Lever <cel@netapp.com>	2005-08-25 19:25:51 -0400
committer	Trond Myklebust <Trond.Myklebust@netapp.com>	2005-09-23 12:38:40 -0400
commit		49e9a89086b3cae784a4868ca852863e4f4ea3fe (patch)
tree		5ba197fc1767fee5590b892b2f19aeb3583494c4
parent		12a804698b29d040b7cdd92e8a44b0e75164dae9 (diff)
[PATCH] RPC: expose API for serializing access to RPC transports
The next method we abstract is the one that releases a transport, allowing another task to have access to the transport.

Again, one generic version of this is provided for transports that don't need the RPC client to perform congestion control, and one version is for transports that can use the original Van Jacobson implementation in xprt.c.

Test-plan:
Use WAN simulation to cause sporadic bursty packet loss. Look for significant regression in performance or client stability.

Signed-off-by: Chuck Lever <cel@netapp.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
-rw-r--r--	include/linux/sunrpc/xprt.h	3
-rw-r--r--	net/sunrpc/xprt.c	77
-rw-r--r--	net/sunrpc/xprtsock.c	2
3 files changed, 68 insertions(+), 14 deletions(-)
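For orientation, here is a minimal sketch (not part of the patch) of how a transport implementation is expected to pair the new release_xprt callback with the reserve_xprt callback introduced earlier in this series. The xprt_* helpers are the generic versions declared in the header below; the example_* ops tables are hypothetical placeholders, and only the relevant members are shown.

/* Sketch only: pairing of the reserve/release callbacks by transport type. */
static struct rpc_xprt_ops example_stream_ops = {
	/* stream transports (e.g. TCP) perform their own congestion control */
	.reserve_xprt	= xprt_reserve_xprt,
	.release_xprt	= xprt_release_xprt,
	/* ... remaining callbacks elided ... */
};

static struct rpc_xprt_ops example_datagram_ops = {
	/* datagram transports reuse the Van Jacobson code in xprt.c */
	.reserve_xprt	= xprt_reserve_xprt_cong,
	.release_xprt	= xprt_release_xprt_cong,
	/* ... remaining callbacks elided ... */
};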
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index eee1c6877851..86833b725bb5 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -133,6 +133,7 @@ struct rpc_xprt;
 struct rpc_xprt_ops {
 	void		(*set_buffer_size)(struct rpc_xprt *xprt);
 	int		(*reserve_xprt)(struct rpc_task *task);
+	void		(*release_xprt)(struct rpc_xprt *xprt, struct rpc_task *task);
 	void		(*connect)(struct rpc_task *task);
 	int		(*send_request)(struct rpc_task *task);
 	void		(*set_retrans_timeout)(struct rpc_task *task);
@@ -238,6 +239,8 @@ int xprt_reserve_xprt_cong(struct rpc_task *task);
 int	xprt_prepare_transmit(struct rpc_task *task);
 void	xprt_transmit(struct rpc_task *task);
 int	xprt_adjust_timeout(struct rpc_rqst *req);
+void	xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task);
+void	xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
 void	xprt_release(struct rpc_task *task);
 int	xprt_destroy(struct rpc_xprt *xprt);
 
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 2d1e8b83dd68..e92ea99dd318 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -153,14 +153,42 @@ static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
 	return retval;
 }
 
-
 static void __xprt_lock_write_next(struct rpc_xprt *xprt)
 {
 	struct rpc_task *task;
+	struct rpc_rqst *req;
+
+	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
+		return;
+
+	task = rpc_wake_up_next(&xprt->resend);
+	if (!task) {
+		task = rpc_wake_up_next(&xprt->sending);
+		if (!task)
+			goto out_unlock;
+	}
+
+	req = task->tk_rqstp;
+	xprt->snd_task = task;
+	if (req) {
+		req->rq_bytes_sent = 0;
+		req->rq_ntrans++;
+	}
+	return;
+
+out_unlock:
+	smp_mb__before_clear_bit();
+	clear_bit(XPRT_LOCKED, &xprt->state);
+	smp_mb__after_clear_bit();
+}
+
+static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
+{
+	struct rpc_task *task;
 
 	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
 		return;
-	if (!xprt->nocong && RPCXPRT_CONGESTED(xprt))
+	if (RPCXPRT_CONGESTED(xprt))
 		goto out_unlock;
 	task = rpc_wake_up_next(&xprt->resend);
 	if (!task) {
@@ -168,7 +196,7 @@ static void __xprt_lock_write_next(struct rpc_xprt *xprt)
 		if (!task)
 			goto out_unlock;
 	}
-	if (xprt->nocong || __xprt_get_cong(xprt, task)) {
+	if (__xprt_get_cong(xprt, task)) {
 		struct rpc_rqst *req = task->tk_rqstp;
 		xprt->snd_task = task;
 		if (req) {
@@ -183,11 +211,14 @@ out_unlock:
 	smp_mb__after_clear_bit();
 }
 
-/*
- * Releases the transport for use by other requests.
+/**
+ * xprt_release_xprt - allow other requests to use a transport
+ * @xprt: transport with other tasks potentially waiting
+ * @task: task that is releasing access to the transport
+ *
+ * Note that "task" can be NULL.  No congestion control is provided.
  */
-static void
-__xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
+void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
 {
 	if (xprt->snd_task == task) {
 		xprt->snd_task = NULL;
@@ -198,11 +229,29 @@ __xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
 	}
 }
 
-static inline void
-xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
+/**
+ * xprt_release_xprt_cong - allow other requests to use a transport
+ * @xprt: transport with other tasks potentially waiting
+ * @task: task that is releasing access to the transport
+ *
+ * Note that "task" can be NULL.  Another task is awoken to use the
+ * transport if the transport's congestion window allows it.
+ */
+void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
+{
+	if (xprt->snd_task == task) {
+		xprt->snd_task = NULL;
+		smp_mb__before_clear_bit();
+		clear_bit(XPRT_LOCKED, &xprt->state);
+		smp_mb__after_clear_bit();
+		__xprt_lock_write_next_cong(xprt);
+	}
+}
+
+static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
 {
 	spin_lock_bh(&xprt->transport_lock);
-	__xprt_release_write(xprt, task);
+	xprt->ops->release_xprt(xprt, task);
 	spin_unlock_bh(&xprt->transport_lock);
 }
 
@@ -237,7 +286,7 @@ __xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
 		return;
 	req->rq_cong = 0;
 	xprt->cong -= RPC_CWNDSCALE;
-	__xprt_lock_write_next(xprt);
+	__xprt_lock_write_next_cong(xprt);
 }
 
 /*
@@ -256,7 +305,7 @@ xprt_adjust_cwnd(struct rpc_xprt *xprt, int result)
 		cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
 		if (cwnd > RPC_MAXCWND(xprt))
 			cwnd = RPC_MAXCWND(xprt);
-		__xprt_lock_write_next(xprt);
+		__xprt_lock_write_next_cong(xprt);
 	} else if (result == -ETIMEDOUT) {
 		cwnd >>= 1;
 		if (cwnd < RPC_CWNDSCALE)
@@ -693,7 +742,7 @@ void xprt_transmit(struct rpc_task *task)
 			task->tk_status = -ENOTCONN;
 		else if (!req->rq_received)
 			rpc_sleep_on(&xprt->pending, task, NULL, xprt_timer);
-		__xprt_release_write(xprt, task);
+		xprt->ops->release_xprt(xprt, task);
 		spin_unlock_bh(&xprt->transport_lock);
 		return;
 	}
@@ -792,7 +841,7 @@ void xprt_release(struct rpc_task *task)
 	if (!(req = task->tk_rqstp))
 		return;
 	spin_lock_bh(&xprt->transport_lock);
-	__xprt_release_write(xprt, task);
+	xprt->ops->release_xprt(xprt, task);
 	__xprt_put_cong(xprt, req);
 	if (!list_empty(&req->rq_list))
 		list_del(&req->rq_list);
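The hunks above show xprt_release_xprt_cong() in full, but only the head and tail of the renamed generic path. As a readability aid (a reconstruction of the resulting function, not a verbatim quote of the file), the non-congestion-controlled release ends up looking like this, differing from the _cong variant only in which lock-handoff helper it wakes:

void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {		/* only the current lock holder may release */
		xprt->snd_task = NULL;
		smp_mb__before_clear_bit();
		clear_bit(XPRT_LOCKED, &xprt->state);
		smp_mb__after_clear_bit();
		__xprt_lock_write_next(xprt);	/* hand the transport to the next waiting task */
	}
}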
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index fc4fbe8ea346..8589c1ad55e3 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1046,6 +1046,7 @@ static void xs_connect(struct rpc_task *task)
 static struct rpc_xprt_ops xs_udp_ops = {
 	.set_buffer_size	= xs_udp_set_buffer_size,
 	.reserve_xprt		= xprt_reserve_xprt_cong,
+	.release_xprt		= xprt_release_xprt_cong,
 	.connect		= xs_connect,
 	.send_request		= xs_udp_send_request,
 	.set_retrans_timeout	= xprt_set_retrans_timeout_rtt,
@@ -1056,6 +1057,7 @@ static struct rpc_xprt_ops xs_udp_ops = {
 static struct rpc_xprt_ops xs_tcp_ops = {
 	.set_buffer_size	= xs_tcp_set_buffer_size,
 	.reserve_xprt		= xprt_reserve_xprt,
+	.release_xprt		= xprt_release_xprt,
 	.connect		= xs_connect,
 	.send_request		= xs_tcp_send_request,
 	.set_retrans_timeout	= xprt_set_retrans_timeout_def,
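One consequence worth noting: after this patch the RPC core releases write access only through the ops vector, so every rpc_xprt_ops table must set .release_xprt, and the two socket transports above pick the variant matching their reserve_xprt choice. The helper below is a minimal sketch of that call pattern under the transport_lock (the example_* name is hypothetical; it mirrors the xprt_release_write() wrapper in the xprt.c diff).

/* Sketch of the indirect release path used by the RPC core. */
static inline void example_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	spin_lock_bh(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);	/* xprt_release_xprt or xprt_release_xprt_cong */
	spin_unlock_bh(&xprt->transport_lock);
}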