author:    Trond Myklebust <Trond.Myklebust@netapp.com>  2011-07-17 16:01:03 -0400
committer: Trond Myklebust <Trond.Myklebust@netapp.com>  2011-07-17 16:01:03 -0400
commit:    43cedbf0e8dfb9c5610eb7985d5f21263e313802 (patch)
tree:      7758630292b6a276a3db72e63803ddc02c0a4444 /net/sunrpc/xprt.c
parent:    f85ef69ce08bc2209858135328335f668ba35bdb (diff)
SUNRPC: Ensure that we grab the XPRT_LOCK before calling xprt_alloc_slot

This throttles the allocation of new slots when the socket is busy
reconnecting and/or is out of buffer space.

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
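[Editorial note] The enabling change is the new signature of the ->reserve_xprt() transport method: callers now pass the rpc_xprt explicitly, so the write lock can be taken by a task that does not yet own a request slot (task->tk_rqstp == NULL) — which is exactly the state xprt_reserve() is in. A minimal sketch of the resulting wiring; the ops-table names below (example_stream_ops, example_dgram_ops) are illustrative, not taken from this patch:

/* Illustrative sketch only: the method now receives the transport
 * directly instead of digging it out of task->tk_rqstp, which may
 * be NULL when called from xprt_reserve().
 */
struct rpc_xprt_ops {
        int     (*reserve_xprt)(struct rpc_xprt *xprt, struct rpc_task *task);
        /* ... remaining methods elided ... */
};

/* A transport picks one of the two generic implementations: */
static struct rpc_xprt_ops example_stream_ops = {
        .reserve_xprt   = xprt_reserve_xprt,      /* no congestion control (e.g. TCP) */
        /* ... */
};

static struct rpc_xprt_ops example_dgram_ops = {
        .reserve_xprt   = xprt_reserve_xprt_cong, /* congestion-controlled (e.g. UDP) */
        /* ... */
};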
Diffstat (limited to 'net/sunrpc/xprt.c')
-rw-r--r--  net/sunrpc/xprt.c | 66
 1 file changed, 42 insertions(+), 24 deletions(-)
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index fbdbaf2cd58d..ccd583a46ff6 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -191,10 +191,9 @@ EXPORT_SYMBOL_GPL(xprt_load_transport);
  * transport connects from colliding with writes. No congestion control
  * is provided.
  */
-int xprt_reserve_xprt(struct rpc_task *task)
+int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
 {
         struct rpc_rqst *req = task->tk_rqstp;
-        struct rpc_xprt *xprt = req->rq_xprt;
 
         if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
                 if (task == xprt->snd_task)
@@ -202,8 +201,10 @@ int xprt_reserve_xprt(struct rpc_task *task)
                 goto out_sleep;
         }
         xprt->snd_task = task;
-        req->rq_bytes_sent = 0;
-        req->rq_ntrans++;
+        if (req != NULL) {
+                req->rq_bytes_sent = 0;
+                req->rq_ntrans++;
+        }
 
         return 1;
 
@@ -212,7 +213,7 @@ out_sleep:
                         task->tk_pid, xprt);
         task->tk_timeout = 0;
         task->tk_status = -EAGAIN;
-        if (req->rq_ntrans)
+        if (req != NULL && req->rq_ntrans)
                 rpc_sleep_on(&xprt->resend, task, NULL);
         else
                 rpc_sleep_on(&xprt->sending, task, NULL);
@@ -239,9 +240,8 @@ static void xprt_clear_locked(struct rpc_xprt *xprt)
  * integrated into the decision of whether a request is allowed to be
  * woken up and given access to the transport.
  */
-int xprt_reserve_xprt_cong(struct rpc_task *task)
+int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
 {
-        struct rpc_xprt *xprt = task->tk_xprt;
         struct rpc_rqst *req = task->tk_rqstp;
 
         if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
@@ -249,12 +249,14 @@ int xprt_reserve_xprt_cong(struct rpc_task *task)
                         return 1;
                 goto out_sleep;
         }
+        if (req == NULL) {
+                xprt->snd_task = task;
+                return 1;
+        }
         if (__xprt_get_cong(xprt, task)) {
                 xprt->snd_task = task;
-                if (req) {
-                        req->rq_bytes_sent = 0;
-                        req->rq_ntrans++;
-                }
+                req->rq_bytes_sent = 0;
+                req->rq_ntrans++;
                 return 1;
         }
         xprt_clear_locked(xprt);
@@ -262,7 +264,7 @@ out_sleep:
         dprintk("RPC: %5u failed to lock transport %p\n", task->tk_pid, xprt);
         task->tk_timeout = 0;
         task->tk_status = -EAGAIN;
-        if (req && req->rq_ntrans)
+        if (req != NULL && req->rq_ntrans)
                 rpc_sleep_on(&xprt->resend, task, NULL);
         else
                 rpc_sleep_on(&xprt->sending, task, NULL);
@@ -275,7 +277,7 @@ static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
         int retval;
 
         spin_lock_bh(&xprt->transport_lock);
-        retval = xprt->ops->reserve_xprt(task);
+        retval = xprt->ops->reserve_xprt(xprt, task);
         spin_unlock_bh(&xprt->transport_lock);
         return retval;
 }
@@ -291,7 +293,7 @@ static void __xprt_lock_write_next(struct rpc_xprt *xprt)
         task = rpc_wake_up_next(&xprt->resend);
         if (!task) {
                 task = rpc_wake_up_next(&xprt->sending);
-                if (!task)
+                if (task == NULL)
                         goto out_unlock;
         }
 
@@ -310,6 +312,7 @@ out_unlock:
 static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
 {
         struct rpc_task *task;
+        struct rpc_rqst *req;
 
         if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
                 return;
@@ -318,16 +321,19 @@ static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
         task = rpc_wake_up_next(&xprt->resend);
         if (!task) {
                 task = rpc_wake_up_next(&xprt->sending);
-                if (!task)
+                if (task == NULL)
                         goto out_unlock;
         }
+
+        req = task->tk_rqstp;
+        if (req == NULL) {
+                xprt->snd_task = task;
+                return;
+        }
         if (__xprt_get_cong(xprt, task)) {
-                struct rpc_rqst *req = task->tk_rqstp;
                 xprt->snd_task = task;
-                if (req) {
-                        req->rq_bytes_sent = 0;
-                        req->rq_ntrans++;
-                }
+                req->rq_bytes_sent = 0;
+                req->rq_ntrans++;
                 return;
         }
 out_unlock:
@@ -852,7 +858,7 @@ int xprt_prepare_transmit(struct rpc_task *task)
                 err = req->rq_reply_bytes_recvd;
                 goto out_unlock;
         }
-        if (!xprt->ops->reserve_xprt(task))
+        if (!xprt->ops->reserve_xprt(xprt, task))
                 err = -EAGAIN;
 out_unlock:
         spin_unlock_bh(&xprt->transport_lock);
@@ -933,8 +939,6 @@ static void xprt_alloc_slot(struct rpc_task *task)
         struct rpc_xprt *xprt = task->tk_xprt;
 
         task->tk_status = 0;
-        if (task->tk_rqstp)
-                return;
         if (!list_empty(&xprt->free)) {
                 struct rpc_rqst *req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
                 list_del_init(&req->rq_list);
@@ -944,7 +948,6 @@ static void xprt_alloc_slot(struct rpc_task *task)
         }
         dprintk("RPC: waiting for request slot\n");
         task->tk_status = -EAGAIN;
-        task->tk_timeout = 0;
         rpc_sleep_on(&xprt->backlog, task, NULL);
 }
 
@@ -1001,10 +1004,25 @@ void xprt_reserve(struct rpc_task *task)
 {
         struct rpc_xprt *xprt = task->tk_xprt;
 
+        task->tk_status = 0;
+        if (task->tk_rqstp != NULL)
+                return;
+
+        /* Note: grabbing the xprt_lock_write() here is not strictly needed,
+         * but ensures that we throttle new slot allocation if the transport
+         * is congested (e.g. if reconnecting or if we're out of socket
+         * write buffer space).
+         */
+        task->tk_timeout = 0;
+        task->tk_status = -EAGAIN;
+        if (!xprt_lock_write(xprt, task))
+                return;
+
         task->tk_status = -EIO;
         spin_lock(&xprt->reserve_lock);
         xprt_alloc_slot(task);
         spin_unlock(&xprt->reserve_lock);
+        xprt_release_write(xprt, task);
 }
 
 static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt)
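
[Editorial note] Taken together, the hunks make slot allocation contend for XPRT_LOCKED before it ever touches the slot table. A hypothetical walk-through for a congestion-controlled transport that is busy when a new task arrives (queue and helper names as in the diff above; wakeup and error-path details simplified):

/* Hypothetical sequence, not copied from the kernel source.
 *
 * 1. Task A enters xprt_reserve() with tk_rqstp == NULL.
 *    xprt_lock_write() -> ->reserve_xprt(xprt, A):
 *    test_and_set_bit(XPRT_LOCKED) fails (another owner holds it),
 *    so A is queued on xprt->sending with tk_status = -EAGAIN.
 *
 * 2. The current owner releases the lock; __xprt_lock_write_next_cong()
 *    wakes A and, because A->tk_rqstp == NULL, simply sets
 *    xprt->snd_task = A (the new req == NULL branch above), without
 *    touching rq_bytes_sent/rq_ntrans or the congestion window.
 *
 * 3. A re-runs xprt_reserve(): ->reserve_xprt() now sees
 *    task == xprt->snd_task and returns 1, so A proceeds to
 *    xprt_alloc_slot() under xprt->reserve_lock, then drops the write
 *    lock via xprt_release_write(), letting the next waiter in.
 */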