author     Trond Myklebust <Trond.Myklebust@netapp.com>   2008-02-22 16:34:17 -0500
committer  Trond Myklebust <Trond.Myklebust@netapp.com>   2008-02-26 00:40:44 -0500
commit     5d00837b90340af9106dcd93af75fd664c8eb87f (patch)
tree       f537dc84421cf150d66b630e56ea8107078c07a8
parent     fda1393938035559b417dd5b26b9cc293a7aee00 (diff)
SUNRPC: Run rpc timeout functions as callbacks instead of in softirqs
An audit of the current RPC timeout functions shows that they don't really ever need to run in the softirq context. As long as the softirq is able to signal that the wakeup is due to a timeout (which it can do by setting task->tk_status to -ETIMEDOUT), then the callback functions can just run as standard task->tk_callback functions (in the rpciod/process context).

The only possible border-line case would be xprt_timer() for the case of UDP, when the callback is used to reduce the size of the transport congestion window. In testing, however, the effect of moving that update to a callback would appear to be minor.

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
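To illustrate the pattern the patch adopts, here is a minimal user-space sketch (not SUNRPC code): the timer context does nothing beyond recording -ETIMEDOUT and waking the sleeping task, and the timeout handling itself runs later in ordinary process context, analogous to a task->tk_callback executed by rpciod. All names below (demo_task, timer_fn, demo_callback) are invented for the sketch; the pthread condition variable stands in for the RPC wait queue.

/* Illustrative sketch only -- not SUNRPC code.  The "timer" thread merely
 * marks the task as timed out and wakes it; the timeout handling runs in
 * the waiter's (process) context afterwards. */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

struct demo_task {
	pthread_mutex_t lock;
	pthread_cond_t wait;
	int queued;			/* still sleeping on the "wait queue" */
	int status;			/* 0 or -ETIMEDOUT */
	void (*callback)(struct demo_task *);
};

/* Stand-in for the softirq timer: set -ETIMEDOUT and wake -- nothing else. */
static void *timer_fn(void *arg)
{
	struct demo_task *t = arg;

	sleep(1);			/* pretend the RPC timeout fired */
	pthread_mutex_lock(&t->lock);
	if (t->queued) {
		t->status = -ETIMEDOUT;
		t->queued = 0;
		pthread_cond_signal(&t->wait);
	}
	pthread_mutex_unlock(&t->lock);
	return NULL;
}

/* Timeout handling now lives in the callback, run in "process" context. */
static void demo_callback(struct demo_task *t)
{
	if (t->status == -ETIMEDOUT)
		printf("callback: handling timeout in process context\n");
}

int main(void)
{
	static struct demo_task t = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.wait = PTHREAD_COND_INITIALIZER,
		.queued = 1,
		.callback = demo_callback,
	};
	pthread_t timer;

	pthread_create(&timer, NULL, timer_fn, &t);

	/* Stand-in for rpciod: sleep until woken, then run the callback. */
	pthread_mutex_lock(&t.lock);
	while (t.queued)
		pthread_cond_wait(&t.wait, &t.lock);
	pthread_mutex_unlock(&t.lock);

	t.callback(&t);
	pthread_join(timer, NULL);
	return 0;
}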
-rw-r--r--  fs/nfs/nfs4proc.c               |  2
-rw-r--r--  fs/nfs/nfs4state.c              |  2
-rw-r--r--  include/linux/sunrpc/sched.h    |  4
-rw-r--r--  net/sunrpc/auth_gss/auth_gss.c  |  4
-rw-r--r--  net/sunrpc/rpcb_clnt.c          |  2
-rw-r--r--  net/sunrpc/sched.c              | 50
-rw-r--r--  net/sunrpc/xprt.c               | 28
7 files changed, 36 insertions(+), 56 deletions(-)
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 54743396b66..bbb0d58ee6a 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2765,7 +2765,7 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server)
 		case -NFS4ERR_STALE_CLIENTID:
 		case -NFS4ERR_STALE_STATEID:
 		case -NFS4ERR_EXPIRED:
-			rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL, NULL);
+			rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
 			nfs4_schedule_state_recovery(clp);
 			if (test_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state) == 0)
 				rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index b962397004c..a2ef02824aa 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -731,7 +731,7 @@ int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task)
 	list_add_tail(&seqid->list, &sequence->list);
 	if (list_first_entry(&sequence->list, struct nfs_seqid, list) == seqid)
 		goto unlock;
-	rpc_sleep_on(&sequence->wait, task, NULL, NULL);
+	rpc_sleep_on(&sequence->wait, task, NULL);
 	status = -EAGAIN;
 unlock:
 	spin_unlock(&sequence->lock);
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index 7963ef0ffb8..503a937bdca 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -56,12 +56,10 @@ struct rpc_task {
 	__u8			tk_cred_retry;
 
 	/*
-	 * timeout_fn	to be executed by timer bottom half
 	 * callback	to be executed after waking up
 	 * action	next procedure for async tasks
 	 * tk_ops	caller callbacks
 	 */
-	void			(*tk_timeout_fn)(struct rpc_task *);
 	void			(*tk_callback)(struct rpc_task *);
 	void			(*tk_action)(struct rpc_task *);
 	const struct rpc_call_ops *tk_ops;
@@ -231,7 +229,7 @@ void rpc_execute(struct rpc_task *);
 void		rpc_init_priority_wait_queue(struct rpc_wait_queue *, const char *);
 void		rpc_init_wait_queue(struct rpc_wait_queue *, const char *);
 void		rpc_sleep_on(struct rpc_wait_queue *, struct rpc_task *,
-					rpc_action action, rpc_action timer);
+					rpc_action action);
 void		rpc_wake_up_queued_task(struct rpc_wait_queue *,
 					struct rpc_task *);
 void		rpc_wake_up(struct rpc_wait_queue *);
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 6dac3879228..dc6391bcda1 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -408,13 +408,13 @@ gss_refresh_upcall(struct rpc_task *task)
 	}
 	spin_lock(&inode->i_lock);
 	if (gss_cred->gc_upcall != NULL)
-		rpc_sleep_on(&gss_cred->gc_upcall->rpc_waitqueue, task, NULL, NULL);
+		rpc_sleep_on(&gss_cred->gc_upcall->rpc_waitqueue, task, NULL);
 	else if (gss_msg->ctx == NULL && gss_msg->msg.errno >= 0) {
 		task->tk_timeout = 0;
 		gss_cred->gc_upcall = gss_msg;
 		/* gss_upcall_callback will release the reference to gss_upcall_msg */
 		atomic_inc(&gss_msg->count);
-		rpc_sleep_on(&gss_msg->rpc_waitqueue, task, gss_upcall_callback, NULL);
+		rpc_sleep_on(&gss_msg->rpc_waitqueue, task, gss_upcall_callback);
 	} else
 		err = gss_msg->msg.errno;
 	spin_unlock(&inode->i_lock);
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index 3164a0871cf..f480c718b40 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -298,7 +298,7 @@ void rpcb_getport_async(struct rpc_task *task)
 
 	/* Put self on queue before sending rpcbind request, in case
 	 * rpcb_getport_done completes before we return from rpc_run_task */
-	rpc_sleep_on(&xprt->binding, task, NULL, NULL);
+	rpc_sleep_on(&xprt->binding, task, NULL);
 
 	/* Someone else may have bound if we slept */
 	if (xprt_bound(xprt)) {
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 35acdc39bfc..caf12fd6b6a 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -56,29 +56,18 @@ struct workqueue_struct *rpciod_workqueue;
  * queue->lock and bh_disabled in order to avoid races within
  * rpc_run_timer().
  */
-static inline void
+static void
 __rpc_disable_timer(struct rpc_task *task)
 {
 	dprintk("RPC: %5u disabling timer\n", task->tk_pid);
-	task->tk_timeout_fn = NULL;
 	task->tk_timeout = 0;
 }
 
 /*
- * Default timeout handler if none specified by user
- */
-static void
-__rpc_default_timer(struct rpc_task *task)
-{
-	dprintk("RPC: %5u timeout (default timer)\n", task->tk_pid);
-	task->tk_status = -ETIMEDOUT;
-}
-
-/*
  * Set up a timer for the current task.
  */
-static inline void
-__rpc_add_timer(struct rpc_task *task, rpc_action timer)
+static void
+__rpc_add_timer(struct rpc_task *task)
 {
 	if (!task->tk_timeout)
 		return;
@@ -86,10 +75,6 @@ __rpc_add_timer(struct rpc_task *task, rpc_action timer)
 	dprintk("RPC: %5u setting alarm for %lu ms\n",
 			task->tk_pid, task->tk_timeout * 1000 / HZ);
 
-	if (timer)
-		task->tk_timeout_fn = timer;
-	else
-		task->tk_timeout_fn = __rpc_default_timer;
 	set_bit(RPC_TASK_HAS_TIMER, &task->tk_runstate);
 	mod_timer(&task->tk_timer, jiffies + task->tk_timeout);
 }
@@ -297,7 +282,6 @@ EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);
  */
 static void rpc_make_runnable(struct rpc_task *task)
 {
-	BUG_ON(task->tk_timeout_fn);
 	rpc_clear_queued(task);
 	if (rpc_test_and_set_running(task))
 		return;
@@ -327,7 +311,7 @@ static void rpc_make_runnable(struct rpc_task *task)
  * as it's on a wait queue.
  */
 static void __rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
-			rpc_action action, rpc_action timer)
+			rpc_action action)
 {
 	dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n",
 			task->tk_pid, rpc_qname(q), jiffies);
@@ -341,11 +325,11 @@ static void __rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
 
 	BUG_ON(task->tk_callback != NULL);
 	task->tk_callback = action;
-	__rpc_add_timer(task, timer);
+	__rpc_add_timer(task);
 }
 
 void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
-				rpc_action action, rpc_action timer)
+				rpc_action action)
 {
 	/* Mark the task as being activated if so needed */
 	rpc_set_active(task);
@@ -354,7 +338,7 @@ void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
 	 * Protect the queue operations.
 	 */
 	spin_lock_bh(&q->lock);
-	__rpc_sleep_on(q, task, action, timer);
+	__rpc_sleep_on(q, task, action);
 	spin_unlock_bh(&q->lock);
 }
 EXPORT_SYMBOL_GPL(rpc_sleep_on);
@@ -559,20 +543,15 @@ EXPORT_SYMBOL_GPL(rpc_wake_up_status);
 static void rpc_run_timer(unsigned long ptr)
 {
 	struct rpc_task *task = (struct rpc_task *)ptr;
-	void (*callback)(struct rpc_task *);
+	struct rpc_wait_queue *queue = task->tk_waitqueue;
 
-	if (RPC_IS_QUEUED(task)) {
-		struct rpc_wait_queue *queue = task->tk_waitqueue;
-		callback = task->tk_timeout_fn;
-
-		dprintk("RPC: %5u running timer\n", task->tk_pid);
-		if (callback != NULL)
-			callback(task);
-		/* Note: we're already in a bh-safe context */
-		spin_lock(&queue->lock);
+	spin_lock(&queue->lock);
+	if (RPC_IS_QUEUED(task) && task->tk_waitqueue == queue) {
+		dprintk("RPC: %5u timeout\n", task->tk_pid);
+		task->tk_status = -ETIMEDOUT;
 		rpc_wake_up_task_queue_locked(queue, task);
-		spin_unlock(&queue->lock);
 	}
+	spin_unlock(&queue->lock);
 	smp_mb__before_clear_bit();
 	clear_bit(RPC_TASK_HAS_TIMER, &task->tk_runstate);
 	smp_mb__after_clear_bit();
@@ -580,6 +559,7 @@ static void rpc_run_timer(unsigned long ptr)
 
 static void __rpc_atrun(struct rpc_task *task)
 {
+	task->tk_status = 0;
 }
 
 /*
@@ -588,7 +568,7 @@ static void __rpc_atrun(struct rpc_task *task)
 void rpc_delay(struct rpc_task *task, unsigned long delay)
 {
 	task->tk_timeout = delay;
-	rpc_sleep_on(&delay_queue, task, NULL, __rpc_atrun);
+	rpc_sleep_on(&delay_queue, task, __rpc_atrun);
 }
 EXPORT_SYMBOL_GPL(rpc_delay);
 
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 6e2772217e5..9bf118c5431 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -188,9 +188,9 @@ out_sleep:
 	task->tk_timeout = 0;
 	task->tk_status = -EAGAIN;
 	if (req && req->rq_ntrans)
-		rpc_sleep_on(&xprt->resend, task, NULL, NULL);
+		rpc_sleep_on(&xprt->resend, task, NULL);
 	else
-		rpc_sleep_on(&xprt->sending, task, NULL, NULL);
+		rpc_sleep_on(&xprt->sending, task, NULL);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(xprt_reserve_xprt);
@@ -238,9 +238,9 @@ out_sleep:
 	task->tk_timeout = 0;
 	task->tk_status = -EAGAIN;
 	if (req && req->rq_ntrans)
-		rpc_sleep_on(&xprt->resend, task, NULL, NULL);
+		rpc_sleep_on(&xprt->resend, task, NULL);
 	else
-		rpc_sleep_on(&xprt->sending, task, NULL, NULL);
+		rpc_sleep_on(&xprt->sending, task, NULL);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);
@@ -453,7 +453,7 @@ void xprt_wait_for_buffer_space(struct rpc_task *task)
 	struct rpc_xprt *xprt = req->rq_xprt;
 
 	task->tk_timeout = req->rq_timeout;
-	rpc_sleep_on(&xprt->pending, task, NULL, NULL);
+	rpc_sleep_on(&xprt->pending, task, NULL);
 }
 EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);
 
@@ -652,7 +652,7 @@ void xprt_connect(struct rpc_task *task)
 		task->tk_rqstp->rq_bytes_sent = 0;
 
 		task->tk_timeout = xprt->connect_timeout;
-		rpc_sleep_on(&xprt->pending, task, xprt_connect_status, NULL);
+		rpc_sleep_on(&xprt->pending, task, xprt_connect_status);
 		xprt->stat.connect_start = jiffies;
 		xprt->ops->connect(task);
 	}
@@ -769,15 +769,17 @@ static void xprt_timer(struct rpc_task *task)
 	struct rpc_rqst *req = task->tk_rqstp;
 	struct rpc_xprt *xprt = req->rq_xprt;
 
+	if (task->tk_status != -ETIMEDOUT)
+		return;
 	dprintk("RPC: %5u xprt_timer\n", task->tk_pid);
 
-	spin_lock(&xprt->transport_lock);
+	spin_lock_bh(&xprt->transport_lock);
 	if (!req->rq_received) {
 		if (xprt->ops->timer)
 			xprt->ops->timer(task);
-		task->tk_status = -ETIMEDOUT;
-	}
-	spin_unlock(&xprt->transport_lock);
+	} else
+		task->tk_status = 0;
+	spin_unlock_bh(&xprt->transport_lock);
 }
 
 /**
@@ -862,7 +864,7 @@ void xprt_transmit(struct rpc_task *task)
 		if (!xprt_connected(xprt))
 			task->tk_status = -ENOTCONN;
 		else if (!req->rq_received)
-			rpc_sleep_on(&xprt->pending, task, NULL, xprt_timer);
+			rpc_sleep_on(&xprt->pending, task, xprt_timer);
 		spin_unlock_bh(&xprt->transport_lock);
 		return;
 	}
@@ -873,7 +875,7 @@ void xprt_transmit(struct rpc_task *task)
 	 */
 	task->tk_status = status;
 	if (status == -ECONNREFUSED)
-		rpc_sleep_on(&xprt->sending, task, NULL, NULL);
+		rpc_sleep_on(&xprt->sending, task, NULL);
 }
 
 static inline void do_xprt_reserve(struct rpc_task *task)
@@ -893,7 +895,7 @@ static inline void do_xprt_reserve(struct rpc_task *task)
 	dprintk("RPC: waiting for request slot\n");
 	task->tk_status = -EAGAIN;
 	task->tk_timeout = 0;
-	rpc_sleep_on(&xprt->backlog, task, NULL, NULL);
+	rpc_sleep_on(&xprt->backlog, task, NULL);
 }
 
 /**