about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorTrond Myklebust <Trond.Myklebust@netapp.com>2005-06-22 13:16:21 -0400
committerTrond Myklebust <Trond.Myklebust@netapp.com>2005-06-22 16:07:07 -0400
commit96651ab341cde0fee940ec837f323d711cbfa7d5 (patch)
tree83882bc6a68bb9862ec0fddc33a8309512ccc010
parenta656db998785324a818005bcf71bae6dcbbb3cf5 (diff)
[PATCH] RPC: Shrink struct rpc_task by switching to wait_on_bit()
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
-rw-r--r--include/linux/sunrpc/sched.h1
-rw-r--r--net/sunrpc/sched.c31
2 files changed, 18 insertions, 14 deletions
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index 99d17ed7cebb..4d77e90d0b30 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -31,7 +31,6 @@ struct rpc_wait_queue;
31struct rpc_wait { 31struct rpc_wait {
32 struct list_head list; /* wait queue links */ 32 struct list_head list; /* wait queue links */
33 struct list_head links; /* Links to related tasks */ 33 struct list_head links; /* Links to related tasks */
34 wait_queue_head_t waitq; /* sync: sleep on this q */
35 struct rpc_wait_queue * rpc_waitq; /* RPC wait queue we're on */ 34 struct rpc_wait_queue * rpc_waitq; /* RPC wait queue we're on */
36}; 35};
37 36
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index cc298fa4b81d..2d9eb7fbd521 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -290,7 +290,7 @@ static void rpc_make_runnable(struct rpc_task *task)
290 return; 290 return;
291 } 291 }
292 } else 292 } else
293 wake_up(&task->u.tk_wait.waitq); 293 wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
294} 294}
295 295
296/* 296/*
@@ -578,6 +578,14 @@ static inline int __rpc_do_exit(struct rpc_task *task)
578 return 1; 578 return 1;
579} 579}
580 580
/*
 * Action callback for out_of_line_wait_on_bit(): invoked while waiting
 * for the RPC_TASK_QUEUED bit to clear (see the __rpc_execute hunk
 * below).  Sleeps via schedule(), but bails out with -ERESTARTSYS as
 * soon as the current task has a signal pending, so a signalled caller
 * is woken instead of blocking indefinitely.  The 'word' argument (the
 * bit word being waited on) is intentionally unused here.
 * NOTE(review): the numeric prefixes on the lines below are the diff
 * viewer's new-file line numbers, not part of the code itself.
 */
581static int rpc_wait_bit_interruptible(void *word)
582{
583	if (signal_pending(current))
584		return -ERESTARTSYS;
	/* No signal pending: yield the CPU and keep waiting on the bit. */
585	schedule();
586	return 0;
587}
588
581/* 589/*
582 * This is the RPC `scheduler' (or rather, the finite state machine). 590 * This is the RPC `scheduler' (or rather, the finite state machine).
583 */ 591 */
@@ -648,22 +656,21 @@ static int __rpc_execute(struct rpc_task *task)
648 656
649 /* sync task: sleep here */ 657 /* sync task: sleep here */
650 dprintk("RPC: %4d sync task going to sleep\n", task->tk_pid); 658 dprintk("RPC: %4d sync task going to sleep\n", task->tk_pid);
651 if (RPC_TASK_UNINTERRUPTIBLE(task)) { 659 /* Note: Caller should be using rpc_clnt_sigmask() */
652 __wait_event(task->u.tk_wait.waitq, !RPC_IS_QUEUED(task)); 660 status = out_of_line_wait_on_bit(&task->tk_runstate,
653 } else { 661 RPC_TASK_QUEUED, rpc_wait_bit_interruptible,
654 __wait_event_interruptible(task->u.tk_wait.waitq, !RPC_IS_QUEUED(task), status); 662 TASK_INTERRUPTIBLE);
663 if (status == -ERESTARTSYS) {
655 /* 664 /*
656 * When a sync task receives a signal, it exits with 665 * When a sync task receives a signal, it exits with
657 * -ERESTARTSYS. In order to catch any callbacks that 666 * -ERESTARTSYS. In order to catch any callbacks that
658 * clean up after sleeping on some queue, we don't 667 * clean up after sleeping on some queue, we don't
659 * break the loop here, but go around once more. 668 * break the loop here, but go around once more.
660 */ 669 */
661 if (status == -ERESTARTSYS) { 670 dprintk("RPC: %4d got signal\n", task->tk_pid);
662 dprintk("RPC: %4d got signal\n", task->tk_pid); 671 task->tk_flags |= RPC_TASK_KILLED;
663 task->tk_flags |= RPC_TASK_KILLED; 672 rpc_exit(task, -ERESTARTSYS);
664 rpc_exit(task, -ERESTARTSYS); 673 rpc_wake_up_task(task);
665 rpc_wake_up_task(task);
666 }
667 } 674 }
668 rpc_set_running(task); 675 rpc_set_running(task);
669 dprintk("RPC: %4d sync task resuming\n", task->tk_pid); 676 dprintk("RPC: %4d sync task resuming\n", task->tk_pid);
@@ -766,8 +773,6 @@ void rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt, rpc_action call
766 773
767 /* Initialize workqueue for async tasks */ 774 /* Initialize workqueue for async tasks */
768 task->tk_workqueue = rpciod_workqueue; 775 task->tk_workqueue = rpciod_workqueue;
769 if (!RPC_IS_ASYNC(task))
770 init_waitqueue_head(&task->u.tk_wait.waitq);
771 776
772 if (clnt) { 777 if (clnt) {
773 atomic_inc(&clnt->cl_users); 778 atomic_inc(&clnt->cl_users);