diff options
author | Trond Myklebust <Trond.Myklebust@netapp.com> | 2005-06-22 13:16:21 -0400 |
---|---|---|
committer | Trond Myklebust <Trond.Myklebust@netapp.com> | 2005-06-22 16:07:07 -0400 |
commit | 96651ab341cde0fee940ec837f323d711cbfa7d5 (patch) | |
tree | 83882bc6a68bb9862ec0fddc33a8309512ccc010 /net | |
parent | a656db998785324a818005bcf71bae6dcbbb3cf5 (diff) |
[PATCH] RPC: Shrink struct rpc_task by switching to wait_on_bit()
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
Diffstat (limited to 'net')
-rw-r--r-- | net/sunrpc/sched.c | 31 |
1 file changed, 18 insertions, 13 deletions
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index cc298fa4b81d..2d9eb7fbd521 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c | |||
@@ -290,7 +290,7 @@ static void rpc_make_runnable(struct rpc_task *task) | |||
290 | return; | 290 | return; |
291 | } | 291 | } |
292 | } else | 292 | } else |
293 | wake_up(&task->u.tk_wait.waitq); | 293 | wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED); |
294 | } | 294 | } |
295 | 295 | ||
296 | /* | 296 | /* |
@@ -578,6 +578,14 @@ static inline int __rpc_do_exit(struct rpc_task *task) | |||
578 | return 1; | 578 | return 1; |
579 | } | 579 | } |
580 | 580 | ||
581 | static int rpc_wait_bit_interruptible(void *word) | ||
582 | { | ||
583 | if (signal_pending(current)) | ||
584 | return -ERESTARTSYS; | ||
585 | schedule(); | ||
586 | return 0; | ||
587 | } | ||
588 | |||
581 | /* | 589 | /* |
582 | * This is the RPC `scheduler' (or rather, the finite state machine). | 590 | * This is the RPC `scheduler' (or rather, the finite state machine). |
583 | */ | 591 | */ |
@@ -648,22 +656,21 @@ static int __rpc_execute(struct rpc_task *task) | |||
648 | 656 | ||
649 | /* sync task: sleep here */ | 657 | /* sync task: sleep here */ |
650 | dprintk("RPC: %4d sync task going to sleep\n", task->tk_pid); | 658 | dprintk("RPC: %4d sync task going to sleep\n", task->tk_pid); |
651 | if (RPC_TASK_UNINTERRUPTIBLE(task)) { | 659 | /* Note: Caller should be using rpc_clnt_sigmask() */ |
652 | __wait_event(task->u.tk_wait.waitq, !RPC_IS_QUEUED(task)); | 660 | status = out_of_line_wait_on_bit(&task->tk_runstate, |
653 | } else { | 661 | RPC_TASK_QUEUED, rpc_wait_bit_interruptible, |
654 | __wait_event_interruptible(task->u.tk_wait.waitq, !RPC_IS_QUEUED(task), status); | 662 | TASK_INTERRUPTIBLE); |
663 | if (status == -ERESTARTSYS) { | ||
655 | /* | 664 | /* |
656 | * When a sync task receives a signal, it exits with | 665 | * When a sync task receives a signal, it exits with |
657 | * -ERESTARTSYS. In order to catch any callbacks that | 666 | * -ERESTARTSYS. In order to catch any callbacks that |
658 | * clean up after sleeping on some queue, we don't | 667 | * clean up after sleeping on some queue, we don't |
659 | * break the loop here, but go around once more. | 668 | * break the loop here, but go around once more. |
660 | */ | 669 | */ |
661 | if (status == -ERESTARTSYS) { | 670 | dprintk("RPC: %4d got signal\n", task->tk_pid); |
662 | dprintk("RPC: %4d got signal\n", task->tk_pid); | 671 | task->tk_flags |= RPC_TASK_KILLED; |
663 | task->tk_flags |= RPC_TASK_KILLED; | 672 | rpc_exit(task, -ERESTARTSYS); |
664 | rpc_exit(task, -ERESTARTSYS); | 673 | rpc_wake_up_task(task); |
665 | rpc_wake_up_task(task); | ||
666 | } | ||
667 | } | 674 | } |
668 | rpc_set_running(task); | 675 | rpc_set_running(task); |
669 | dprintk("RPC: %4d sync task resuming\n", task->tk_pid); | 676 | dprintk("RPC: %4d sync task resuming\n", task->tk_pid); |
@@ -766,8 +773,6 @@ void rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt, rpc_action call | |||
766 | 773 | ||
767 | /* Initialize workqueue for async tasks */ | 774 | /* Initialize workqueue for async tasks */ |
768 | task->tk_workqueue = rpciod_workqueue; | 775 | task->tk_workqueue = rpciod_workqueue; |
769 | if (!RPC_IS_ASYNC(task)) | ||
770 | init_waitqueue_head(&task->u.tk_wait.waitq); | ||
771 | 776 | ||
772 | if (clnt) { | 777 | if (clnt) { |
773 | atomic_inc(&clnt->cl_users); | 778 | atomic_inc(&clnt->cl_users); |