Diffstat (limited to 'net/sunrpc/sched.c')
-rw-r--r--  net/sunrpc/sched.c | 84
1 file changed, 48 insertions(+), 36 deletions(-)
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index c06614d0e31d..2d9eb7fbd521 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -290,7 +290,7 @@ static void rpc_make_runnable(struct rpc_task *task)
 			return;
 		}
 	} else
-		wake_up(&task->u.tk_wait.waitq);
+		wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
 }
 
 /*
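
The wake side of the conversion: instead of waking a private per-task waitqueue, rpc_make_runnable() now wakes whatever sleeper is parked on the RPC_TASK_QUEUED bit of tk_runstate. A minimal sketch of the pattern on a made-up structure (my_obj, MY_BUSY and my_obj_release are illustrative, not from sched.c); the bit must be cleared, with a barrier, before the wakeup so the woken sleeper's re-test of the bit sees the new value:

	#include <linux/wait.h>
	#include <linux/bitops.h>

	#define MY_BUSY	0			/* bit number within ->state */

	struct my_obj {
		unsigned long	state;
	};

	static void my_obj_release(struct my_obj *obj)
	{
		clear_bit(MY_BUSY, &obj->state);
		/* make the cleared bit visible before waking the sleeper */
		smp_mb__after_clear_bit();
		wake_up_bit(&obj->state, MY_BUSY);
	}

In sched.c the corresponding clear of RPC_TASK_QUEUED happens earlier in rpc_make_runnable() (via rpc_clear_queued()), which is why the hunk above shows only the wake_up_bit() call.
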
@@ -555,6 +555,38 @@ __rpc_atrun(struct rpc_task *task)
 }
 
 /*
+ * Helper that calls task->tk_exit if it exists and then returns
+ * true if we should exit __rpc_execute.
+ */
+static inline int __rpc_do_exit(struct rpc_task *task)
+{
+	if (task->tk_exit != NULL) {
+		lock_kernel();
+		task->tk_exit(task);
+		unlock_kernel();
+		/* If tk_action is non-null, we should restart the call */
+		if (task->tk_action != NULL) {
+			if (!RPC_ASSASSINATED(task)) {
+				/* Release RPC slot and buffer memory */
+				xprt_release(task);
+				rpc_free(task);
+				return 0;
+			}
+			printk(KERN_ERR "RPC: dead task tried to walk away.\n");
+		}
+	}
+	return 1;
+}
+
+static int rpc_wait_bit_interruptible(void *word)
+{
+	if (signal_pending(current))
+		return -ERESTARTSYS;
+	schedule();
+	return 0;
+}
+
+/*
  * This is the RPC `scheduler' (or rather, the finite state machine).
  */
 static int __rpc_execute(struct rpc_task *task)
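
rpc_wait_bit_interruptible() is written against the wait_on_bit() action contract: the action runs while the bit is still set, returning 0 after schedule() loops back to re-test the bit, and a non-zero return aborts the wait and becomes the caller's return value, hence -ERESTARTSYS when a signal is pending. Continuing the illustrative my_obj sketch from above, the matching sleep side would look like this (the real caller appears in the __rpc_execute() hunk below):

	#include <linux/sched.h>
	#include <linux/wait.h>

	static int my_obj_wait(struct my_obj *obj)
	{
		/* sleep until MY_BUSY clears; -ERESTARTSYS on a signal */
		return out_of_line_wait_on_bit(&obj->state, MY_BUSY,
				rpc_wait_bit_interruptible, TASK_INTERRUPTIBLE);
	}

out_of_line_wait_on_bit() is the non-inline core that the wait_on_bit() wrapper falls back to once the bit tests set; the patch calls it directly.
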
@@ -566,8 +598,7 @@ static int __rpc_execute(struct rpc_task *task)
 
 	BUG_ON(RPC_IS_QUEUED(task));
 
-restarted:
-	while (1) {
+	for (;;) {
 		/*
 		 * Garbage collection of pending timers...
 		 */
@@ -600,11 +631,12 @@ static int __rpc_execute(struct rpc_task *task)
 		 * by someone else.
 		 */
 		if (!RPC_IS_QUEUED(task)) {
-			if (!task->tk_action)
+			if (task->tk_action != NULL) {
+				lock_kernel();
+				task->tk_action(task);
+				unlock_kernel();
+			} else if (__rpc_do_exit(task))
 				break;
-			lock_kernel();
-			task->tk_action(task);
-			unlock_kernel();
 		}
 
 		/*
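
The old code ran the exit path after breaking out of the loop and used "goto restarted" when tk_exit installed a new tk_action; the new code keeps everything inside for (;;) and lets __rpc_do_exit()'s return value decide: 0 means tk_exit asked for a restart (slot and buffers already released), 1 means the task is really done. A compilable userspace model of just this dispatch shape, with every name hypothetical:

	#include <stdio.h>

	struct task {
		void (*action)(struct task *);	/* next state, NULL when done */
		void (*exit)(struct task *);	/* final callback, may restart */
	};

	/* returns 1 to leave the loop, 0 when exit() installed a new action */
	static int do_exit(struct task *t)
	{
		if (t->exit != NULL) {
			t->exit(t);
			if (t->action != NULL)
				return 0;
		}
		return 1;
	}

	static void step2(struct task *t) { printf("step2\n"); t->action = NULL; }
	static void step1(struct task *t) { printf("step1\n"); t->action = step2; }

	static void exit_once(struct task *t)
	{
		static int restarted;

		printf("exit\n");
		if (!restarted++)
			t->action = step1;	/* restart the state machine once */
	}

	static void execute(struct task *t)
	{
		for (;;) {
			if (t->action != NULL)
				t->action(t);
			else if (do_exit(t))
				break;
		}
	}

	int main(void)
	{
		struct task t = { .action = step1, .exit = exit_once };

		execute(&t);	/* prints: step1, step2, exit, step1, step2, exit */
		return 0;
	}
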
@@ -624,44 +656,26 @@ static int __rpc_execute(struct rpc_task *task)
 
 		/* sync task: sleep here */
 		dprintk("RPC: %4d sync task going to sleep\n", task->tk_pid);
-		if (RPC_TASK_UNINTERRUPTIBLE(task)) {
-			__wait_event(task->u.tk_wait.waitq, !RPC_IS_QUEUED(task));
-		} else {
-			__wait_event_interruptible(task->u.tk_wait.waitq, !RPC_IS_QUEUED(task), status);
+		/* Note: Caller should be using rpc_clnt_sigmask() */
+		status = out_of_line_wait_on_bit(&task->tk_runstate,
+				RPC_TASK_QUEUED, rpc_wait_bit_interruptible,
+				TASK_INTERRUPTIBLE);
+		if (status == -ERESTARTSYS) {
 			/*
 			 * When a sync task receives a signal, it exits with
 			 * -ERESTARTSYS. In order to catch any callbacks that
 			 * clean up after sleeping on some queue, we don't
 			 * break the loop here, but go around once more.
 			 */
-			if (status == -ERESTARTSYS) {
-				dprintk("RPC: %4d got signal\n", task->tk_pid);
-				task->tk_flags |= RPC_TASK_KILLED;
-				rpc_exit(task, -ERESTARTSYS);
-				rpc_wake_up_task(task);
-			}
+			dprintk("RPC: %4d got signal\n", task->tk_pid);
+			task->tk_flags |= RPC_TASK_KILLED;
+			rpc_exit(task, -ERESTARTSYS);
+			rpc_wake_up_task(task);
 		}
 		rpc_set_running(task);
 		dprintk("RPC: %4d sync task resuming\n", task->tk_pid);
 	}
 
-	if (task->tk_exit) {
-		lock_kernel();
-		task->tk_exit(task);
-		unlock_kernel();
-		/* If tk_action is non-null, the user wants us to restart */
-		if (task->tk_action) {
-			if (!RPC_ASSASSINATED(task)) {
-				/* Release RPC slot and buffer memory */
-				if (task->tk_rqstp)
-					xprt_release(task);
-				rpc_free(task);
-				goto restarted;
-			}
-			printk(KERN_ERR "RPC: dead task tries to walk away.\n");
-		}
-	}
-
 	dprintk("RPC: %4d exit() = %d\n", task->tk_pid, task->tk_status);
 	status = task->tk_status;
 
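
With the RPC_TASK_UNINTERRUPTIBLE branch gone, every sync task now sleeps in TASK_INTERRUPTIBLE state; the "Caller should be using rpc_clnt_sigmask()" comment marks where uninterruptible behaviour moved: callers block unwanted signals around the call instead. A sketch of that caller-side pattern, modelled on rpc_call_sync() of this era (the wrapper itself is hypothetical):

	#include <linux/sunrpc/clnt.h>

	static int my_call_sync(struct rpc_clnt *clnt, struct rpc_task *task)
	{
		sigset_t oldset;
		int status;

		rpc_clnt_sigmask(clnt, &oldset);	/* block non-fatal signals */
		status = rpc_execute(task);		/* may sleep interruptibly */
		rpc_clnt_sigunmask(clnt, &oldset);	/* restore caller's mask */
		return status;
	}
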
@@ -759,8 +773,6 @@ void rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt, rpc_action call
 
 	/* Initialize workqueue for async tasks */
 	task->tk_workqueue = rpciod_workqueue;
-	if (!RPC_IS_ASYNC(task))
-		init_waitqueue_head(&task->u.tk_wait.waitq);
 
 	if (clnt) {
 		atomic_inc(&clnt->cl_users);