Diffstat (limited to 'net/sunrpc/sched.c')
 -rw-r--r--  net/sunrpc/sched.c  130
 1 file changed, 28 insertions, 102 deletions
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index aae6907fd546..cace6049e4a5 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -25,7 +25,6 @@

 #ifdef RPC_DEBUG
 #define RPCDBG_FACILITY		RPCDBG_SCHED
-#define RPC_TASK_MAGIC_ID	0xf00baa
 #endif

 /*
@@ -237,7 +236,6 @@ static void rpc_task_set_debuginfo(struct rpc_task *task)
 {
 	static atomic_t rpc_pid;

-	task->tk_magic = RPC_TASK_MAGIC_ID;
 	task->tk_pid = atomic_inc_return(&rpc_pid);
 }
 #else
@@ -248,17 +246,8 @@ static inline void rpc_task_set_debuginfo(struct rpc_task *task)

 static void rpc_set_active(struct rpc_task *task)
 {
-	struct rpc_clnt *clnt;
-	if (test_and_set_bit(RPC_TASK_ACTIVE, &task->tk_runstate) != 0)
-		return;
 	rpc_task_set_debuginfo(task);
-	/* Add to global list of all tasks */
-	clnt = task->tk_client;
-	if (clnt != NULL) {
-		spin_lock(&clnt->cl_lock);
-		list_add_tail(&task->tk_task, &clnt->cl_tasks);
-		spin_unlock(&clnt->cl_lock);
-	}
+	set_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
 }

 /*
@@ -321,11 +310,6 @@ static void __rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
 	dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n",
 			task->tk_pid, rpc_qname(q), jiffies);

-	if (!RPC_IS_ASYNC(task) && !RPC_IS_ACTIVATED(task)) {
-		printk(KERN_ERR "RPC: Inactive synchronous task put to sleep!\n");
-		return;
-	}
-
 	__rpc_add_wait_queue(q, task);

 	BUG_ON(task->tk_callback != NULL);
@@ -336,8 +320,8 @@ static void __rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
 void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
 				rpc_action action)
 {
-	/* Mark the task as being activated if so needed */
-	rpc_set_active(task);
+	/* We shouldn't ever put an inactive task to sleep */
+	BUG_ON(!RPC_IS_ACTIVATED(task));

 	/*
 	 * Protect the queue operations.
@@ -360,9 +344,6 @@ static void __rpc_do_wake_up_task(struct rpc_wait_queue *queue, struct rpc_task
 	dprintk("RPC: %5u __rpc_wake_up_task (now %lu)\n",
 			task->tk_pid, jiffies);

-#ifdef RPC_DEBUG
-	BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID);
-#endif
 	/* Has the task been executed yet? If not, we cannot wake it up! */
 	if (!RPC_IS_ACTIVATED(task)) {
 		printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
@@ -411,14 +392,6 @@ void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task
 EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task);

 /*
- * Wake up the specified task
- */
-static void rpc_wake_up_task(struct rpc_task *task)
-{
-	rpc_wake_up_queued_task(task->tk_waitqueue, task);
-}
-
-/*
  * Wake up the next task on a priority queue.
  */
 static struct rpc_task * __rpc_wake_up_next_priority(struct rpc_wait_queue *queue)
@@ -605,7 +578,15 @@ void rpc_exit_task(struct rpc_task *task)
 		}
 	}
 }
-EXPORT_SYMBOL_GPL(rpc_exit_task);
+
+void rpc_exit(struct rpc_task *task, int status)
+{
+	task->tk_status = status;
+	task->tk_action = rpc_exit_task;
+	if (RPC_IS_QUEUED(task))
+		rpc_wake_up_queued_task(task->tk_waitqueue, task);
+}
+EXPORT_SYMBOL_GPL(rpc_exit);

 void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
 {
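With rpc_exit() defined and exported above, terminating a task no longer requires pairing it with a separate wake-up: the function wakes the task itself if it is still queued, which is why the __rpc_execute() hunk below can drop the old rpc_wake_up_task() call. A minimal caller sketch, assuming the rpc_exit() declaration in linux/sunrpc/sched.h; the helper name is illustrative, not from this patch:

#include <linux/sunrpc/sched.h>

/* Illustrative helper (not part of the patch): abort an RPC task early. */
static void example_abort_task(struct rpc_task *task, int err)
{
	rpc_exit(task, err);	/* sets tk_status/tk_action and wakes the task if queued */
}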
@@ -695,7 +676,6 @@ static void __rpc_execute(struct rpc_task *task)
 			dprintk("RPC: %5u got signal\n", task->tk_pid);
 			task->tk_flags |= RPC_TASK_KILLED;
 			rpc_exit(task, -ERESTARTSYS);
-			rpc_wake_up_task(task);
 		}
 		rpc_set_running(task);
 		dprintk("RPC: %5u sync task resuming\n", task->tk_pid);
@@ -719,8 +699,9 @@ static void __rpc_execute(struct rpc_task *task)
 void rpc_execute(struct rpc_task *task)
 {
 	rpc_set_active(task);
-	rpc_set_running(task);
-	__rpc_execute(task);
+	rpc_make_runnable(task);
+	if (!RPC_IS_ASYNC(task))
+		__rpc_execute(task);
 }

 static void rpc_async_schedule(struct work_struct *work)
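rpc_execute() now only calls __rpc_execute() inline for synchronous tasks; an RPC_TASK_ASYNC task is just made runnable, and (in the existing rpc_make_runnable() path) rpciod later runs it through rpc_async_schedule(). A hedged caller sketch, assuming the rpc_wait_for_completion_task() helper from linux/sunrpc/sched.h and a caller that still holds its task reference; the function name is illustrative:

#include <linux/sunrpc/sched.h>

/* Illustrative only: start a task and wait for it, sync or async. */
static int example_run_and_wait(struct rpc_task *task)
{
	rpc_execute(task);	/* an async task returns here as soon as it is queued */
	if (RPC_IS_ASYNC(task))
		rpc_wait_for_completion_task(task);	/* sleep until RPC_TASK_ACTIVE clears */
	return task->tk_status;
}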
@@ -813,28 +794,11 @@ static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *ta
 	/* Initialize workqueue for async tasks */
 	task->tk_workqueue = task_setup_data->workqueue;

-	task->tk_client = task_setup_data->rpc_client;
-	if (task->tk_client != NULL) {
-		kref_get(&task->tk_client->cl_kref);
-		if (task->tk_client->cl_softrtry)
-			task->tk_flags |= RPC_TASK_SOFT;
-	}
-
 	if (task->tk_ops->rpc_call_prepare != NULL)
 		task->tk_action = rpc_prepare_task;

-	if (task_setup_data->rpc_message != NULL) {
-		task->tk_msg.rpc_proc = task_setup_data->rpc_message->rpc_proc;
-		task->tk_msg.rpc_argp = task_setup_data->rpc_message->rpc_argp;
-		task->tk_msg.rpc_resp = task_setup_data->rpc_message->rpc_resp;
-		/* Bind the user cred */
-		rpcauth_bindcred(task, task_setup_data->rpc_message->rpc_cred, task_setup_data->flags);
-		if (task->tk_action == NULL)
-			rpc_call_start(task);
-	}
-
 	/* starting timestamp */
-	task->tk_start = jiffies;
+	task->tk_start = ktime_get();

 	dprintk("RPC: new task initialized, procpid %u\n",
 				task_pid_nr(current));
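task->tk_start is now sampled with ktime_get() rather than jiffies, so code that derives elapsed times from it would use the ktime helpers instead of jiffies arithmetic. A small sketch of how the timestamp might be consumed (assumed usage, not code from this patch):

#include <linux/ktime.h>
#include <linux/sunrpc/sched.h>

/* Illustrative only: milliseconds since the task was initialised. */
static s64 example_task_age_ms(const struct rpc_task *task)
{
	return ktime_to_ms(ktime_sub(ktime_get(), task->tk_start));
}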
@@ -856,16 +820,23 @@ struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)

 	if (task == NULL) {
 		task = rpc_alloc_task();
-		if (task == NULL)
-			goto out;
+		if (task == NULL) {
+			rpc_release_calldata(setup_data->callback_ops,
+					setup_data->callback_data);
+			return ERR_PTR(-ENOMEM);
+		}
 		flags = RPC_TASK_DYNAMIC;
 	}

 	rpc_init_task(task, setup_data);
+	if (task->tk_status < 0) {
+		int err = task->tk_status;
+		rpc_put_task(task);
+		return ERR_PTR(err);
+	}

 	task->tk_flags |= flags;
 	dprintk("RPC: allocated task %p\n", task);
-out:
 	return task;
 }

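rpc_new_task() now signals failure with an ERR_PTR() value (releasing the callback data itself on allocation failure) instead of returning NULL, so callers are expected to check the result with IS_ERR(). A hypothetical caller sketch, assuming the rpc_new_task() declaration in linux/sunrpc/sched.h; the wrapper name is illustrative:

#include <linux/err.h>
#include <linux/sunrpc/sched.h>

/* Illustrative only: propagate the ERR_PTR-style failure upwards. */
static struct rpc_task *example_new_task(const struct rpc_task_setup *setup)
{
	struct rpc_task *task = rpc_new_task(setup);

	if (IS_ERR(task))
		return task;	/* caller retrieves the errno with PTR_ERR(task) */
	/* ... any additional per-task setup would go here ... */
	return task;
}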
@@ -894,11 +865,8 @@ void rpc_put_task(struct rpc_task *task)
 	if (task->tk_rqstp)
 		xprt_release(task);
 	if (task->tk_msg.rpc_cred)
-		rpcauth_unbindcred(task);
-	if (task->tk_client) {
-		rpc_release_client(task->tk_client);
-		task->tk_client = NULL;
-	}
+		put_rpccred(task->tk_msg.rpc_cred);
+	rpc_task_release_client(task);
 	if (task->tk_workqueue != NULL) {
 		INIT_WORK(&task->u.tk_work, rpc_async_release);
 		queue_work(task->tk_workqueue, &task->u.tk_work);
@@ -909,58 +877,16 @@ EXPORT_SYMBOL_GPL(rpc_put_task);

 static void rpc_release_task(struct rpc_task *task)
 {
-#ifdef RPC_DEBUG
-	BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID);
-#endif
 	dprintk("RPC: %5u release task\n", task->tk_pid);

-	if (!list_empty(&task->tk_task)) {
-		struct rpc_clnt *clnt = task->tk_client;
-		/* Remove from client task list */
-		spin_lock(&clnt->cl_lock);
-		list_del(&task->tk_task);
-		spin_unlock(&clnt->cl_lock);
-	}
 	BUG_ON (RPC_IS_QUEUED(task));

-#ifdef RPC_DEBUG
-	task->tk_magic = 0;
-#endif
 	/* Wake up anyone who is waiting for task completion */
 	rpc_mark_complete_task(task);

 	rpc_put_task(task);
 }

-/*
- * Kill all tasks for the given client.
- * XXX: kill their descendants as well?
- */
-void rpc_killall_tasks(struct rpc_clnt *clnt)
-{
-	struct rpc_task *rovr;
-
-
-	if (list_empty(&clnt->cl_tasks))
-		return;
-	dprintk("RPC: killing all tasks for client %p\n", clnt);
-	/*
-	 * Spin lock all_tasks to prevent changes...
-	 */
-	spin_lock(&clnt->cl_lock);
-	list_for_each_entry(rovr, &clnt->cl_tasks, tk_task) {
-		if (! RPC_IS_ACTIVATED(rovr))
-			continue;
-		if (!(rovr->tk_flags & RPC_TASK_KILLED)) {
-			rovr->tk_flags |= RPC_TASK_KILLED;
-			rpc_exit(rovr, -EIO);
-			rpc_wake_up_task(rovr);
-		}
-	}
-	spin_unlock(&clnt->cl_lock);
-}
-EXPORT_SYMBOL_GPL(rpc_killall_tasks);
-
 int rpciod_up(void)
 {
 	return try_module_get(THIS_MODULE) ? 0 : -EINVAL;
