author     Glenn Elliott <gelliott@cs.unc.edu>  2012-03-04 19:47:13 -0500
committer  Glenn Elliott <gelliott@cs.unc.edu>  2012-03-04 19:47:13 -0500
commit     c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree       ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /net/sunrpc/sched.c
parent     ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent     6a00f206debf8a5c8899055726ad127dbeeed098 (diff)
Merge branch 'mpi-master' into wip-k-fmlp
Conflicts:
litmus/sched_cedf.c
Diffstat (limited to 'net/sunrpc/sched.c')
-rw-r--r--	net/sunrpc/sched.c	130
1 file changed, 80 insertions(+), 50 deletions(-)
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index cace6049e4a5..4814e246a874 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -252,23 +252,37 @@ static void rpc_set_active(struct rpc_task *task)
 
 /*
  * Mark an RPC call as having completed by clearing the 'active' bit
+ * and then waking up all tasks that were sleeping.
  */
-static void rpc_mark_complete_task(struct rpc_task *task)
+static int rpc_complete_task(struct rpc_task *task)
 {
-	smp_mb__before_clear_bit();
+	void *m = &task->tk_runstate;
+	wait_queue_head_t *wq = bit_waitqueue(m, RPC_TASK_ACTIVE);
+	struct wait_bit_key k = __WAIT_BIT_KEY_INITIALIZER(m, RPC_TASK_ACTIVE);
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&wq->lock, flags);
 	clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
-	smp_mb__after_clear_bit();
-	wake_up_bit(&task->tk_runstate, RPC_TASK_ACTIVE);
+	ret = atomic_dec_and_test(&task->tk_count);
+	if (waitqueue_active(wq))
+		__wake_up_locked_key(wq, TASK_NORMAL, &k);
+	spin_unlock_irqrestore(&wq->lock, flags);
+	return ret;
 }
 
 /*
  * Allow callers to wait for completion of an RPC call
+ *
+ * Note the use of out_of_line_wait_on_bit() rather than wait_on_bit()
+ * to enforce taking of the wq->lock and hence avoid races with
+ * rpc_complete_task().
  */
 int __rpc_wait_for_completion_task(struct rpc_task *task, int (*action)(void *))
 {
 	if (action == NULL)
 		action = rpc_wait_bit_killable;
-	return wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
+	return out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
 			action, TASK_KILLABLE);
 }
 EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);
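Why this hunk takes wq->lock: previously, clearing RPC_TASK_ACTIVE and waking waiters were ordered only by memory barriers, and the reference count was dropped separately in rpc_put_task(), leaving a window in which a woken waiter could free the task while the completing side still referenced it. After the change, the bit clear, the reference drop, and the wakeup all happen inside one critical section. A minimal userspace sketch of the same pattern, in C with pthreads (illustrative only; `active`, `complete()` and `wait_for_completion()` are stand-ins, not kernel API):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool active = true;	/* stands in for RPC_TASK_ACTIVE */

/* Completer side: clear the flag and wake waiters under the same lock,
 * as rpc_complete_task() does with wq->lock around clear_bit() and
 * __wake_up_locked_key(). */
void complete(void)
{
	pthread_mutex_lock(&lock);
	active = false;
	pthread_cond_broadcast(&cond);
	pthread_mutex_unlock(&lock);
}

/* Waiter side: test and sleep under the lock, as
 * out_of_line_wait_on_bit() does via wq->lock. Testing the flag
 * without the lock could miss a wakeup issued between the test and
 * the sleep. */
void wait_for_completion(void)
{
	pthread_mutex_lock(&lock);
	while (active)
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
}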
@@ -285,15 +299,8 @@ static void rpc_make_runnable(struct rpc_task *task)
 	if (rpc_test_and_set_running(task))
 		return;
 	if (RPC_IS_ASYNC(task)) {
-		int status;
-
 		INIT_WORK(&task->u.tk_work, rpc_async_schedule);
-		status = queue_work(rpciod_workqueue, &task->u.tk_work);
-		if (status < 0) {
-			printk(KERN_WARNING "RPC: failed to add task to queue: error: %d!\n", status);
-			task->tk_status = status;
-			return;
-		}
+		queue_work(rpciod_workqueue, &task->u.tk_work);
 	} else
 		wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
 }
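The dropped error path was dead code: queue_work() returns a boolean-style int (nonzero if the work item was queued, 0 if it was already pending), never a negative errno, so the "status < 0" branch and its printk could never fire.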
@@ -376,7 +383,7 @@ int rpc_queue_empty(struct rpc_wait_queue *queue)
 	spin_lock_bh(&queue->lock);
 	res = queue->qlen;
 	spin_unlock_bh(&queue->lock);
-	return (res == 0);
+	return res == 0;
 }
 EXPORT_SYMBOL_GPL(rpc_queue_empty);
 
@@ -609,32 +616,25 @@ static void __rpc_execute(struct rpc_task *task)
 	BUG_ON(RPC_IS_QUEUED(task));
 
 	for (;;) {
+		void (*do_action)(struct rpc_task *);
 
 		/*
-		 * Execute any pending callback.
+		 * Execute any pending callback first.
 		 */
-		if (task->tk_callback) {
-			void (*save_callback)(struct rpc_task *);
-
+		do_action = task->tk_callback;
+		task->tk_callback = NULL;
+		if (do_action == NULL) {
 			/*
-			 * We set tk_callback to NULL before calling it,
-			 * in case it sets the tk_callback field itself:
+			 * Perform the next FSM step.
+			 * tk_action may be NULL if the task has been killed.
+			 * In particular, note that rpc_killall_tasks may
+			 * do this at any time, so beware when dereferencing.
 			 */
-			save_callback = task->tk_callback;
-			task->tk_callback = NULL;
-			save_callback(task);
-		}
-
-		/*
-		 * Perform the next FSM step.
-		 * tk_action may be NULL when the task has been killed
-		 * by someone else.
-		 */
-		if (!RPC_IS_QUEUED(task)) {
-			if (task->tk_action == NULL)
+			do_action = task->tk_action;
+			if (do_action == NULL)
 				break;
-			task->tk_action(task);
 		}
+		do_action(task);
 
 		/*
 		 * Lockless check for whether task is sleeping or not.
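The refactor also hardens __rpc_execute() against rpc_killall_tasks(), which can clear tk_action at any time: each function pointer is now read once into the local do_action, and only the local copy is tested and called. A condensed sketch of the idiom (a fragment under the same assumptions, not the full function; modern code would use READ_ONCE() to guarantee the single load):

	void (*fn)(struct rpc_task *);

	fn = task->tk_action;	/* one read into a local */
	if (fn != NULL)
		fn(task);	/* safe even if tk_action is cleared concurrently */

	/* By contrast, "if (task->tk_action) task->tk_action(task);" reads
	 * the pointer twice and can call through NULL if it is cleared
	 * between the test and the call. */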
@@ -787,6 +787,7 @@ static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *ta
 	/* Initialize retry counters */
 	task->tk_garb_retry = 2;
 	task->tk_cred_retry = 2;
+	task->tk_rebind_retry = 2;
 
 	task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
 	task->tk_owner = current->tgid;
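The new tk_rebind_retry counter sits alongside the existing garbage-args and credential retry counters; judging from the surrounding code, it bounds how many times a task will retry an rpcbind (port-mapping) lookup before giving up.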
@@ -829,12 +830,6 @@ struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)
 	}
 
 	rpc_init_task(task, setup_data);
-	if (task->tk_status < 0) {
-		int err = task->tk_status;
-		rpc_put_task(task);
-		return ERR_PTR(err);
-	}
-
 	task->tk_flags |= flags;
 	dprintk("RPC: allocated task %p\n", task);
 	return task;
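With this change rpc_new_task() treats initialization as infallible: the removed block propagated a negative tk_status out of rpc_init_task() as an ERR_PTR, and its removal implies rpc_init_task() no longer has a failure path at this point in the call chain.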
@@ -857,34 +852,69 @@ static void rpc_async_release(struct work_struct *work)
 	rpc_free_task(container_of(work, struct rpc_task, u.tk_work));
 }
 
-void rpc_put_task(struct rpc_task *task)
+static void rpc_release_resources_task(struct rpc_task *task)
 {
-	if (!atomic_dec_and_test(&task->tk_count))
-		return;
-	/* Release resources */
 	if (task->tk_rqstp)
 		xprt_release(task);
-	if (task->tk_msg.rpc_cred)
+	if (task->tk_msg.rpc_cred) {
 		put_rpccred(task->tk_msg.rpc_cred);
+		task->tk_msg.rpc_cred = NULL;
+	}
 	rpc_task_release_client(task);
-	if (task->tk_workqueue != NULL) {
+}
+
+static void rpc_final_put_task(struct rpc_task *task,
+		struct workqueue_struct *q)
+{
+	if (q != NULL) {
 		INIT_WORK(&task->u.tk_work, rpc_async_release);
-		queue_work(task->tk_workqueue, &task->u.tk_work);
+		queue_work(q, &task->u.tk_work);
 	} else
 		rpc_free_task(task);
 }
+
+static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q)
+{
+	if (atomic_dec_and_test(&task->tk_count)) {
+		rpc_release_resources_task(task);
+		rpc_final_put_task(task, q);
+	}
+}
+
+void rpc_put_task(struct rpc_task *task)
+{
+	rpc_do_put_task(task, NULL);
+}
 EXPORT_SYMBOL_GPL(rpc_put_task);
 
+void rpc_put_task_async(struct rpc_task *task)
+{
+	rpc_do_put_task(task, task->tk_workqueue);
+}
+EXPORT_SYMBOL_GPL(rpc_put_task_async);
+
 static void rpc_release_task(struct rpc_task *task)
 {
 	dprintk("RPC: %5u release task\n", task->tk_pid);
 
 	BUG_ON (RPC_IS_QUEUED(task));
 
-	/* Wake up anyone who is waiting for task completion */
-	rpc_mark_complete_task(task);
+	rpc_release_resources_task(task);
 
-	rpc_put_task(task);
+	/*
+	 * Note: at this point we have been removed from rpc_clnt->cl_tasks,
+	 * so it should be safe to use task->tk_count as a test for whether
+	 * or not any other processes still hold references to our rpc_task.
+	 */
+	if (atomic_read(&task->tk_count) != 1 + !RPC_IS_ASYNC(task)) {
+		/* Wake up anyone who may be waiting for task completion */
+		if (!rpc_complete_task(task))
+			return;
+	} else {
+		if (!atomic_dec_and_test(&task->tk_count))
+			return;
+	}
+	rpc_final_put_task(task, task->tk_workqueue);
 }
 
 int rpciod_up(void)
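How the new release path decides whether a locked wakeup is needed (a sketch of the accounting, following the hunk's own comment; illustrative, not new API):

/*
 * Baseline tk_count when rpc_release_task() runs:
 *
 *   async task: 1 + !RPC_IS_ASYNC(task) == 1  (only the execution ref)
 *   sync task:  1 + !RPC_IS_ASYNC(task) == 2  (execution ref, plus the
 *               ref still held by the rpc_execute() caller, which is
 *               this very thread)
 *
 * At the baseline nobody can be sleeping in
 * __rpc_wait_for_completion_task(), so a plain atomic_dec_and_test()
 * suffices. Any other value means an extra holder may be waiting, so
 * the task goes through rpc_complete_task(), which clears
 * RPC_TASK_ACTIVE and wakes waiters under wq->lock while dropping the
 * reference, and reports whether that reference was the last. Either
 * way, only the final reference reaches rpc_final_put_task().
 */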
@@ -908,7 +938,7 @@ static int rpciod_start(void)
 	 * Create the rpciod thread and wait for it to start.
 	 */
 	dprintk("RPC: creating workqueue rpciod\n");
-	wq = create_workqueue("rpciod");
+	wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM, 0);
 	rpciod_workqueue = wq;
 	return rpciod_workqueue != NULL;
 }
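The last hunk is a behavioural fix, not just an API rename: under the concurrency-managed workqueue infrastructure, a queue whose work items must make forward progress during memory reclaim (rpciod drives the I/O side of NFS writeback) has to be created with WQ_MEM_RECLAIM so it is backed by a rescuer thread, whereas create_workqueue() is the legacy interface. The third argument to alloc_workqueue() is max_active, and 0 selects the default limit.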