Diffstat (limited to 'net/sunrpc/sched.c')
-rw-r--r--	net/sunrpc/sched.c | 137
1 file changed, 85 insertions(+), 52 deletions(-)
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 225e6510b523..79bc4cdf5d48 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -266,12 +266,28 @@ static int rpc_wait_bit_interruptible(void *word)
 	return 0;
 }
 
+static void rpc_set_active(struct rpc_task *task)
+{
+	if (test_and_set_bit(RPC_TASK_ACTIVE, &task->tk_runstate) != 0)
+		return;
+	spin_lock(&rpc_sched_lock);
+#ifdef RPC_DEBUG
+	task->tk_magic = RPC_TASK_MAGIC_ID;
+	task->tk_pid = rpc_task_id++;
+#endif
+	/* Add to global list of all tasks */
+	list_add_tail(&task->tk_task, &all_tasks);
+	spin_unlock(&rpc_sched_lock);
+}
+
 /*
  * Mark an RPC call as having completed by clearing the 'active' bit
  */
-static inline void rpc_mark_complete_task(struct rpc_task *task)
+static void rpc_mark_complete_task(struct rpc_task *task)
 {
-	rpc_clear_active(task);
+	smp_mb__before_clear_bit();
+	clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
+	smp_mb__after_clear_bit();
 	wake_up_bit(&task->tk_runstate, RPC_TASK_ACTIVE);
 }
 
@@ -295,13 +311,15 @@ EXPORT_SYMBOL(__rpc_wait_for_completion_task);
  */
 static void rpc_make_runnable(struct rpc_task *task)
 {
-	int do_ret;
-
 	BUG_ON(task->tk_timeout_fn);
-	do_ret = rpc_test_and_set_running(task);
 	rpc_clear_queued(task);
-	if (do_ret)
+	if (rpc_test_and_set_running(task))
 		return;
+	/* We might have raced */
+	if (RPC_IS_QUEUED(task)) {
+		rpc_clear_running(task);
+		return;
+	}
 	if (RPC_IS_ASYNC(task)) {
 		int status;
 
@@ -333,9 +351,6 @@ static void __rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
 		return;
 	}
 
-	/* Mark the task as being activated if so needed */
-	rpc_set_active(task);
-
 	__rpc_add_wait_queue(q, task);
 
 	BUG_ON(task->tk_callback != NULL);
@@ -346,6 +361,9 @@ static void __rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
 void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
 				rpc_action action, rpc_action timer)
 {
+	/* Mark the task as being activated if so needed */
+	rpc_set_active(task);
+
 	/*
 	 * Protect the queue operations.
 	 */
@@ -409,16 +427,19 @@ __rpc_default_timer(struct rpc_task *task)
  */
 void rpc_wake_up_task(struct rpc_task *task)
 {
+	rcu_read_lock_bh();
 	if (rpc_start_wakeup(task)) {
 		if (RPC_IS_QUEUED(task)) {
 			struct rpc_wait_queue *queue = task->u.tk_wait.rpc_waitq;
 
-			spin_lock_bh(&queue->lock);
+			/* Note: we're already in a bh-safe context */
+			spin_lock(&queue->lock);
 			__rpc_do_wake_up_task(task);
-			spin_unlock_bh(&queue->lock);
+			spin_unlock(&queue->lock);
 		}
 		rpc_finish_wakeup(task);
 	}
+	rcu_read_unlock_bh();
 }
 
 /*
@@ -481,14 +502,16 @@ struct rpc_task * rpc_wake_up_next(struct rpc_wait_queue *queue)
 	struct rpc_task	*task = NULL;
 
 	dprintk("RPC: wake_up_next(%p \"%s\")\n", queue, rpc_qname(queue));
-	spin_lock_bh(&queue->lock);
+	rcu_read_lock_bh();
+	spin_lock(&queue->lock);
 	if (RPC_IS_PRIORITY(queue))
 		task = __rpc_wake_up_next_priority(queue);
 	else {
 		task_for_first(task, &queue->tasks[0])
 			__rpc_wake_up_task(task);
 	}
-	spin_unlock_bh(&queue->lock);
+	spin_unlock(&queue->lock);
+	rcu_read_unlock_bh();
 
 	return task;
 }
@@ -504,7 +527,8 @@ void rpc_wake_up(struct rpc_wait_queue *queue)
 	struct rpc_task *task, *next;
 	struct list_head *head;
 
-	spin_lock_bh(&queue->lock);
+	rcu_read_lock_bh();
+	spin_lock(&queue->lock);
 	head = &queue->tasks[queue->maxpriority];
 	for (;;) {
 		list_for_each_entry_safe(task, next, head, u.tk_wait.list)
@@ -513,7 +537,8 @@ void rpc_wake_up(struct rpc_wait_queue *queue)
 			break;
 		head--;
 	}
-	spin_unlock_bh(&queue->lock);
+	spin_unlock(&queue->lock);
+	rcu_read_unlock_bh();
 }
 
 /**
@@ -528,7 +553,8 @@ void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
 	struct rpc_task *task, *next;
 	struct list_head *head;
 
-	spin_lock_bh(&queue->lock);
+	rcu_read_lock_bh();
+	spin_lock(&queue->lock);
 	head = &queue->tasks[queue->maxpriority];
 	for (;;) {
 		list_for_each_entry_safe(task, next, head, u.tk_wait.list) {
@@ -539,7 +565,8 @@ void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
 			break;
 		head--;
 	}
-	spin_unlock_bh(&queue->lock);
+	spin_unlock(&queue->lock);
+	rcu_read_unlock_bh();
 }
 
 static void __rpc_atrun(struct rpc_task *task)
@@ -561,7 +588,9 @@ void rpc_delay(struct rpc_task *task, unsigned long delay)
  */
 static void rpc_prepare_task(struct rpc_task *task)
 {
+	lock_kernel();
 	task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
+	unlock_kernel();
 }
 
 /*
@@ -571,7 +600,9 @@ void rpc_exit_task(struct rpc_task *task)
 {
 	task->tk_action = NULL;
 	if (task->tk_ops->rpc_call_done != NULL) {
+		lock_kernel();
 		task->tk_ops->rpc_call_done(task, task->tk_calldata);
+		unlock_kernel();
 		if (task->tk_action != NULL) {
 			WARN_ON(RPC_ASSASSINATED(task));
 			/* Always release the RPC slot and buffer memory */
@@ -581,6 +612,15 @@ void rpc_exit_task(struct rpc_task *task)
 }
 EXPORT_SYMBOL(rpc_exit_task);
 
+void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
+{
+	if (ops->rpc_release != NULL) {
+		lock_kernel();
+		ops->rpc_release(calldata);
+		unlock_kernel();
+	}
+}
+
 /*
  * This is the RPC `scheduler' (or rather, the finite state machine).
  */
@@ -615,9 +655,7 @@ static int __rpc_execute(struct rpc_task *task)
 			 */
 			save_callback=task->tk_callback;
 			task->tk_callback=NULL;
-			lock_kernel();
 			save_callback(task);
-			unlock_kernel();
 		}
 
 		/*
@@ -628,9 +666,7 @@ static int __rpc_execute(struct rpc_task *task)
 		if (!RPC_IS_QUEUED(task)) {
 			if (task->tk_action == NULL)
 				break;
-			lock_kernel();
 			task->tk_action(task);
-			unlock_kernel();
 		}
 
 		/*
@@ -671,8 +707,6 @@ static int __rpc_execute(struct rpc_task *task)
 	}
 
 	dprintk("RPC: %4d, return %d, status %d\n", task->tk_pid, status, task->tk_status);
-	/* Wake up anyone who is waiting for task completion */
-	rpc_mark_complete_task(task);
 	/* Release all resources associated with the task */
 	rpc_release_task(task);
 	return status;
@@ -786,15 +820,6 @@ void rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt, int flags, cons
 		task->tk_flags |= RPC_TASK_NOINTR;
 	}
 
-#ifdef RPC_DEBUG
-	task->tk_magic = RPC_TASK_MAGIC_ID;
-	task->tk_pid = rpc_task_id++;
-#endif
-	/* Add to global list of all tasks */
-	spin_lock(&rpc_sched_lock);
-	list_add_tail(&task->tk_task, &all_tasks);
-	spin_unlock(&rpc_sched_lock);
-
 	BUG_ON(task->tk_ops == NULL);
 
 	/* starting timestamp */
@@ -810,8 +835,9 @@ rpc_alloc_task(void)
 	return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS);
 }
 
-static void rpc_free_task(struct rpc_task *task)
+static void rpc_free_task(struct rcu_head *rcu)
 {
+	struct rpc_task *task = container_of(rcu, struct rpc_task, u.tk_rcu);
 	dprintk("RPC: %4d freeing task\n", task->tk_pid);
 	mempool_free(task, rpc_task_mempool);
 }
@@ -847,16 +873,34 @@ cleanup:
 	goto out;
 }
 
-void rpc_release_task(struct rpc_task *task)
+
+void rpc_put_task(struct rpc_task *task)
 {
 	const struct rpc_call_ops *tk_ops = task->tk_ops;
 	void *calldata = task->tk_calldata;
 
+	if (!atomic_dec_and_test(&task->tk_count))
+		return;
+	/* Release resources */
+	if (task->tk_rqstp)
+		xprt_release(task);
+	if (task->tk_msg.rpc_cred)
+		rpcauth_unbindcred(task);
+	if (task->tk_client) {
+		rpc_release_client(task->tk_client);
+		task->tk_client = NULL;
+	}
+	if (task->tk_flags & RPC_TASK_DYNAMIC)
+		call_rcu_bh(&task->u.tk_rcu, rpc_free_task);
+	rpc_release_calldata(tk_ops, calldata);
+}
+EXPORT_SYMBOL(rpc_put_task);
+
+void rpc_release_task(struct rpc_task *task)
+{
 #ifdef RPC_DEBUG
 	BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID);
 #endif
-	if (!atomic_dec_and_test(&task->tk_count))
-		return;
 	dprintk("RPC: %4d release task\n", task->tk_pid);
 
 	/* Remove from global task list */
@@ -869,23 +913,13 @@ void rpc_release_task(struct rpc_task *task)
 	/* Synchronously delete any running timer */
 	rpc_delete_timer(task);
 
-	/* Release resources */
-	if (task->tk_rqstp)
-		xprt_release(task);
-	if (task->tk_msg.rpc_cred)
-		rpcauth_unbindcred(task);
-	if (task->tk_client) {
-		rpc_release_client(task->tk_client);
-		task->tk_client = NULL;
-	}
-
 #ifdef RPC_DEBUG
 	task->tk_magic = 0;
 #endif
-	if (task->tk_flags & RPC_TASK_DYNAMIC)
-		rpc_free_task(task);
-	if (tk_ops->rpc_release)
-		tk_ops->rpc_release(calldata);
+	/* Wake up anyone who is waiting for task completion */
+	rpc_mark_complete_task(task);
+
+	rpc_put_task(task);
 }
 
 /**
@@ -902,8 +936,7 @@ struct rpc_task *rpc_run_task(struct rpc_clnt *clnt, int flags,
 	struct rpc_task *task;
 	task = rpc_new_task(clnt, flags, ops, data);
 	if (task == NULL) {
-		if (ops->rpc_release != NULL)
-			ops->rpc_release(data);
+		rpc_release_calldata(ops, data);
 		return ERR_PTR(-ENOMEM);
 	}
 	atomic_inc(&task->tk_count);
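
Taken together, the hunks above split task teardown in two: rpc_release_task() is now called only by the scheduler in __rpc_execute(), where it marks the task complete and drops the scheduler's reference, while every other holder drops its reference with the new rpc_put_task(), which frees RPC_TASK_DYNAMIC tasks through call_rcu_bh() so that rpc_wake_up_task() can safely dereference a task under rcu_read_lock_bh(). Below is a minimal caller-side sketch of the resulting lifetime rules; it assumes the post-patch API (rpc_run_task, rpc_put_task, rpc_wait_for_completion_task), and the example_* names are illustrative, not part of this patch.

/* Illustrative sketch only, not from this commit. */
static void example_done(struct rpc_task *task, void *calldata)
{
	/* Runs with the BKL held, per the lock_kernel() in rpc_exit_task() */
}

static const struct rpc_call_ops example_ops = {
	.rpc_call_done	= example_done,
};

static int example_call(struct rpc_clnt *clnt, void *data)
{
	struct rpc_task *task;
	int status;

	/* rpc_run_task() takes an extra tk_count reference for the caller */
	task = rpc_run_task(clnt, RPC_TASK_ASYNC, &example_ops, data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	/* Sleeps until rpc_release_task() clears RPC_TASK_ACTIVE */
	status = rpc_wait_for_completion_task(task);
	if (status == 0)
		status = task->tk_status;
	/* Drop our reference; the task itself is freed via call_rcu_bh() */
	rpc_put_task(task);
	return status;
}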