author | Trond Myklebust <Trond.Myklebust@netapp.com> | 2008-02-26 00:53:49 -0500
committer | Trond Myklebust <Trond.Myklebust@netapp.com> | 2008-02-29 02:26:28 -0500
commit | 5e4424af9a1f062c6451681dff24a26e27741cc6 (patch)
tree | fcba54a8fc2d94c1d691ab5b8b956ac71b5cb8d4 /net
parent | ff2d7db848f8db7cade39e55f78f86d77e0de01a (diff)
SUNRPC: Remove now-redundant RCU-safe rpc_task free path
Now that we've tightened up the locking rules for RPC queue wakeups, we can
remove the RCU-safe kfree calls...
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
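The locking change is the same mechanical substitution in every wakeup path below: the paired rcu_read_lock_bh()/spin_lock() collapses into a single spin_lock_bh(), which disables bottom halves and takes the queue lock in one step. A minimal before/after sketch of the pattern, lifted from the rpc_wake_up_queued_task() hunk (it assumes the surrounding kernel definitions, so it is illustrative rather than standalone):

	/* Before: the wakeup ran inside an RCU-bh read-side section; the
	 * call_rcu_bh() grace period kept the rpc_task valid even for a
	 * waker racing with the task being released.
	 */
	rcu_read_lock_bh();
	spin_lock(&queue->lock);
	rpc_wake_up_task_queue_locked(queue, task);
	spin_unlock(&queue->lock);
	rcu_read_unlock_bh();

	/* After: spin_lock_bh() provides the same bottom-half exclusion,
	 * and the task is now protected by queue->lock alone, so the RCU
	 * read section (and the deferred free it paired with) can go.
	 */
	spin_lock_bh(&queue->lock);
	rpc_wake_up_task_queue_locked(queue, task);
	spin_unlock_bh(&queue->lock);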
Diffstat (limited to 'net')
-rw-r--r-- | net/sunrpc/sched.c | 37
1 file changed, 12 insertions(+), 25 deletions(-)
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 88a686a8e43e..cae219c8caeb 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -393,11 +393,9 @@ static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct r
  */
 void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task)
 {
-	rcu_read_lock_bh();
-	spin_lock(&queue->lock);
+	spin_lock_bh(&queue->lock);
 	rpc_wake_up_task_queue_locked(queue, task);
-	spin_unlock(&queue->lock);
-	rcu_read_unlock_bh();
+	spin_unlock_bh(&queue->lock);
 }
 EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task);
 
@@ -470,16 +468,14 @@ struct rpc_task * rpc_wake_up_next(struct rpc_wait_queue *queue)
 
 	dprintk("RPC:       wake_up_next(%p \"%s\")\n",
 			queue, rpc_qname(queue));
-	rcu_read_lock_bh();
-	spin_lock(&queue->lock);
+	spin_lock_bh(&queue->lock);
 	if (RPC_IS_PRIORITY(queue))
 		task = __rpc_wake_up_next_priority(queue);
 	else {
 		task_for_first(task, &queue->tasks[0])
 			rpc_wake_up_task_queue_locked(queue, task);
 	}
-	spin_unlock(&queue->lock);
-	rcu_read_unlock_bh();
+	spin_unlock_bh(&queue->lock);
 
 	return task;
 }
@@ -496,8 +492,7 @@ void rpc_wake_up(struct rpc_wait_queue *queue)
 	struct rpc_task *task, *next;
 	struct list_head *head;
 
-	rcu_read_lock_bh();
-	spin_lock(&queue->lock);
+	spin_lock_bh(&queue->lock);
 	head = &queue->tasks[queue->maxpriority];
 	for (;;) {
 		list_for_each_entry_safe(task, next, head, u.tk_wait.list)
@@ -506,8 +501,7 @@ void rpc_wake_up(struct rpc_wait_queue *queue)
 			break;
 		head--;
 	}
-	spin_unlock(&queue->lock);
-	rcu_read_unlock_bh();
+	spin_unlock_bh(&queue->lock);
 }
 EXPORT_SYMBOL_GPL(rpc_wake_up);
 
@@ -523,8 +517,7 @@ void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
 	struct rpc_task *task, *next;
 	struct list_head *head;
 
-	rcu_read_lock_bh();
-	spin_lock(&queue->lock);
+	spin_lock_bh(&queue->lock);
 	head = &queue->tasks[queue->maxpriority];
 	for (;;) {
 		list_for_each_entry_safe(task, next, head, u.tk_wait.list) {
@@ -535,8 +528,7 @@ void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
 			break;
 		head--;
 	}
-	spin_unlock(&queue->lock);
-	rcu_read_unlock_bh();
+	spin_unlock_bh(&queue->lock);
 }
 EXPORT_SYMBOL_GPL(rpc_wake_up_status);
 
@@ -848,13 +840,6 @@ rpc_alloc_task(void)
 	return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS);
 }
 
-static void rpc_free_task_rcu(struct rcu_head *rcu)
-{
-	struct rpc_task *task = container_of(rcu, struct rpc_task, u.tk_rcu);
-	dprintk("RPC: %5u freeing task\n", task->tk_pid);
-	mempool_free(task, rpc_task_mempool);
-}
-
 /*
  * Create a new task for the specified client.
  */
@@ -883,8 +868,10 @@ static void rpc_free_task(struct rpc_task *task)
 	const struct rpc_call_ops *tk_ops = task->tk_ops;
 	void *calldata = task->tk_calldata;
 
-	if (task->tk_flags & RPC_TASK_DYNAMIC)
-		call_rcu_bh(&task->u.tk_rcu, rpc_free_task_rcu);
+	if (task->tk_flags & RPC_TASK_DYNAMIC) {
+		dprintk("RPC: %5u freeing task\n", task->tk_pid);
+		mempool_free(task, rpc_task_mempool);
+	}
 	rpc_release_calldata(tk_ops, calldata);
 }
 