path: root/net/sunrpc/sched.c
author	Trond Myklebust <Trond.Myklebust@netapp.com>	2006-11-13 16:23:44 -0500
committer	Trond Myklebust <Trond.Myklebust@netapp.com>	2006-12-06 10:46:26 -0500
commit	8aca67f0ae2d8811165c22326825a645cc8e1b48 (patch)
tree	19e82f4bc7b4f865a9dcf4744e7c224ea517ba10 /net/sunrpc/sched.c
parent	e6b3c4db6fbcd0d33720696f37790d6b8be12313 (diff)
SUNRPC: Fix a potential race in rpc_wake_up_task()
Use RCU to ensure that we can safely call rpc_finish_wakeup after we've called __rpc_do_wake_up_task. If not, there is a theoretical race in which the rpc_task finishes executing and gets freed first.

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
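The fix follows the standard RCU deferred-free pattern: each waker brackets its access to the rpc_task with an RCU bottom-half read-side critical section, and the final free is routed through call_rcu_bh(), whose callback runs only after every CPU has left such a section. Below is a minimal sketch of the pattern using the 2006-era RCU API; struct foo and its functions are hypothetical names for illustration, not the real sched.c symbols.

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct foo {
	spinlock_t lock;
	struct rcu_head rcu;	/* embedded so the free can be deferred */
};

void foo_wake(struct foo *f)
{
	rcu_read_lock_bh();	/* f's memory is pinned for this section */
	spin_lock(&f->lock);	/* BHs already disabled; plain lock suffices */
	/* ... wake-up work that may race with the final put ... */
	spin_unlock(&f->lock);
	rcu_read_unlock_bh();	/* only after this may the RCU callback free f */
}

static void foo_free_rcu(struct rcu_head *rcu)
{
	struct foo *f = container_of(rcu, struct foo, rcu);
	kfree(f);
}

void foo_put(struct foo *f)
{
	/* never frees immediately: waits out all rcu_read_lock_bh() readers */
	call_rcu_bh(&f->rcu, foo_free_rcu);
}

Even if foo_put() runs while another CPU is mid-wakeup, that CPU's read-side section holds foo_free_rcu() off until it reaches rcu_read_unlock_bh(), which is exactly the guarantee rpc_wake_up_task() needs before rpc_finish_wakeup().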
Diffstat (limited to 'net/sunrpc/sched.c')
-rw-r--r--	net/sunrpc/sched.c	| 30
1 file changed, 20 insertions(+), 10 deletions(-)
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 66d01365f3a5..6b808c03fb72 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -427,16 +427,19 @@ __rpc_default_timer(struct rpc_task *task)
  */
 void rpc_wake_up_task(struct rpc_task *task)
 {
+	rcu_read_lock_bh();
 	if (rpc_start_wakeup(task)) {
 		if (RPC_IS_QUEUED(task)) {
 			struct rpc_wait_queue *queue = task->u.tk_wait.rpc_waitq;
 
-			spin_lock_bh(&queue->lock);
+			/* Note: we're already in a bh-safe context */
+			spin_lock(&queue->lock);
 			__rpc_do_wake_up_task(task);
-			spin_unlock_bh(&queue->lock);
+			spin_unlock(&queue->lock);
 		}
 		rpc_finish_wakeup(task);
 	}
+	rcu_read_unlock_bh();
 }
 
 /*
@@ -499,14 +502,16 @@ struct rpc_task * rpc_wake_up_next(struct rpc_wait_queue *queue)
 	struct rpc_task	*task = NULL;
 
 	dprintk("RPC: wake_up_next(%p \"%s\")\n", queue, rpc_qname(queue));
-	spin_lock_bh(&queue->lock);
+	rcu_read_lock_bh();
+	spin_lock(&queue->lock);
 	if (RPC_IS_PRIORITY(queue))
 		task = __rpc_wake_up_next_priority(queue);
 	else {
 		task_for_first(task, &queue->tasks[0])
 			__rpc_wake_up_task(task);
 	}
-	spin_unlock_bh(&queue->lock);
+	spin_unlock(&queue->lock);
+	rcu_read_unlock_bh();
 
 	return task;
 }
@@ -522,7 +527,8 @@ void rpc_wake_up(struct rpc_wait_queue *queue)
 	struct rpc_task *task, *next;
 	struct list_head *head;
 
-	spin_lock_bh(&queue->lock);
+	rcu_read_lock_bh();
+	spin_lock(&queue->lock);
 	head = &queue->tasks[queue->maxpriority];
 	for (;;) {
 		list_for_each_entry_safe(task, next, head, u.tk_wait.list)
@@ -531,7 +537,8 @@ void rpc_wake_up(struct rpc_wait_queue *queue)
 			break;
 		head--;
 	}
-	spin_unlock_bh(&queue->lock);
+	spin_unlock(&queue->lock);
+	rcu_read_unlock_bh();
 }
 
 /**
@@ -546,7 +553,8 @@ void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
 	struct rpc_task *task, *next;
 	struct list_head *head;
 
-	spin_lock_bh(&queue->lock);
+	rcu_read_lock_bh();
+	spin_lock(&queue->lock);
 	head = &queue->tasks[queue->maxpriority];
 	for (;;) {
 		list_for_each_entry_safe(task, next, head, u.tk_wait.list) {
@@ -557,7 +565,8 @@ void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
 			break;
 		head--;
 	}
-	spin_unlock_bh(&queue->lock);
+	spin_unlock(&queue->lock);
+	rcu_read_unlock_bh();
 }
 
 static void __rpc_atrun(struct rpc_task *task)
@@ -817,8 +826,9 @@ rpc_alloc_task(void)
 	return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS);
 }
 
-static void rpc_free_task(struct rpc_task *task)
+static void rpc_free_task(struct rcu_head *rcu)
 {
+	struct rpc_task *task = container_of(rcu, struct rpc_task, u.tk_rcu);
 	dprintk("RPC: %4d freeing task\n", task->tk_pid);
 	mempool_free(task, rpc_task_mempool);
 }
@@ -872,7 +882,7 @@ void rpc_put_task(struct rpc_task *task)
 		task->tk_client = NULL;
 	}
 	if (task->tk_flags & RPC_TASK_DYNAMIC)
-		rpc_free_task(task);
+		call_rcu_bh(&task->u.tk_rcu, rpc_free_task);
 	if (tk_ops->rpc_release)
 		tk_ops->rpc_release(calldata);
 }
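A note on the lock downgrade repeated through the hunks above: in the classic RCU of this era, rcu_read_lock_bh() amounts to local_bh_disable() plus entry into the read-side section (this equivalence is an assumption about the implementation, not something the patch states). Once a waker is inside the RCU section, bottom halves are therefore already disabled, and the plain spin_lock()/spin_unlock() pair is enough; spin_lock_bh() would only disable them a second time. That is what the in-diff comment "we're already in a bh-safe context" points at:

	rcu_read_lock_bh();		/* ~ local_bh_disable() + RCU read-side entry */
	spin_lock(&queue->lock);	/* no _bh variant needed here */
	/* ... critical section ... */
	spin_unlock(&queue->lock);
	rcu_read_unlock_bh();		/* ~ RCU read-side exit + local_bh_enable() */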