| field | value | date |
|---|---|---|
| author | Trond Myklebust <Trond.Myklebust@netapp.com> | 2009-03-10 20:33:16 -0400 |
| committer | Trond Myklebust <Trond.Myklebust@netapp.com> | 2009-03-10 20:33:16 -0400 |
| commit | eb9b55ab4d73280597fd183b367d50452f4d7846 | |
| tree | 80e0c4e5315ea5fd18bb5ba9d37ec5f77170631a | |
| parent | 16b71fdf97599f1b1b7f38418ee9922d9f117396 | |
SUNRPC: Tighten up the task locking rules in __rpc_execute()
We should probably not be testing any flags after we've cleared the
RPC_TASK_RUNNING flag, since rpc_make_runnable() is then free to assign the
rpc_task to another workqueue, which may then destroy it.
We can fix any races with rpc_make_runnable() by ensuring that we only
clear the RPC_TASK_RUNNING flag while holding the rpc_wait_queue->lock that
the task is supposed to be sleeping on (and then checking whether or not
the task really is sleeping).
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
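To make the locking rule concrete before reading the diff, here is a minimal userspace sketch of the executor side of the protocol. It uses a pthread mutex in place of `spin_lock_bh()` (bottom-half disabling is omitted), C11 atomics in place of the `RPC_TASK_*` bit operations, and hypothetical names (`task_t`, `wait_queue_t`, `task_yield`) in place of the real `rpc_task` and `rpc_wait_queue`; it illustrates the pattern the patch introduces, and is not SUNRPC code.

```c
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

/* Hypothetical stand-ins for rpc_wait_queue and rpc_task. */
typedef struct {
        pthread_mutex_t lock;        /* plays the role of rpc_wait_queue->lock */
} wait_queue_t;

typedef struct {
        atomic_bool queued;          /* RPC_TASK_QUEUED analogue */
        atomic_bool running;         /* RPC_TASK_RUNNING analogue */
        bool is_async;
        wait_queue_t *waitqueue;     /* tk_waitqueue analogue */
} task_t;

enum yield_result {
        YIELD_KEEP_RUNNING,          /* raced with a wakeup: continue the loop */
        YIELD_HANDED_OFF,            /* async task: caller must not touch it again */
        YIELD_SLEEP                  /* sync task: caller should go to sleep */
};

/* One "go to sleep" step of the execute loop. */
static enum yield_result task_yield(task_t *task)
{
        /* Cache everything we need *before* the running flag is cleared. */
        bool task_is_async = task->is_async;
        wait_queue_t *queue = task->waitqueue;

        pthread_mutex_lock(&queue->lock);
        if (!atomic_load(&task->queued)) {
                /* A wakeup beat us to it: stay runnable, keep looping. */
                pthread_mutex_unlock(&queue->lock);
                return YIELD_KEEP_RUNNING;
        }
        /*
         * Clearing "running" under queue->lock is the core of the fix: a
         * waker must hold the same lock to dequeue the task, so the two
         * transitions cannot interleave.
         */
        atomic_store(&task->running, false);
        pthread_mutex_unlock(&queue->lock);

        /*
         * An async task may now already belong to another worker; use only
         * the cached flag, never `task` itself.
         */
        return task_is_async ? YIELD_HANDED_OFF : YIELD_SLEEP;
}
```

Caching `task_is_async` before the flag is cleared mirrors the new `int task_is_async = RPC_IS_ASYNC(task)` local that the patch adds to `__rpc_execute()`: once RPC_TASK_RUNNING has been cleared, the task structure can no longer safely be dereferenced.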
-rw-r--r--  net/sunrpc/sched.c | 33 ++++++++++++++++++++-------------
1 file changed, 20 insertions(+), 13 deletions(-)
```diff
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 385f427beda..ff50a054686 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -293,11 +293,6 @@ static void rpc_make_runnable(struct rpc_task *task)
 	rpc_clear_queued(task);
 	if (rpc_test_and_set_running(task))
 		return;
-	/* We might have raced */
-	if (RPC_IS_QUEUED(task)) {
-		rpc_clear_running(task);
-		return;
-	}
 	if (RPC_IS_ASYNC(task)) {
 		int status;
 
@@ -607,7 +602,9 @@ void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
  */
 static void __rpc_execute(struct rpc_task *task)
 {
-	int status = 0;
+	struct rpc_wait_queue *queue;
+	int task_is_async = RPC_IS_ASYNC(task);
+	int status = 0;
 
 	dprintk("RPC: %5u __rpc_execute flags=0x%x\n",
 			task->tk_pid, task->tk_flags);
@@ -647,15 +644,25 @@ static void __rpc_execute(struct rpc_task *task)
 		 */
 		if (!RPC_IS_QUEUED(task))
 			continue;
-		rpc_clear_running(task);
-		if (RPC_IS_ASYNC(task)) {
-			/* Careful! we may have raced... */
-			if (RPC_IS_QUEUED(task))
-				return;
-			if (rpc_test_and_set_running(task))
-				return;
+		/*
+		 * The queue->lock protects against races with
+		 * rpc_make_runnable().
+		 *
+		 * Note that once we clear RPC_TASK_RUNNING on an asynchronous
+		 * rpc_task, rpc_make_runnable() can assign it to a
+		 * different workqueue. We therefore cannot assume that the
+		 * rpc_task pointer may still be dereferenced.
+		 */
+		queue = task->tk_waitqueue;
+		spin_lock_bh(&queue->lock);
+		if (!RPC_IS_QUEUED(task)) {
+			spin_unlock_bh(&queue->lock);
 			continue;
 		}
+		rpc_clear_running(task);
+		spin_unlock_bh(&queue->lock);
+		if (task_is_async)
+			return;
 
 		/* sync task: sleep here */
 		dprintk("RPC: %5u sync task going to sleep\n", task->tk_pid);
```
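For the other half of the picture, here is a matching sketch of the waker side, reusing the hypothetical `task_t`/`wait_queue_t` types from the earlier sketch. It assumes, as the wake-up paths in sched.c do for `rpc_make_runnable()`, that the waker already holds `queue->lock` across the dequeue and the handoff; `schedule_async_work()` is a placeholder, and the wake-up of synchronous sleepers is omitted. On this assumption it also suggests why the first hunk can delete the old "We might have raced" re-check.

```c
/* Stub standing in for the workqueue handoff of an async task. */
static void schedule_async_work(task_t *task) { (void)task; }

/* Caller-holds-lock convention, like the real wake-up path. */
static void wake_task_locked(task_t *task)
{
        atomic_store(&task->queued, false);     /* dequeue under queue->lock */
        if (atomic_exchange(&task->running, true)) {
                /*
                 * The task was still marked running: the executor has not
                 * yet taken queue->lock, will re-check "queued" there, and
                 * will simply keep going. No post-hoc re-check of "queued"
                 * is needed here.
                 */
                return;
        }
        /* The task had fully yielded: safe to hand it to a worker. */
        if (task->is_async)
                schedule_async_work(task);
}

static void wake_task(task_t *task)
{
        wait_queue_t *queue = task->waitqueue;

        pthread_mutex_lock(&queue->lock);
        wake_task_locked(task);
        pthread_mutex_unlock(&queue->lock);
}
```

Because "queued" is now only cleared while the executor holds (or is about to take) the same lock, the dequeue-then-set-running sequence above can no longer interleave with the executor clearing RPC_TASK_RUNNING, which is what the deleted block in `rpc_make_runnable()` used to guard against.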
