author		Trond Myklebust <Trond.Myklebust@netapp.com>	2011-02-21 14:05:41 -0500
committer	Trond Myklebust <Trond.Myklebust@netapp.com>	2011-03-10 15:04:52 -0500
commit		bf294b41cefcb22fc3139e0f42c5b3f06728bd5e (patch)
tree		250251c040a2d2e278b5a2ddd03c8d20a27be129 /net
parent		214d93b02c4fe93638ad268613c9702a81ed9192 (diff)
SUNRPC: Close a race in __rpc_wait_for_completion_task()
Although they run as rpciod background tasks, under normal operation
(i.e. no SIGKILL), functions like nfs_sillyrename(), nfs4_proc_unlck()
and nfs4_do_close() want to be fully synchronous. This means that when we
exit, we want all references to the rpc_task to be gone, and we want
any dentry references etc. held by that task to be released.
For this reason these functions call __rpc_wait_for_completion_task(),
followed by rpc_put_task(), in the expectation that the latter will release
the last reference to the rpc_task, thus ensuring that
callback_ops->rpc_release() has been called synchronously.
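For context, the synchronous pattern these callers rely on looks roughly like the following sketch (a minimal outline assuming an already-prepared struct rpc_task_setup; the NFS-specific setup and error handling are elided):

	struct rpc_task *task;
	int status;

	task = rpc_run_task(&task_setup_data);	/* executes on rpciod */
	if (IS_ERR(task))
		return PTR_ERR(task);
	/* Block until rpciod marks the task complete... */
	status = rpc_wait_for_completion_task(task);
	/* ...then drop what is expected to be the last reference, so that
	 * callback_ops->rpc_release() has run before we return. */
	rpc_put_task(task);
	return status;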
This patch fixes a race that arises because rpciod calls
rpc_complete_task() (in order to wake up the callers of
__rpc_wait_for_completion_task()) and then subsequently calls
rpc_put_task(), without ensuring that these two steps are done atomically.
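Spelled out, the problematic interleaving is roughly the following (an illustrative timeline, not code from the patch):

	/*
	 * rpciod (rpc_release_task)        waiting process
	 * -------------------------        ---------------
	 * clear RPC_TASK_ACTIVE
	 * wake_up_bit()            ----->  wait_on_bit() returns
	 *                                  rpc_put_task(): tk_count is
	 *                                  still 2, so this put does NOT
	 *                                  release the task
	 * rpc_put_task(): drops the last
	 * reference, so rpc_release() runs
	 * asynchronously on rpciod after the
	 * "synchronous" caller has returned
	 */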
In order to avoid adding new spin locks, the patch uses the existing
waitqueue spin lock to order the rpc_task reference count releases between
the waiting process and rpciod.
The common case, where nobody is waiting for completion, is optimised for by
checking whether the RPC_TASK_ASYNC flag is cleared and/or the rpc_task
reference count is 1: in those cases we skip taking the spin lock and
immediately free up the rpc_task.
Those few processes that need to put the rpc_task from inside an
asynchronous context and that do not care about ordering are given a new
helper: rpc_put_task_async().
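A hypothetical caller of the new helper might look like the sketch below (illustrative only; the function and its context are invented for the example, not taken from the patch):

	/*
	 * Runs in rpciod context and holds a task reference, but never
	 * sleeps in __rpc_wait_for_completion_task(), so it does not care
	 * about the wq->lock ordering; the final rpc_free_task() is simply
	 * deferred to task->tk_workqueue.
	 */
	static void example_rpc_call_done(struct rpc_task *task, void *calldata)
	{
		/* ... inspect task->tk_status, update caller state ... */
		rpc_put_task_async(task);
	}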
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
Diffstat (limited to 'net')
-rw-r--r--	net/sunrpc/sched.c	75
1 file changed, 61 insertions(+), 14 deletions(-)
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 243fc09b164e..59e599498e37 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -252,23 +252,37 @@ static void rpc_set_active(struct rpc_task *task)
 
 /*
  * Mark an RPC call as having completed by clearing the 'active' bit
+ * and then waking up all tasks that were sleeping.
  */
-static void rpc_mark_complete_task(struct rpc_task *task)
+static int rpc_complete_task(struct rpc_task *task)
 {
-	smp_mb__before_clear_bit();
+	void *m = &task->tk_runstate;
+	wait_queue_head_t *wq = bit_waitqueue(m, RPC_TASK_ACTIVE);
+	struct wait_bit_key k = __WAIT_BIT_KEY_INITIALIZER(m, RPC_TASK_ACTIVE);
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&wq->lock, flags);
 	clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
-	smp_mb__after_clear_bit();
-	wake_up_bit(&task->tk_runstate, RPC_TASK_ACTIVE);
+	ret = atomic_dec_and_test(&task->tk_count);
+	if (waitqueue_active(wq))
+		__wake_up_locked_key(wq, TASK_NORMAL, &k);
+	spin_unlock_irqrestore(&wq->lock, flags);
+	return ret;
 }
 
 /*
  * Allow callers to wait for completion of an RPC call
+ *
+ * Note the use of out_of_line_wait_on_bit() rather than wait_on_bit()
+ * to enforce taking of the wq->lock and hence avoid races with
+ * rpc_complete_task().
  */
 int __rpc_wait_for_completion_task(struct rpc_task *task, int (*action)(void *))
 {
 	if (action == NULL)
 		action = rpc_wait_bit_killable;
-	return wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
+	return out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
 			action, TASK_KILLABLE);
 }
 EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);
@@ -857,34 +871,67 @@ static void rpc_async_release(struct work_struct *work)
 	rpc_free_task(container_of(work, struct rpc_task, u.tk_work));
 }
 
-void rpc_put_task(struct rpc_task *task)
+static void rpc_release_resources_task(struct rpc_task *task)
 {
-	if (!atomic_dec_and_test(&task->tk_count))
-		return;
-	/* Release resources */
 	if (task->tk_rqstp)
 		xprt_release(task);
 	if (task->tk_msg.rpc_cred)
 		put_rpccred(task->tk_msg.rpc_cred);
 	rpc_task_release_client(task);
-	if (task->tk_workqueue != NULL) {
+}
+
+static void rpc_final_put_task(struct rpc_task *task,
+		struct workqueue_struct *q)
+{
+	if (q != NULL) {
 		INIT_WORK(&task->u.tk_work, rpc_async_release);
-		queue_work(task->tk_workqueue, &task->u.tk_work);
+		queue_work(q, &task->u.tk_work);
 	} else
 		rpc_free_task(task);
 }
+
+static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q)
+{
+	if (atomic_dec_and_test(&task->tk_count)) {
+		rpc_release_resources_task(task);
+		rpc_final_put_task(task, q);
+	}
+}
+
+void rpc_put_task(struct rpc_task *task)
+{
+	rpc_do_put_task(task, NULL);
+}
 EXPORT_SYMBOL_GPL(rpc_put_task);
 
+void rpc_put_task_async(struct rpc_task *task)
+{
+	rpc_do_put_task(task, task->tk_workqueue);
+}
+EXPORT_SYMBOL_GPL(rpc_put_task_async);
+
 static void rpc_release_task(struct rpc_task *task)
 {
 	dprintk("RPC: %5u release task\n", task->tk_pid);
 
 	BUG_ON (RPC_IS_QUEUED(task));
 
-	/* Wake up anyone who is waiting for task completion */
-	rpc_mark_complete_task(task);
+	rpc_release_resources_task(task);
 
-	rpc_put_task(task);
+	/*
+	 * Note: at this point we have been removed from rpc_clnt->cl_tasks,
+	 * so it should be safe to use task->tk_count as a test for whether
+	 * or not any other processes still hold references to our rpc_task.
+	 */
+	if (atomic_read(&task->tk_count) != 1 + !RPC_IS_ASYNC(task)) {
+		/* Wake up anyone who may be waiting for task completion */
+		if (!rpc_complete_task(task))
+			return;
+	} else {
+		if (!atomic_dec_and_test(&task->tk_count))
+			return;
+	}
+	rpc_final_put_task(task, task->tk_workqueue);
 }
 
 int rpciod_up(void)