aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorTrond Myklebust <Trond.Myklebust@netapp.com>2008-02-19 20:04:21 -0500
committerTrond Myklebust <Trond.Myklebust@netapp.com>2008-02-26 00:40:34 -0500
commit32bfb5c0f495dd88ef6bac4b76885d0820563739 (patch)
tree01adff1612a7aaad148a322b79bc02224e8ca735
parent383ba71938519959be8e0b598ec658f0c211ff45 (diff)
SUNRPC: Allow the rpc_release() callback to be run on another workqueue
A lot of the work done by the rpc_release() callback is inappropriate for rpciod as it will often involve things like starting a new rpc call in order to clean up state after an interrupted NFSv4 open() call, or calls to mntput(), etc.

This patch allows the caller of rpc_run_task() to specify that the rpc_release callback should run on a different workqueue than the default rpciod_workqueue.

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
-rw-r--r--include/linux/sunrpc/sched.h1
-rw-r--r--net/sunrpc/sched.c29
2 files changed, 22 insertions, 8 deletions
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index f689f02e6793..fefb0ab52189 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -123,6 +123,7 @@ struct rpc_task_setup {
 	const struct rpc_message *rpc_message;
 	const struct rpc_call_ops *callback_ops;
 	void *callback_data;
+	struct workqueue_struct *workqueue;
 	unsigned short flags;
 	signed char priority;
 };
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 4c669121e607..3e0b22382a3b 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -326,7 +326,7 @@ static void rpc_make_runnable(struct rpc_task *task)
 		int status;
 
 		INIT_WORK(&task->u.tk_work, rpc_async_schedule);
-		status = queue_work(task->tk_workqueue, &task->u.tk_work);
+		status = queue_work(rpciod_workqueue, &task->u.tk_work);
 		if (status < 0) {
 			printk(KERN_WARNING "RPC: failed to add task to queue: error: %d!\n", status);
 			task->tk_status = status;
@@ -832,7 +832,7 @@ static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *ta
 	task->tk_owner = current->tgid;
 
 	/* Initialize workqueue for async tasks */
-	task->tk_workqueue = rpciod_workqueue;
+	task->tk_workqueue = task_setup_data->workqueue;
 
 	task->tk_client = task_setup_data->rpc_client;
 	if (task->tk_client != NULL) {
@@ -868,7 +868,7 @@ rpc_alloc_task(void)
 	return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS);
 }
 
-static void rpc_free_task(struct rcu_head *rcu)
+static void rpc_free_task_rcu(struct rcu_head *rcu)
 {
 	struct rpc_task *task = container_of(rcu, struct rpc_task, u.tk_rcu);
 	dprintk("RPC: %5u freeing task\n", task->tk_pid);
@@ -898,12 +898,23 @@ out:
 	return task;
 }
 
-
-void rpc_put_task(struct rpc_task *task)
+static void rpc_free_task(struct rpc_task *task)
 {
 	const struct rpc_call_ops *tk_ops = task->tk_ops;
 	void *calldata = task->tk_calldata;
 
+	if (task->tk_flags & RPC_TASK_DYNAMIC)
+		call_rcu_bh(&task->u.tk_rcu, rpc_free_task_rcu);
+	rpc_release_calldata(tk_ops, calldata);
+}
+
+static void rpc_async_release(struct work_struct *work)
+{
+	rpc_free_task(container_of(work, struct rpc_task, u.tk_work));
+}
+
+void rpc_put_task(struct rpc_task *task)
+{
 	if (!atomic_dec_and_test(&task->tk_count))
 		return;
 	/* Release resources */
@@ -915,9 +926,11 @@ void rpc_put_task(struct rpc_task *task)
 		rpc_release_client(task->tk_client);
 		task->tk_client = NULL;
 	}
-	if (task->tk_flags & RPC_TASK_DYNAMIC)
-		call_rcu_bh(&task->u.tk_rcu, rpc_free_task);
-	rpc_release_calldata(tk_ops, calldata);
+	if (task->tk_workqueue != NULL) {
+		INIT_WORK(&task->u.tk_work, rpc_async_release);
+		queue_work(task->tk_workqueue, &task->u.tk_work);
+	} else
+		rpc_free_task(task);
 }
 EXPORT_SYMBOL_GPL(rpc_put_task);
 