Diffstat (limited to 'net/sunrpc/sched.c')
-rw-r--r--  net/sunrpc/sched.c  77
1 file changed, 4 insertions(+), 73 deletions(-)
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 37452762af70..a42296db2ecd 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -246,17 +246,8 @@ static inline void rpc_task_set_debuginfo(struct rpc_task *task)
 
 static void rpc_set_active(struct rpc_task *task)
 {
-        struct rpc_clnt *clnt;
-        if (test_and_set_bit(RPC_TASK_ACTIVE, &task->tk_runstate) != 0)
-                return;
         rpc_task_set_debuginfo(task);
-        /* Add to global list of all tasks */
-        clnt = task->tk_client;
-        if (clnt != NULL) {
-                spin_lock(&clnt->cl_lock);
-                list_add_tail(&task->tk_task, &clnt->cl_tasks);
-                spin_unlock(&clnt->cl_lock);
-        }
+        set_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
 }
 
 /*
@@ -319,11 +310,6 @@ static void __rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
         dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n",
                         task->tk_pid, rpc_qname(q), jiffies);
 
-        if (!RPC_IS_ASYNC(task) && !RPC_IS_ACTIVATED(task)) {
-                printk(KERN_ERR "RPC: Inactive synchronous task put to sleep!\n");
-                return;
-        }
-
         __rpc_add_wait_queue(q, task);
 
         BUG_ON(task->tk_callback != NULL);
@@ -334,8 +320,8 @@ static void __rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
 void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
                                 rpc_action action)
 {
-        /* Mark the task as being activated if so needed */
-        rpc_set_active(task);
+        /* We shouldn't ever put an inactive task to sleep */
+        BUG_ON(!RPC_IS_ACTIVATED(task));
 
         /*
          * Protect the queue operations.
@@ -807,26 +793,9 @@ static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *ta
         /* Initialize workqueue for async tasks */
         task->tk_workqueue = task_setup_data->workqueue;
 
-        task->tk_client = task_setup_data->rpc_client;
-        if (task->tk_client != NULL) {
-                kref_get(&task->tk_client->cl_kref);
-                if (task->tk_client->cl_softrtry)
-                        task->tk_flags |= RPC_TASK_SOFT;
-        }
-
         if (task->tk_ops->rpc_call_prepare != NULL)
                 task->tk_action = rpc_prepare_task;
 
-        if (task_setup_data->rpc_message != NULL) {
-                task->tk_msg.rpc_proc = task_setup_data->rpc_message->rpc_proc;
-                task->tk_msg.rpc_argp = task_setup_data->rpc_message->rpc_argp;
-                task->tk_msg.rpc_resp = task_setup_data->rpc_message->rpc_resp;
-                /* Bind the user cred */
-                rpcauth_bindcred(task, task_setup_data->rpc_message->rpc_cred, task_setup_data->flags);
-                if (task->tk_action == NULL)
-                        rpc_call_start(task);
-        }
-
         /* starting timestamp */
         task->tk_start = ktime_get();
 
@@ -896,10 +865,7 @@ void rpc_put_task(struct rpc_task *task)
                 xprt_release(task);
         if (task->tk_msg.rpc_cred)
                 rpcauth_unbindcred(task);
-        if (task->tk_client) {
-                rpc_release_client(task->tk_client);
-                task->tk_client = NULL;
-        }
+        rpc_task_release_client(task);
         if (task->tk_workqueue != NULL) {
                 INIT_WORK(&task->u.tk_work, rpc_async_release);
                 queue_work(task->tk_workqueue, &task->u.tk_work);
@@ -912,13 +878,6 @@ static void rpc_release_task(struct rpc_task *task)
 {
         dprintk("RPC: %5u release task\n", task->tk_pid);
 
-        if (!list_empty(&task->tk_task)) {
-                struct rpc_clnt *clnt = task->tk_client;
-                /* Remove from client task list */
-                spin_lock(&clnt->cl_lock);
-                list_del(&task->tk_task);
-                spin_unlock(&clnt->cl_lock);
-        }
         BUG_ON (RPC_IS_QUEUED(task));
 
         /* Wake up anyone who is waiting for task completion */
@@ -927,34 +886,6 @@ static void rpc_release_task(struct rpc_task *task)
         rpc_put_task(task);
 }
 
-/*
- * Kill all tasks for the given client.
- * XXX: kill their descendants as well?
- */
-void rpc_killall_tasks(struct rpc_clnt *clnt)
-{
-        struct rpc_task *rovr;
-
-
-        if (list_empty(&clnt->cl_tasks))
-                return;
-        dprintk("RPC:       killing all tasks for client %p\n", clnt);
-        /*
-         * Spin lock all_tasks to prevent changes...
-         */
-        spin_lock(&clnt->cl_lock);
-        list_for_each_entry(rovr, &clnt->cl_tasks, tk_task) {
-                if (! RPC_IS_ACTIVATED(rovr))
-                        continue;
-                if (!(rovr->tk_flags & RPC_TASK_KILLED)) {
-                        rovr->tk_flags |= RPC_TASK_KILLED;
-                        rpc_exit(rovr, -EIO);
-                }
-        }
-        spin_unlock(&clnt->cl_lock);
-}
-EXPORT_SYMBOL_GPL(rpc_killall_tasks);
-
 int rpciod_up(void)
 {
         return try_module_get(THIS_MODULE) ? 0 : -EINVAL;
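
Note: the client/task bookkeeping deleted above (taking a reference on the rpc_clnt, inheriting cl_softrtry, linking the task into clnt->cl_tasks, and unlinking it again on release) does not disappear; the new rpc_task_release_client() call in rpc_put_task() implies that it now lives on the rpc_clnt side in net/sunrpc/clnt.c, which is outside this diffstat. Below is a minimal C sketch of what such helpers would look like, reconstructed purely from the lines removed in this diff rather than copied from clnt.c; the rpc_task_set_client() name and the exact call sites are assumptions.

/*
 * Sketch only: reconstructed from the hunks removed above, not taken from
 * net/sunrpc/clnt.c. Helper names and placement are assumptions.
 */
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/sched.h>

/*
 * Counterpart of the cleanup dropped from rpc_put_task()/rpc_release_task():
 * unlink the task from the client's cl_tasks list and drop the client ref.
 */
void rpc_task_release_client(struct rpc_task *task)
{
        struct rpc_clnt *clnt = task->tk_client;

        if (clnt != NULL) {
                /* Remove from client task list */
                spin_lock(&clnt->cl_lock);
                list_del(&task->tk_task);
                spin_unlock(&clnt->cl_lock);
                task->tk_client = NULL;

                rpc_release_client(clnt);
        }
}

/*
 * Counterpart of the setup dropped from rpc_init_task()/rpc_set_active()
 * (hypothetical name): take a reference on the client, inherit the soft
 * retry flag, and add the task to the client's list of all tasks.
 */
void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt)
{
        if (clnt != NULL) {
                task->tk_client = clnt;
                kref_get(&clnt->cl_kref);
                if (clnt->cl_softrtry)
                        task->tk_flags |= RPC_TASK_SOFT;
                /* Add to the client's list of all tasks */
                spin_lock(&clnt->cl_lock);
                list_add_tail(&task->tk_task, &clnt->cl_tasks);
                spin_unlock(&clnt->cl_lock);
        }
}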