author		Chuck Lever <chuck.lever@oracle.com>	2006-08-22 20:06:16 -0400
committer	Trond Myklebust <Trond.Myklebust@netapp.com>	2006-09-22 23:24:40 -0400
commit		5b1eacbcd78930d976eb50a93f1779d311b553d1 (patch)
tree		28e573a581f185a0f00e6dbd80435039e31d8c24 /net
parent		c4a5692fb83f23008c720fe84454d5603e80b103 (diff)
SUNRPC: Support for RPC child tasks no longer needed
The previous patches removed the last user of RPC child tasks, so we can
remove support for child tasks from net/sunrpc/sched.c now.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
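For context on what is being deleted: a "child task" let a running RPC task spawn a helper task (for example, a portmapper query), sleep on the childq wait queue, and resume once the helper handed its completion status back to the parent. The sketch below mimics that parent/child handshake in user space with pthreads. It is an illustrative analogue only, not kernel code; every name in it is invented.

/*
 * User-space analogue of the removed SUNRPC child-task pattern:
 * a "parent" blocks on a wait queue until its "child" finishes,
 * and the child hands back its status before waking the parent.
 * Build with: cc demo.c -o demo -lpthread
 */
#include <pthread.h>
#include <stdio.h>

struct child_task {
	pthread_mutex_t lock;	/* plays the role of childq.lock */
	pthread_cond_t  done;	/* parent sleeps here, like on childq */
	int             finished;
	int             status;	/* handed to the parent, like
				 * parent->tk_status = child->tk_status */
};

static void *child_run(void *arg)
{
	struct child_task *t = arg;

	/* ... do the child's work, e.g. query the portmapper ... */
	pthread_mutex_lock(&t->lock);
	t->status = 0;			/* the child's result */
	t->finished = 1;
	pthread_cond_signal(&t->done);	/* like __rpc_wake_up_task(parent) */
	pthread_mutex_unlock(&t->lock);
	return NULL;
}

int main(void)
{
	struct child_task t = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.done = PTHREAD_COND_INITIALIZER,
	};
	pthread_t child;

	pthread_create(&child, NULL, child_run, &t);

	/* Parent "sleeps on childq" until the child completes. */
	pthread_mutex_lock(&t.lock);
	while (!t.finished)
		pthread_cond_wait(&t.done, &t.lock);
	pthread_mutex_unlock(&t.lock);

	printf("child finished with status %d\n", t.status);
	pthread_join(child, NULL);
	return 0;
}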
Diffstat (limited to 'net')
-rw-r--r--	net/sunrpc/sched.c	82
1 file changed, 0 insertions, 82 deletions
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 5c3eee768504..015ffe423a2f 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -45,12 +45,6 @@ static void rpciod_killall(void);
 static void rpc_async_schedule(void *);
 
 /*
- * RPC tasks that create another task (e.g. for contacting the portmapper)
- * will wait on this queue for their child's completion
- */
-static RPC_WAITQ(childq, "childq");
-
-/*
  * RPC tasks sit here while waiting for conditions to improve.
  */
 static RPC_WAITQ(delay_queue, "delayq");
@@ -324,16 +318,6 @@ static void rpc_make_runnable(struct rpc_task *task)
 }
 
 /*
- * Place a newly initialized task on the workqueue.
- */
-static inline void
-rpc_schedule_run(struct rpc_task *task)
-{
-	rpc_set_active(task);
-	rpc_make_runnable(task);
-}
-
-/*
  * Prepare for sleeping on a wait queue.
  * By always appending tasks to the list we ensure FIFO behavior.
  * NB: An RPC task will only receive interrupt-driven events as long
@@ -933,72 +917,6 @@ struct rpc_task *rpc_run_task(struct rpc_clnt *clnt, int flags,
 }
 EXPORT_SYMBOL(rpc_run_task);
 
-/**
- * rpc_find_parent - find the parent of a child task.
- * @child: child task
- * @parent: parent task
- *
- * Checks that the parent task is still sleeping on the
- * queue 'childq'. If so returns a pointer to the parent.
- * Upon failure returns NULL.
- *
- * Caller must hold childq.lock
- */
-static inline struct rpc_task *rpc_find_parent(struct rpc_task *child, struct rpc_task *parent)
-{
-	struct rpc_task *task;
-	struct list_head *le;
-
-	task_for_each(task, le, &childq.tasks[0])
-		if (task == parent)
-			return parent;
-
-	return NULL;
-}
-
-static void rpc_child_exit(struct rpc_task *child, void *calldata)
-{
-	struct rpc_task *parent;
-
-	spin_lock_bh(&childq.lock);
-	if ((parent = rpc_find_parent(child, calldata)) != NULL) {
-		parent->tk_status = child->tk_status;
-		__rpc_wake_up_task(parent);
-	}
-	spin_unlock_bh(&childq.lock);
-}
-
-static const struct rpc_call_ops rpc_child_ops = {
-	.rpc_call_done = rpc_child_exit,
-};
-
-/*
- * Note: rpc_new_task releases the client after a failure.
- */
-struct rpc_task *
-rpc_new_child(struct rpc_clnt *clnt, struct rpc_task *parent)
-{
-	struct rpc_task *task;
-
-	task = rpc_new_task(clnt, RPC_TASK_ASYNC | RPC_TASK_CHILD, &rpc_child_ops, parent);
-	if (!task)
-		goto fail;
-	return task;
-
-fail:
-	parent->tk_status = -ENOMEM;
-	return NULL;
-}
-
-void rpc_run_child(struct rpc_task *task, struct rpc_task *child, rpc_action func)
-{
-	spin_lock_bh(&childq.lock);
-	/* N.B. Is it possible for the child to have already finished? */
-	__rpc_sleep_on(&childq, task, func, NULL);
-	rpc_schedule_run(child);
-	spin_unlock_bh(&childq.lock);
-}
-
 /*
  * Kill all tasks for the given client.
  * XXX: kill their descendants as well?
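For readers who never saw the removed interface in use, a caller paired the two public entry points roughly as follows. This is a hypothetical reconstruction from the signatures deleted above, not the caller code the earlier patches in this series actually removed; demo_contact_portmapper and demo_bind_status are invented names.

/* Hypothetical caller of the removed child-task API (sketch only). */
static void demo_bind_status(struct rpc_task *parent)
{
	/* The parent's next action: runs after rpc_child_exit() has
	 * copied the child's tk_status into the parent and woken it
	 * from childq. */
}

static void demo_contact_portmapper(struct rpc_task *parent,
				    struct rpc_clnt *pmap_clnt)
{
	struct rpc_task *child;

	/* On failure the removed rpc_new_child() set
	 * parent->tk_status = -ENOMEM and returned NULL. */
	child = rpc_new_child(pmap_clnt, parent);
	if (child == NULL)
		return;

	/* Parent goes to sleep on childq; the child is scheduled. */
	rpc_run_child(parent, child, demo_bind_status);
}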