-rw-r--r--  include/linux/sunrpc/sched.h   5
-rw-r--r--  net/sunrpc/sched.c            82
2 files changed, 0 insertions, 87 deletions
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index 82a91bb22362..f399c138f79d 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -127,7 +127,6 @@ struct rpc_call_ops {
  */
 #define RPC_TASK_ASYNC		0x0001		/* is an async task */
 #define RPC_TASK_SWAPPER	0x0002		/* is swapping in/out */
-#define RPC_TASK_CHILD		0x0008		/* is child of other task */
 #define RPC_CALL_MAJORSEEN	0x0020		/* major timeout seen */
 #define RPC_TASK_ROOTCREDS	0x0040		/* force root creds */
 #define RPC_TASK_DYNAMIC	0x0080		/* task was kmalloc'ed */
@@ -136,7 +135,6 @@ struct rpc_call_ops {
 #define RPC_TASK_NOINTR		0x0400		/* uninterruptible task */
 
 #define RPC_IS_ASYNC(t)		((t)->tk_flags & RPC_TASK_ASYNC)
-#define RPC_IS_CHILD(t)		((t)->tk_flags & RPC_TASK_CHILD)
 #define RPC_IS_SWAPPER(t)	((t)->tk_flags & RPC_TASK_SWAPPER)
 #define RPC_DO_ROOTOVERRIDE(t)	((t)->tk_flags & RPC_TASK_ROOTCREDS)
 #define RPC_ASSASSINATED(t)	((t)->tk_flags & RPC_TASK_KILLED)
@@ -253,7 +251,6 @@ struct rpc_task *rpc_new_task(struct rpc_clnt *, int flags,
 				const struct rpc_call_ops *ops, void *data);
 struct rpc_task *rpc_run_task(struct rpc_clnt *clnt, int flags,
 				const struct rpc_call_ops *ops, void *data);
-struct rpc_task *rpc_new_child(struct rpc_clnt *, struct rpc_task *parent);
 void		rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt,
 				int flags, const struct rpc_call_ops *ops,
 				void *data);
@@ -261,8 +258,6 @@ void rpc_release_task(struct rpc_task *);
 void		rpc_exit_task(struct rpc_task *);
 void		rpc_killall_tasks(struct rpc_clnt *);
 int		rpc_execute(struct rpc_task *);
-void		rpc_run_child(struct rpc_task *parent, struct rpc_task *child,
-				rpc_action action);
 void		rpc_init_priority_wait_queue(struct rpc_wait_queue *, const char *);
 void		rpc_init_wait_queue(struct rpc_wait_queue *, const char *);
 void		rpc_sleep_on(struct rpc_wait_queue *, struct rpc_task *,
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 5c3eee768504..015ffe423a2f 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -45,12 +45,6 @@ static void rpciod_killall(void);
 static void			rpc_async_schedule(void *);
 
 /*
- * RPC tasks that create another task (e.g. for contacting the portmapper)
- * will wait on this queue for their child's completion
- */
-static RPC_WAITQ(childq, "childq");
-
-/*
  * RPC tasks sit here while waiting for conditions to improve.
  */
 static RPC_WAITQ(delay_queue, "delayq");
@@ -324,16 +318,6 @@ static void rpc_make_runnable(struct rpc_task *task)
 }
 
 /*
- * Place a newly initialized task on the workqueue.
- */
-static inline void
-rpc_schedule_run(struct rpc_task *task)
-{
-	rpc_set_active(task);
-	rpc_make_runnable(task);
-}
-
-/*
  * Prepare for sleeping on a wait queue.
  * By always appending tasks to the list we ensure FIFO behavior.
  * NB: An RPC task will only receive interrupt-driven events as long
@@ -933,72 +917,6 @@ struct rpc_task *rpc_run_task(struct rpc_clnt *clnt, int flags,
 }
 EXPORT_SYMBOL(rpc_run_task);
 
-/**
- * rpc_find_parent - find the parent of a child task.
- * @child: child task
- * @parent: parent task
- *
- * Checks that the parent task is still sleeping on the
- * queue 'childq'. If so returns a pointer to the parent.
- * Upon failure returns NULL.
- *
- * Caller must hold childq.lock
- */
-static inline struct rpc_task *rpc_find_parent(struct rpc_task *child, struct rpc_task *parent)
-{
-	struct rpc_task *task;
-	struct list_head *le;
-
-	task_for_each(task, le, &childq.tasks[0])
-		if (task == parent)
-			return parent;
-
-	return NULL;
-}
-
-static void rpc_child_exit(struct rpc_task *child, void *calldata)
-{
-	struct rpc_task *parent;
-
-	spin_lock_bh(&childq.lock);
-	if ((parent = rpc_find_parent(child, calldata)) != NULL) {
-		parent->tk_status = child->tk_status;
-		__rpc_wake_up_task(parent);
-	}
-	spin_unlock_bh(&childq.lock);
-}
-
-static const struct rpc_call_ops rpc_child_ops = {
-	.rpc_call_done = rpc_child_exit,
-};
-
-/*
- * Note: rpc_new_task releases the client after a failure.
- */
-struct rpc_task *
-rpc_new_child(struct rpc_clnt *clnt, struct rpc_task *parent)
-{
-	struct rpc_task *task;
-
-	task = rpc_new_task(clnt, RPC_TASK_ASYNC | RPC_TASK_CHILD, &rpc_child_ops, parent);
-	if (!task)
-		goto fail;
-	return task;
-
-fail:
-	parent->tk_status = -ENOMEM;
-	return NULL;
-}
-
-void rpc_run_child(struct rpc_task *task, struct rpc_task *child, rpc_action func)
-{
-	spin_lock_bh(&childq.lock);
-	/* N.B. Is it possible for the child to have already finished? */
-	__rpc_sleep_on(&childq, task, func, NULL);
-	rpc_schedule_run(child);
-	spin_unlock_bh(&childq.lock);
-}
-
 /*
  * Kill all tasks for the given client.
  * XXX: kill their descendants as well?