Diffstat (limited to 'net/sunrpc/sched.c')

-rw-r--r--	net/sunrpc/sched.c	99
1 file changed, 6 insertions(+), 93 deletions(-)

diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 5c3eee768504..6390461a9756 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -21,7 +21,6 @@
 #include <linux/mutex.h>

 #include <linux/sunrpc/clnt.h>
-#include <linux/sunrpc/xprt.h>

 #ifdef RPC_DEBUG
 #define RPCDBG_FACILITY		RPCDBG_SCHED
@@ -45,12 +44,6 @@ static void rpciod_killall(void);
 static void rpc_async_schedule(void *);

 /*
- * RPC tasks that create another task (e.g. for contacting the portmapper)
- * will wait on this queue for their child's completion
- */
-static RPC_WAITQ(childq, "childq");
-
-/*
  * RPC tasks sit here while waiting for conditions to improve.
  */
 static RPC_WAITQ(delay_queue, "delayq");
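The childq queue removed above existed only for the parent/child task
helpers that this same patch deletes at the bottom of the file: a parent
slept on childq via __rpc_sleep_on() while its child (e.g. a portmapper
call, per the removed comment) ran, and the child's completion callback
woke it. The surviving delay_queue shows the general pattern: RPC_WAITQ()
statically declares an rpc_wait_queue, and a task parks on it with
rpc_sleep_on() until a wake-up helper fires or its tk_timeout expires.
A minimal sketch of that pattern, using only calls that appear in this
file (the queue and function names are illustrative, not part of the
patch):

        static RPC_WAITQ(example_queue, "exampleq");    /* hypothetical queue */

        static void example_wait(struct rpc_task *task)
        {
                task->tk_timeout = 5 * HZ;      /* wake after 5s at the latest */
                rpc_sleep_on(&example_queue, task, NULL, NULL);
        }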
@@ -324,16 +317,6 @@ static void rpc_make_runnable(struct rpc_task *task)
 }

 /*
- * Place a newly initialized task on the workqueue.
- */
-static inline void
-rpc_schedule_run(struct rpc_task *task)
-{
-	rpc_set_active(task);
-	rpc_make_runnable(task);
-}
-
-/*
  * Prepare for sleeping on a wait queue.
  * By always appending tasks to the list we ensure FIFO behavior.
  * NB: An RPC task will only receive interrupt-driven events as long
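rpc_schedule_run() was a thin wrapper whose only remaining caller,
rpc_run_child() (deleted at the end of this patch), goes away with it.
Any code that still needs the same effect can open-code the two calls
the wrapper made, both of which remain in this file:

        /* Equivalent of the removed rpc_schedule_run(): mark the task
         * active, then queue it so it gets executed. */
        rpc_set_active(task);
        rpc_make_runnable(task);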
@@ -559,24 +542,20 @@ void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
 	spin_unlock_bh(&queue->lock);
 }

+static void __rpc_atrun(struct rpc_task *task)
+{
+	rpc_wake_up_task(task);
+}
+
 /*
  * Run a task at a later time
  */
-static void __rpc_atrun(struct rpc_task *);
-void
-rpc_delay(struct rpc_task *task, unsigned long delay)
+void rpc_delay(struct rpc_task *task, unsigned long delay)
 {
 	task->tk_timeout = delay;
 	rpc_sleep_on(&delay_queue, task, NULL, __rpc_atrun);
 }

-static void
-__rpc_atrun(struct rpc_task *task)
-{
-	task->tk_status = 0;
-	rpc_wake_up_task(task);
-}
-
 /*
  * Helper to call task->tk_ops->rpc_call_prepare
  */
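Note that this hunk is not a pure code move: defining __rpc_atrun()
ahead of rpc_delay() folds away the forward declaration, but the new
version also drops the old "task->tk_status = 0;" reset, so a task
woken from delay_queue now keeps whatever status it had when it went
to sleep. A typical rpc_delay() call site therefore looks something
like the sketch below; the next-state handler name is illustrative,
and callers that relied on the old reset must now clear tk_status
themselves:

        /* Back off for five seconds before retrying the call. */
        rpc_delay(task, 5 * HZ);
        task->tk_status = 0;                    /* old __rpc_atrun did this */
        task->tk_action = example_retry;        /* hypothetical handler */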
@@ -933,72 +912,6 @@ struct rpc_task *rpc_run_task(struct rpc_clnt *clnt, int flags,
 }
 EXPORT_SYMBOL(rpc_run_task);

-/**
- * rpc_find_parent - find the parent of a child task.
- * @child: child task
- * @parent: parent task
- *
- * Checks that the parent task is still sleeping on the
- * queue 'childq'. If so returns a pointer to the parent.
- * Upon failure returns NULL.
- *
- * Caller must hold childq.lock
- */
-static inline struct rpc_task *rpc_find_parent(struct rpc_task *child, struct rpc_task *parent)
-{
-	struct rpc_task *task;
-	struct list_head *le;
-
-	task_for_each(task, le, &childq.tasks[0])
-		if (task == parent)
-			return parent;
-
-	return NULL;
-}
-
-static void rpc_child_exit(struct rpc_task *child, void *calldata)
-{
-	struct rpc_task *parent;
-
-	spin_lock_bh(&childq.lock);
-	if ((parent = rpc_find_parent(child, calldata)) != NULL) {
-		parent->tk_status = child->tk_status;
-		__rpc_wake_up_task(parent);
-	}
-	spin_unlock_bh(&childq.lock);
-}
-
-static const struct rpc_call_ops rpc_child_ops = {
-	.rpc_call_done = rpc_child_exit,
-};
-
-/*
- * Note: rpc_new_task releases the client after a failure.
- */
-struct rpc_task *
-rpc_new_child(struct rpc_clnt *clnt, struct rpc_task *parent)
-{
-	struct rpc_task *task;
-
-	task = rpc_new_task(clnt, RPC_TASK_ASYNC | RPC_TASK_CHILD, &rpc_child_ops, parent);
-	if (!task)
-		goto fail;
-	return task;
-
-fail:
-	parent->tk_status = -ENOMEM;
-	return NULL;
-}
-
-void rpc_run_child(struct rpc_task *task, struct rpc_task *child, rpc_action func)
-{
-	spin_lock_bh(&childq.lock);
-	/* N.B. Is it possible for the child to have already finished? */
-	__rpc_sleep_on(&childq, task, func, NULL);
-	rpc_schedule_run(child);
-	spin_unlock_bh(&childq.lock);
-}
-
 /*
  * Kill all tasks for the given client.
  * XXX: kill their descendants as well?
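The deletion above removes the whole parent/child task mechanism
(rpc_find_parent(), rpc_child_exit(), rpc_child_ops, rpc_new_child(),
rpc_run_child()), matching the childq queue and xprt.h include dropped
in the earlier hunks. Code wanting comparable "spawn a helper call and
collect its status" behavior can build it from rpc_run_task(), which
this file exports just above. A hedged sketch, assuming the
rpc_run_task(clnt, flags, ops, data) form whose signature is truncated
in the hunk header; the example_* names are illustrative:

        static void example_done(struct rpc_task *child, void *calldata)
        {
                struct rpc_task *parent = calldata;

                parent->tk_status = child->tk_status;   /* as rpc_child_exit did */
                rpc_wake_up_task(parent);
        }

        static const struct rpc_call_ops example_ops = {
                .rpc_call_done = example_done,
        };

        /* In the parent's tk_action callout: start the helper call. */
        child = rpc_run_task(clnt, RPC_TASK_ASYNC, &example_ops, parent);

Unlike the removed rpc_run_child(), this sketch takes no lock around
queuing the parent and starting the child, so a real conversion must
handle the race the old "N.B." comment worried about: the child
completing before the parent has gone to sleep.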