Diffstat (limited to 'net/sunrpc/sched.c')
-rw-r--r--	net/sunrpc/sched.c	78
1 file changed, 40 insertions(+), 38 deletions(-)
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 54a6b92525ea..6d87320074b1 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -74,7 +74,7 @@ static DEFINE_SPINLOCK(rpc_sched_lock);
 static inline void
 __rpc_disable_timer(struct rpc_task *task)
 {
-	dprintk("RPC: %4d disabling timer\n", task->tk_pid);
+	dprintk("RPC: %5u disabling timer\n", task->tk_pid);
 	task->tk_timeout_fn = NULL;
 	task->tk_timeout = 0;
 }
@@ -93,7 +93,7 @@ static void rpc_run_timer(struct rpc_task *task)
 	callback = task->tk_timeout_fn;
 	task->tk_timeout_fn = NULL;
 	if (callback && RPC_IS_QUEUED(task)) {
-		dprintk("RPC: %4d running timer\n", task->tk_pid);
+		dprintk("RPC: %5u running timer\n", task->tk_pid);
 		callback(task);
 	}
 	smp_mb__before_clear_bit();
@@ -110,7 +110,7 @@ __rpc_add_timer(struct rpc_task *task, rpc_action timer)
 	if (!task->tk_timeout)
 		return;
 
-	dprintk("RPC: %4d setting alarm for %lu ms\n",
+	dprintk("RPC: %5u setting alarm for %lu ms\n",
 			task->tk_pid, task->tk_timeout * 1000 / HZ);
 
 	if (timer)
@@ -132,7 +132,7 @@ rpc_delete_timer(struct rpc_task *task)
 		return;
 	if (test_and_clear_bit(RPC_TASK_HAS_TIMER, &task->tk_runstate)) {
 		del_singleshot_timer_sync(&task->tk_timer);
-		dprintk("RPC: %4d deleting timer\n", task->tk_pid);
+		dprintk("RPC: %5u deleting timer\n", task->tk_pid);
 	}
 }
 
@@ -179,8 +179,8 @@ static void __rpc_add_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *
 	queue->qlen++;
 	rpc_set_queued(task);
 
-	dprintk("RPC: %4d added to queue %p \"%s\"\n",
+	dprintk("RPC: %5u added to queue %p \"%s\"\n",
 			task->tk_pid, queue, rpc_qname(queue));
 }
 
 /*
@@ -212,8 +212,8 @@ static void __rpc_remove_wait_queue(struct rpc_task *task)
 	else
 		list_del(&task->u.tk_wait.list);
 	queue->qlen--;
-	dprintk("RPC: %4d removed from queue %p \"%s\"\n",
+	dprintk("RPC: %5u removed from queue %p \"%s\"\n",
 			task->tk_pid, queue, rpc_qname(queue));
 }
 
 static inline void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
@@ -344,8 +344,8 @@ static void rpc_make_runnable(struct rpc_task *task)
 static void __rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
 			rpc_action action, rpc_action timer)
 {
-	dprintk("RPC: %4d sleep_on(queue \"%s\" time %ld)\n", task->tk_pid,
-				rpc_qname(q), jiffies);
+	dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n",
+			task->tk_pid, rpc_qname(q), jiffies);
 
 	if (!RPC_IS_ASYNC(task) && !RPC_IS_ACTIVATED(task)) {
 		printk(KERN_ERR "RPC: Inactive synchronous task put to sleep!\n");
@@ -381,7 +381,8 @@ void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
  */
 static void __rpc_do_wake_up_task(struct rpc_task *task)
 {
-	dprintk("RPC: %4d __rpc_wake_up_task (now %ld)\n", task->tk_pid, jiffies);
+	dprintk("RPC: %5u __rpc_wake_up_task (now %lu)\n",
+			task->tk_pid, jiffies);
 
 #ifdef RPC_DEBUG
 	BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID);
@@ -397,7 +398,7 @@ static void __rpc_do_wake_up_task(struct rpc_task *task)
 
 	rpc_make_runnable(task);
 
-	dprintk("RPC: __rpc_wake_up_task done\n");
+	dprintk("RPC:       __rpc_wake_up_task done\n");
 }
 
 /*
@@ -418,7 +419,7 @@ static void __rpc_wake_up_task(struct rpc_task *task)
 static void
 __rpc_default_timer(struct rpc_task *task)
 {
-	dprintk("RPC: %d timeout (default timer)\n", task->tk_pid);
+	dprintk("RPC: %5u timeout (default timer)\n", task->tk_pid);
 	task->tk_status = -ETIMEDOUT;
 	rpc_wake_up_task(task);
 }
@@ -502,7 +503,8 @@ struct rpc_task * rpc_wake_up_next(struct rpc_wait_queue *queue)
 {
 	struct rpc_task *task = NULL;
 
-	dprintk("RPC: wake_up_next(%p \"%s\")\n", queue, rpc_qname(queue));
+	dprintk("RPC:       wake_up_next(%p \"%s\")\n",
+			queue, rpc_qname(queue));
 	rcu_read_lock_bh();
 	spin_lock(&queue->lock);
 	if (RPC_IS_PRIORITY(queue))
@@ -625,12 +627,12 @@ void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
 /*
  * This is the RPC `scheduler' (or rather, the finite state machine).
  */
-static int __rpc_execute(struct rpc_task *task)
+static void __rpc_execute(struct rpc_task *task)
 {
 	int status = 0;
 
-	dprintk("RPC: %4d rpc_execute flgs %x\n",
+	dprintk("RPC: %5u __rpc_execute flags=0x%x\n",
 			task->tk_pid, task->tk_flags);
 
 	BUG_ON(RPC_IS_QUEUED(task));
 
@@ -679,14 +681,14 @@ static int __rpc_execute(struct rpc_task *task)
 		if (RPC_IS_ASYNC(task)) {
 			/* Careful! we may have raced... */
 			if (RPC_IS_QUEUED(task))
-				return 0;
+				return;
 			if (rpc_test_and_set_running(task))
-				return 0;
+				return;
 			continue;
 		}
 
 		/* sync task: sleep here */
-		dprintk("RPC: %4d sync task going to sleep\n", task->tk_pid);
+		dprintk("RPC: %5u sync task going to sleep\n", task->tk_pid);
 		/* Note: Caller should be using rpc_clnt_sigmask() */
 		status = out_of_line_wait_on_bit(&task->tk_runstate,
 				RPC_TASK_QUEUED, rpc_wait_bit_interruptible,
@@ -698,19 +700,19 @@ static int __rpc_execute(struct rpc_task *task)
 			 * clean up after sleeping on some queue, we don't
 			 * break the loop here, but go around once more.
 			 */
-			dprintk("RPC: %4d got signal\n", task->tk_pid);
+			dprintk("RPC: %5u got signal\n", task->tk_pid);
 			task->tk_flags |= RPC_TASK_KILLED;
 			rpc_exit(task, -ERESTARTSYS);
 			rpc_wake_up_task(task);
 		}
 		rpc_set_running(task);
-		dprintk("RPC: %4d sync task resuming\n", task->tk_pid);
+		dprintk("RPC: %5u sync task resuming\n", task->tk_pid);
 	}
 
-	dprintk("RPC: %4d, return %d, status %d\n", task->tk_pid, status, task->tk_status);
+	dprintk("RPC: %5u return %d, status %d\n", task->tk_pid, status,
+			task->tk_status);
 	/* Release all resources associated with the task */
 	rpc_release_task(task);
-	return status;
 }
 
 /*
@@ -722,12 +724,11 @@ static int __rpc_execute(struct rpc_task *task)
  * released. In particular note that tk_release() will have
  * been called, so your task memory may have been freed.
  */
-int
-rpc_execute(struct rpc_task *task)
+void rpc_execute(struct rpc_task *task)
 {
 	rpc_set_active(task);
 	rpc_set_running(task);
-	return __rpc_execute(task);
+	__rpc_execute(task);
 }
 
 static void rpc_async_schedule(struct work_struct *work)
@@ -826,7 +827,7 @@ void rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt, int flags, cons
 	/* starting timestamp */
 	task->tk_start = jiffies;
 
-	dprintk("RPC: %4d new task procpid %d\n", task->tk_pid,
+	dprintk("RPC:       new task initialized, procpid %u\n",
 			current->pid);
 }
 
@@ -839,7 +840,7 @@ rpc_alloc_task(void)
 static void rpc_free_task(struct rcu_head *rcu)
 {
 	struct rpc_task *task = container_of(rcu, struct rpc_task, u.tk_rcu);
-	dprintk("RPC: %4d freeing task\n", task->tk_pid);
+	dprintk("RPC: %5u freeing task\n", task->tk_pid);
 	mempool_free(task, rpc_task_mempool);
 }
 
@@ -858,7 +859,7 @@ struct rpc_task *rpc_new_task(struct rpc_clnt *clnt, int flags, const struct rpc
 
 	rpc_init_task(task, clnt, flags, tk_ops, calldata);
 
-	dprintk("RPC: %4d allocated task\n", task->tk_pid);
+	dprintk("RPC:       allocated task %p\n", task);
 	task->tk_flags |= RPC_TASK_DYNAMIC;
 out:
 	return task;
@@ -902,7 +903,7 @@ static void rpc_release_task(struct rpc_task *task)
 #ifdef RPC_DEBUG
 	BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID);
 #endif
-	dprintk("RPC: %4d release task\n", task->tk_pid);
+	dprintk("RPC: %5u release task\n", task->tk_pid);
 
 	/* Remove from global task list */
 	spin_lock(&rpc_sched_lock);
@@ -955,7 +956,7 @@ void rpc_killall_tasks(struct rpc_clnt *clnt)
 	struct rpc_task *rovr;
 	struct list_head *le;
 
-	dprintk("RPC: killing all tasks for client %p\n", clnt);
+	dprintk("RPC:       killing all tasks for client %p\n", clnt);
 
 	/*
 	 * Spin lock all_tasks to prevent changes...
@@ -984,7 +985,8 @@ static void rpciod_killall(void)
 		rpc_killall_tasks(NULL);
 		flush_workqueue(rpciod_workqueue);
 		if (!list_empty(&all_tasks)) {
-			dprintk("rpciod_killall: waiting for tasks to exit\n");
+			dprintk("RPC:       rpciod_killall: waiting for tasks "
+					"to exit\n");
 			yield();
 		}
 	}
@@ -1004,7 +1006,7 @@ rpciod_up(void)
 	int error = 0;
 
 	mutex_lock(&rpciod_mutex);
-	dprintk("rpciod_up: users %d\n", rpciod_users);
+	dprintk("RPC:       rpciod_up: users %u\n", rpciod_users);
 	rpciod_users++;
 	if (rpciod_workqueue)
 		goto out;
@@ -1012,7 +1014,7 @@ rpciod_up(void)
 	 * If there's no pid, we should be the first user.
 	 */
 	if (rpciod_users > 1)
-		printk(KERN_WARNING "rpciod_up: no workqueue, %d users??\n", rpciod_users);
+		printk(KERN_WARNING "rpciod_up: no workqueue, %u users??\n", rpciod_users);
 	/*
 	 * Create the rpciod thread and wait for it to start.
 	 */
@@ -1034,7 +1036,7 @@ void
 rpciod_down(void)
 {
 	mutex_lock(&rpciod_mutex);
-	dprintk("rpciod_down sema %d\n", rpciod_users);
+	dprintk("RPC:       rpciod_down sema %u\n", rpciod_users);
 	if (rpciod_users) {
 		if (--rpciod_users)
 			goto out;
@@ -1042,7 +1044,7 @@ rpciod_down(void)
 		printk(KERN_WARNING "rpciod_down: no users??\n");
 
 	if (!rpciod_workqueue) {
-		dprintk("rpciod_down: Nothing to do!\n");
+		dprintk("RPC:       rpciod_down: Nothing to do!\n");
 		goto out;
 	}
 	rpciod_killall();
@@ -1072,7 +1074,7 @@ void rpc_show_tasks(void)
 		if (RPC_IS_QUEUED(t))
 			rpc_waitq = rpc_qname(t->u.tk_wait.rpc_waitq);
 
-		printk("%05d %04d %04x %06d %8p %6d %8p %08ld %8s %8p %8p\n",
+		printk("%5u %04d %04x %6d %8p %6d %8p %8ld %8s %8p %8p\n",
 			t->tk_pid,
 			(t->tk_msg.rpc_proc ? t->tk_msg.rpc_proc->p_proc : -1),
 			t->tk_flags, t->tk_status,