Diffstat (limited to 'net/sunrpc/sched.c')
 -rw-r--r--  net/sunrpc/sched.c  73
 1 file changed, 57 insertions(+), 16 deletions(-)
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 3341d896278..994cfea2bad 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -28,6 +28,9 @@
 #define RPCDBG_FACILITY		RPCDBG_SCHED
 #endif
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/sunrpc.h>
+
 /*
  * RPC slabs and memory pools
  */
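Background on the two added lines (general tracepoint mechanics, not something this patch spells out): #define CREATE_TRACE_POINTS must appear in exactly one compilation unit before the trace header is included; it makes the TRACE_EVENT() macros in <trace/events/sunrpc.h> expand into the actual tracepoint definitions, while every other includer only gets the lightweight trace_*() call sites. A hypothetical event definition in that header might look roughly like the sketch below; the field choices and format string are illustrative assumptions, not the real ones.

/* Hypothetical sketch of one event in include/trace/events/sunrpc.h.
 * Field selection and format string are illustrative assumptions. */
TRACE_EVENT(rpc_task_begin,

	TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task,
		 const void *action),

	TP_ARGS(clnt, task, action),

	TP_STRUCT__entry(
		__field(const void *, clnt)
		__field(unsigned long, runstate)
		__field(int, status)
	),

	TP_fast_assign(
		__entry->clnt = clnt;
		__entry->runstate = task->tk_runstate;
		__entry->status = task->tk_status;
	),

	TP_printk("clnt=%p runstate=%lu status=%d",
		  __entry->clnt, __entry->runstate, __entry->status)
);

Once such an event is defined, the trace_rpc_task_begin() call sites added below compile to effectively nothing unless the event is enabled at runtime.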
@@ -205,9 +208,7 @@ static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const c
 	queue->qlen = 0;
 	setup_timer(&queue->timer_list.timer, __rpc_queue_timer_fn, (unsigned long)queue);
 	INIT_LIST_HEAD(&queue->timer_list.list);
-#ifdef RPC_DEBUG
-	queue->name = qname;
-#endif
+	rpc_assign_waitqueue_name(queue, qname);
 }
 
 void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname)
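rpc_assign_waitqueue_name() is not defined in this hunk, so the following is only a plausible sketch of the helper it introduces, assuming it simply hides the RPC_DEBUG-only name assignment that used to be open-coded here (the queue name is presumably also wanted by the new tracepoints):

/* Plausible sketch only; the real definition lives outside this diff. */
static void rpc_assign_waitqueue_name(struct rpc_wait_queue *queue,
				      const char *qname)
{
#ifdef RPC_DEBUG
	queue->name = qname;	/* keeps the old behaviour under RPC_DEBUG */
#endif
}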
@@ -251,6 +252,8 @@ static inline void rpc_task_set_debuginfo(struct rpc_task *task)
 
 static void rpc_set_active(struct rpc_task *task)
 {
+	trace_rpc_task_begin(task->tk_client, task, NULL);
+
 	rpc_task_set_debuginfo(task);
 	set_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
 }
@@ -267,6 +270,8 @@ static int rpc_complete_task(struct rpc_task *task)
 	unsigned long flags;
 	int ret;
 
+	trace_rpc_task_complete(task->tk_client, task, NULL);
+
 	spin_lock_irqsave(&wq->lock, flags);
 	clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
 	ret = atomic_dec_and_test(&task->tk_count);
@@ -324,6 +329,8 @@ static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
 	dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n",
 			task->tk_pid, rpc_qname(q), jiffies);
 
+	trace_rpc_task_sleep(task->tk_client, task, q);
+
 	__rpc_add_wait_queue(q, task, queue_priority);
 
 	BUG_ON(task->tk_callback != NULL);
@@ -378,6 +385,8 @@ static void __rpc_do_wake_up_task(struct rpc_wait_queue *queue, struct rpc_task
 		return;
 	}
 
+	trace_rpc_task_wakeup(task->tk_client, task, queue);
+
 	__rpc_remove_wait_queue(queue, task);
 
 	rpc_make_runnable(task);
@@ -422,7 +431,7 @@ EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task);
 /*
  * Wake up the next task on a priority queue.
  */
-static struct rpc_task * __rpc_wake_up_next_priority(struct rpc_wait_queue *queue)
+static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *queue)
 {
 	struct list_head *q;
 	struct rpc_task *task;
@@ -467,30 +476,54 @@ new_queue:
 new_owner:
 	rpc_set_waitqueue_owner(queue, task->tk_owner);
 out:
-	rpc_wake_up_task_queue_locked(queue, task);
 	return task;
 }
 
+static struct rpc_task *__rpc_find_next_queued(struct rpc_wait_queue *queue)
+{
+	if (RPC_IS_PRIORITY(queue))
+		return __rpc_find_next_queued_priority(queue);
+	if (!list_empty(&queue->tasks[0]))
+		return list_first_entry(&queue->tasks[0], struct rpc_task, u.tk_wait.list);
+	return NULL;
+}
+
 /*
- * Wake up the next task on the wait queue.
+ * Wake up the first task on the wait queue.
  */
-struct rpc_task * rpc_wake_up_next(struct rpc_wait_queue *queue)
+struct rpc_task *rpc_wake_up_first(struct rpc_wait_queue *queue,
+		bool (*func)(struct rpc_task *, void *), void *data)
 {
 	struct rpc_task *task = NULL;
 
-	dprintk("RPC: wake_up_next(%p \"%s\")\n",
+	dprintk("RPC: wake_up_first(%p \"%s\")\n",
 			queue, rpc_qname(queue));
 	spin_lock_bh(&queue->lock);
-	if (RPC_IS_PRIORITY(queue))
-		task = __rpc_wake_up_next_priority(queue);
-	else {
-		task_for_first(task, &queue->tasks[0])
+	task = __rpc_find_next_queued(queue);
+	if (task != NULL) {
+		if (func(task, data))
 			rpc_wake_up_task_queue_locked(queue, task);
+		else
+			task = NULL;
 	}
 	spin_unlock_bh(&queue->lock);
 
 	return task;
 }
+EXPORT_SYMBOL_GPL(rpc_wake_up_first);
+
+static bool rpc_wake_up_next_func(struct rpc_task *task, void *data)
+{
+	return true;
+}
+
+/*
+ * Wake up the next task on the wait queue.
+*/
+struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *queue)
+{
+	return rpc_wake_up_first(queue, rpc_wake_up_next_func, NULL);
+}
 EXPORT_SYMBOL_GPL(rpc_wake_up_next);
 
 /**
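The new rpc_wake_up_first() is exported, so transport code can wake the head of a queue only when it passes a caller-supplied test. A hypothetical usage sketch follows; the predicate name and condition are made up for illustration and are not part of this patch. Note that, as the code above shows, if the predicate rejects the first queued task nothing is woken and NULL is returned; the queue is not scanned further.

/* Hypothetical caller: wake the first queued task only if it still has an
 * RPC request attached.  Illustrative only, not an in-tree user. */
static bool example_task_has_request(struct rpc_task *task, void *data)
{
	return task->tk_rqstp != NULL;
}

static struct rpc_task *example_wake_one(struct rpc_wait_queue *queue)
{
	return rpc_wake_up_first(queue, example_task_has_request, NULL);
}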
@@ -501,14 +534,18 @@ EXPORT_SYMBOL_GPL(rpc_wake_up_next);
  */
 void rpc_wake_up(struct rpc_wait_queue *queue)
 {
-	struct rpc_task *task, *next;
 	struct list_head *head;
 
 	spin_lock_bh(&queue->lock);
 	head = &queue->tasks[queue->maxpriority];
 	for (;;) {
-		list_for_each_entry_safe(task, next, head, u.tk_wait.list)
+		while (!list_empty(head)) {
+			struct rpc_task *task;
+			task = list_first_entry(head,
+					struct rpc_task,
+					u.tk_wait.list);
 			rpc_wake_up_task_queue_locked(queue, task);
+		}
 		if (head == &queue->tasks[0])
 			break;
 		head--;
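A likely reason for replacing list_for_each_entry_safe() with a drain loop (an inference; the page shows no changelog): waking a task on a priority queue can dequeue entries beyond the one being visited, which would leave the iterator's cached next pointer stale. Re-reading the head on every pass makes the loop immune to that, at the cost of restating the pattern explicitly:

/* Drain pattern used by the reworked rpc_wake_up()/rpc_wake_up_status():
 * always take the current first entry, so any additional removals done by
 * the wake-up path cannot leave the loop holding a dangling cursor. */
while (!list_empty(head)) {
	struct rpc_task *task = list_first_entry(head, struct rpc_task,
						 u.tk_wait.list);
	rpc_wake_up_task_queue_locked(queue, task);	/* may remove more than one entry */
}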
@@ -526,13 +563,16 @@ EXPORT_SYMBOL_GPL(rpc_wake_up);
  */
 void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
 {
-	struct rpc_task *task, *next;
 	struct list_head *head;
 
 	spin_lock_bh(&queue->lock);
 	head = &queue->tasks[queue->maxpriority];
 	for (;;) {
-		list_for_each_entry_safe(task, next, head, u.tk_wait.list) {
+		while (!list_empty(head)) {
+			struct rpc_task *task;
+			task = list_first_entry(head,
+					struct rpc_task,
+					u.tk_wait.list);
 			task->tk_status = status;
 			rpc_wake_up_task_queue_locked(queue, task);
 		}
@@ -677,6 +717,7 @@ static void __rpc_execute(struct rpc_task *task)
 			if (do_action == NULL)
 				break;
 		}
+		trace_rpc_task_run_action(task->tk_client, task, task->tk_action);
 		do_action(task);
 
 		/*