summaryrefslogtreecommitdiffstats
path: root/net/sunrpc/sched.c
diff options
context:
space:
mode:
Diffstat (limited to 'net/sunrpc/sched.c')
-rw-r--r--  net/sunrpc/sched.c  72
1 file changed, 41 insertions(+), 31 deletions(-)
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index f8ea362fae91..f820780280b5 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -47,7 +47,7 @@ static mempool_t *rpc_buffer_mempool __read_mostly;
 
 static void rpc_async_schedule(struct work_struct *);
 static void rpc_release_task(struct rpc_task *task);
-static void __rpc_queue_timer_fn(struct timer_list *t);
+static void __rpc_queue_timer_fn(struct work_struct *);
 
 /*
  * RPC tasks sit here while waiting for conditions to improve.
@@ -88,13 +88,19 @@ __rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
 	task->tk_timeout = 0;
 	list_del(&task->u.tk_wait.timer_list);
 	if (list_empty(&queue->timer_list.list))
-		del_timer(&queue->timer_list.timer);
+		cancel_delayed_work(&queue->timer_list.dwork);
 }
 
 static void
 rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires)
 {
-	timer_reduce(&queue->timer_list.timer, expires);
+	unsigned long now = jiffies;
+	queue->timer_list.expires = expires;
+	if (time_before_eq(expires, now))
+		expires = 0;
+	else
+		expires -= now;
+	mod_delayed_work(rpciod_workqueue, &queue->timer_list.dwork, expires);
 }
 
 /*
@@ -108,7 +114,8 @@ __rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task,
 			task->tk_pid, jiffies_to_msecs(timeout - jiffies));
 
 	task->tk_timeout = timeout;
-	rpc_set_queue_timer(queue, timeout);
+	if (list_empty(&queue->timer_list.list) || time_before(timeout, queue->timer_list.expires))
+		rpc_set_queue_timer(queue, timeout);
 	list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
 }
 
@@ -251,7 +258,8 @@ static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const c
 	queue->maxpriority = nr_queues - 1;
 	rpc_reset_waitqueue_priority(queue);
 	queue->qlen = 0;
-	timer_setup(&queue->timer_list.timer, __rpc_queue_timer_fn, 0);
+	queue->timer_list.expires = 0;
+	INIT_DEFERRABLE_WORK(&queue->timer_list.dwork, __rpc_queue_timer_fn);
 	INIT_LIST_HEAD(&queue->timer_list.list);
 	rpc_assign_waitqueue_name(queue, qname);
 }
@@ -270,7 +278,7 @@ EXPORT_SYMBOL_GPL(rpc_init_wait_queue);
 
 void rpc_destroy_wait_queue(struct rpc_wait_queue *queue)
 {
-	del_timer_sync(&queue->timer_list.timer);
+	cancel_delayed_work_sync(&queue->timer_list.dwork);
 }
 EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);
 
@@ -425,9 +433,9 @@ void rpc_sleep_on_timeout(struct rpc_wait_queue *q, struct rpc_task *task,
 	/*
 	 * Protect the queue operations.
 	 */
-	spin_lock_bh(&q->lock);
+	spin_lock(&q->lock);
 	__rpc_sleep_on_priority_timeout(q, task, timeout, task->tk_priority);
-	spin_unlock_bh(&q->lock);
+	spin_unlock(&q->lock);
 }
 EXPORT_SYMBOL_GPL(rpc_sleep_on_timeout);
 
@@ -443,9 +451,9 @@ void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
 	/*
 	 * Protect the queue operations.
 	 */
-	spin_lock_bh(&q->lock);
+	spin_lock(&q->lock);
 	__rpc_sleep_on_priority(q, task, task->tk_priority);
-	spin_unlock_bh(&q->lock);
+	spin_unlock(&q->lock);
 }
 EXPORT_SYMBOL_GPL(rpc_sleep_on);
 
@@ -459,9 +467,9 @@ void rpc_sleep_on_priority_timeout(struct rpc_wait_queue *q,
 	/*
 	 * Protect the queue operations.
 	 */
-	spin_lock_bh(&q->lock);
+	spin_lock(&q->lock);
 	__rpc_sleep_on_priority_timeout(q, task, timeout, priority);
-	spin_unlock_bh(&q->lock);
+	spin_unlock(&q->lock);
 }
 EXPORT_SYMBOL_GPL(rpc_sleep_on_priority_timeout);
 
@@ -476,9 +484,9 @@ void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task,
 	/*
 	 * Protect the queue operations.
 	 */
-	spin_lock_bh(&q->lock);
+	spin_lock(&q->lock);
 	__rpc_sleep_on_priority(q, task, priority);
-	spin_unlock_bh(&q->lock);
+	spin_unlock(&q->lock);
 }
 EXPORT_SYMBOL_GPL(rpc_sleep_on_priority);
 
@@ -556,9 +564,9 @@ void rpc_wake_up_queued_task_on_wq(struct workqueue_struct *wq,
 {
 	if (!RPC_IS_QUEUED(task))
 		return;
-	spin_lock_bh(&queue->lock);
+	spin_lock(&queue->lock);
 	rpc_wake_up_task_on_wq_queue_locked(wq, queue, task);
-	spin_unlock_bh(&queue->lock);
+	spin_unlock(&queue->lock);
 }
 
 /*
@@ -568,9 +576,9 @@ void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task
 {
 	if (!RPC_IS_QUEUED(task))
 		return;
-	spin_lock_bh(&queue->lock);
+	spin_lock(&queue->lock);
 	rpc_wake_up_task_queue_locked(queue, task);
-	spin_unlock_bh(&queue->lock);
+	spin_unlock(&queue->lock);
 }
 EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task);
 
@@ -603,9 +611,9 @@ rpc_wake_up_queued_task_set_status(struct rpc_wait_queue *queue,
 {
 	if (!RPC_IS_QUEUED(task))
 		return;
-	spin_lock_bh(&queue->lock);
+	spin_lock(&queue->lock);
 	rpc_wake_up_task_queue_set_status_locked(queue, task, status);
-	spin_unlock_bh(&queue->lock);
+	spin_unlock(&queue->lock);
 }
 
 /*
@@ -668,12 +676,12 @@ struct rpc_task *rpc_wake_up_first_on_wq(struct workqueue_struct *wq,
 
 	dprintk("RPC: wake_up_first(%p \"%s\")\n",
 			queue, rpc_qname(queue));
-	spin_lock_bh(&queue->lock);
+	spin_lock(&queue->lock);
 	task = __rpc_find_next_queued(queue);
 	if (task != NULL)
 		task = rpc_wake_up_task_on_wq_queue_action_locked(wq, queue,
 				task, func, data);
-	spin_unlock_bh(&queue->lock);
+	spin_unlock(&queue->lock);
 
 	return task;
 }
@@ -712,7 +720,7 @@ void rpc_wake_up(struct rpc_wait_queue *queue)
 {
 	struct list_head *head;
 
-	spin_lock_bh(&queue->lock);
+	spin_lock(&queue->lock);
 	head = &queue->tasks[queue->maxpriority];
 	for (;;) {
 		while (!list_empty(head)) {
@@ -726,7 +734,7 @@ void rpc_wake_up(struct rpc_wait_queue *queue)
 			break;
 		head--;
 	}
-	spin_unlock_bh(&queue->lock);
+	spin_unlock(&queue->lock);
 }
 EXPORT_SYMBOL_GPL(rpc_wake_up);
 
@@ -741,7 +749,7 @@ void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
 {
 	struct list_head *head;
 
-	spin_lock_bh(&queue->lock);
+	spin_lock(&queue->lock);
 	head = &queue->tasks[queue->maxpriority];
 	for (;;) {
 		while (!list_empty(head)) {
@@ -756,13 +764,15 @@ void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
 			break;
 		head--;
 	}
-	spin_unlock_bh(&queue->lock);
+	spin_unlock(&queue->lock);
 }
 EXPORT_SYMBOL_GPL(rpc_wake_up_status);
 
-static void __rpc_queue_timer_fn(struct timer_list *t)
+static void __rpc_queue_timer_fn(struct work_struct *work)
 {
-	struct rpc_wait_queue *queue = from_timer(queue, t, timer_list.timer);
+	struct rpc_wait_queue *queue = container_of(work,
+			struct rpc_wait_queue,
+			timer_list.dwork.work);
 	struct rpc_task *task, *n;
 	unsigned long expires, now, timeo;
 
@@ -932,13 +942,13 @@ static void __rpc_execute(struct rpc_task *task)
 		 * rpc_task pointer may still be dereferenced.
 		 */
 		queue = task->tk_waitqueue;
-		spin_lock_bh(&queue->lock);
+		spin_lock(&queue->lock);
 		if (!RPC_IS_QUEUED(task)) {
-			spin_unlock_bh(&queue->lock);
+			spin_unlock(&queue->lock);
 			continue;
 		}
 		rpc_clear_running(task);
-		spin_unlock_bh(&queue->lock);
+		spin_unlock(&queue->lock);
 		if (task_is_async)
 			return;
 