author	Trond Myklebust <Trond.Myklebust@netapp.com>	2012-03-19 13:39:35 -0400
committer	Trond Myklebust <Trond.Myklebust@netapp.com>	2012-03-19 14:15:02 -0400
commit	540a0f7584169651f485e8ab67461fcb06934e38 (patch)
tree	5a5dede243b80f1dca55f651ff2a78eaccff5c50 /net/sunrpc
parent	e49a29bd0eacce9d4956c4daf777a330115b369d (diff)
SUNRPC: We must not use list_for_each_entry_safe() in rpc_wake_up()
The problem is that, for the case of priority queues, we have to assume
that __rpc_remove_wait_queue_priority will move new elements from the
tk_wait.links lists into the queue->tasks[] list. We therefore cannot
use list_for_each_entry_safe() on queue->tasks[], since that will skip
these new tasks that __rpc_remove_wait_queue_priority is adding.

Without this fix, rpc_wake_up and rpc_wake_up_status will both fail to
wake up all tasks on priority wait queues, which can result in some
nasty hangs.

Reported-by: Andy Adamson <andros@netapp.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
Cc: stable@vger.kernel.org
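To make the failure mode concrete, the sketch below is a minimal, self-contained C program, not kernel code: the list helpers are pared-down stand-ins for <linux/list.h>, and struct task, wake_task(), and the "first"/"hidden" entries are invented purely for this illustration. Waking the first entry splices a second entry onto the queue, mimicking __rpc_remove_wait_queue_priority() moving a task from tk_wait.links onto queue->tasks[]; the cached-next loop (the list_for_each_entry_safe() pattern) then terminates on its stale next pointer and never visits the new entry, while the while (!list_empty()) pattern used by the patch picks it up.

/*
 * Minimal sketch (not kernel code).  The list helpers below are
 * simplified stand-ins for <linux/list.h>; struct task, wake_task()
 * and the entry names are invented for this illustration only.
 */
#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *entry, struct list_head *head)
{
	entry->prev = head->prev;
	entry->next = head;
	head->prev->next = entry;
	head->prev = entry;
}

static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
	entry->next = entry->prev = NULL;
}

static int list_empty(const struct list_head *h) { return h->next == h; }

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Toy stand-in for struct rpc_task: "linked" only appears on the main
 * queue once its owner is dequeued, mimicking a task being moved from
 * tk_wait.links onto queue->tasks[]. */
struct task {
	const char *name;
	struct list_head list;
	struct task *linked;
};

static struct list_head queue;

static void wake_task(struct task *t)
{
	printf("waking %s\n", t->name);
	list_del(&t->list);
	if (t->linked)	/* side effect: a new entry joins the queue */
		list_add_tail(&t->linked->list, &queue);
}

int main(void)
{
	struct task hidden = { .name = "hidden", .linked = NULL };
	struct task first  = { .name = "first",  .linked = &hidden };

	INIT_LIST_HEAD(&queue);
	list_add_tail(&first.list, &queue);

	/*
	 * Buggy pattern: cache the next pointer up front, as
	 * list_for_each_entry_safe() does.  Waking "first" splices
	 * "hidden" onto the queue, but the cached next already points
	 * at the list head, so "hidden" is never visited.
	 */
	for (struct list_head *pos = queue.next, *n = pos->next;
	     pos != &queue; pos = n, n = pos->next)
		wake_task(container_of(pos, struct task, list));

	printf("after cached-next loop, queue empty? %s\n",
	       list_empty(&queue) ? "yes" : "no (a task was skipped)");

	/* Fixed pattern from the patch: always take the current head. */
	while (!list_empty(&queue))
		wake_task(container_of(queue.next, struct task, list));

	printf("after while loop, queue empty? %s\n",
	       list_empty(&queue) ? "yes" : "no");
	return 0;
}

The design point of the fix is visible in both loops above: list_first_entry() re-reads the head of queue->tasks[] on every pass, so any task that rpc_wake_up_task_queue_locked() splices into the list while we iterate is still woken before the loop exits.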
Diffstat (limited to 'net/sunrpc')
-rw-r--r--	net/sunrpc/sched.c	15
1 file changed, 11 insertions(+), 4 deletions(-)
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 1c570a81096a..994cfea2bad6 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -534,14 +534,18 @@ EXPORT_SYMBOL_GPL(rpc_wake_up_next);
  */
 void rpc_wake_up(struct rpc_wait_queue *queue)
 {
-	struct rpc_task *task, *next;
 	struct list_head *head;
 
 	spin_lock_bh(&queue->lock);
 	head = &queue->tasks[queue->maxpriority];
 	for (;;) {
-		list_for_each_entry_safe(task, next, head, u.tk_wait.list)
+		while (!list_empty(head)) {
+			struct rpc_task *task;
+			task = list_first_entry(head,
+					struct rpc_task,
+					u.tk_wait.list);
 			rpc_wake_up_task_queue_locked(queue, task);
+		}
 		if (head == &queue->tasks[0])
 			break;
 		head--;
@@ -559,13 +563,16 @@ EXPORT_SYMBOL_GPL(rpc_wake_up);
  */
 void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
 {
-	struct rpc_task *task, *next;
 	struct list_head *head;
 
 	spin_lock_bh(&queue->lock);
 	head = &queue->tasks[queue->maxpriority];
 	for (;;) {
-		list_for_each_entry_safe(task, next, head, u.tk_wait.list) {
+		while (!list_empty(head)) {
+			struct rpc_task *task;
+			task = list_first_entry(head,
+					struct rpc_task,
+					u.tk_wait.list);
 			task->tk_status = status;
 			rpc_wake_up_task_queue_locked(queue, task);
 		}