path: root/block/blk-core.c
author    NeilBrown <neilb@suse.de>            2011-04-11 08:13:10 -0400
committer Jens Axboe <jaxboe@fusionio.com>     2011-04-11 08:13:10 -0400
commit    109b81296c63228578d4760794d8dd46e02eddfb (patch)
tree      2f723e1b69ab08e12528f677f04edbfc7473b7c5 /block/blk-core.c
parent    4263a2f1dad8c8e7ce2352a0cbc882c2b0c044a9 (diff)
block: splice plug list to local context

If the request_fn ends up blocking, we could be re-entering the plug
flush. Since the list is protected by explicitly not allowing schedule
events, this isn't a terribly good idea.

Additionally, it can cause us to recurse. As request_fn called by
__blk_run_queue is allowed to 'schedule()' (after dropping the queue
lock of course), it is possible to get a recursive call:

    schedule -> blk_flush_plug -> __blk_finish_plug -> flush_plug_list
        -> __blk_run_queue -> request_fn -> schedule

We must make sure that the second schedule does not call into
blk_flush_plug again. So instead of leaving the list of requests on
blk_plug->list, move them to a separate list leaving blk_plug->list
empty.

Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
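To make the idea behind the patch concrete, here is a stand-alone sketch in
plain user-space C (not part of the patch and not kernel code; the list
helpers are hand-rolled and the names struct plug, struct req and flush_plug
are made up for illustration). It shows why splicing the shared plug list
onto a stack-local list makes a re-entrant flush harmless: the nested call
finds the shared list empty and returns immediately instead of walking the
same requests again.

/*
 * Illustrative sketch only -- not part of the patch and not kernel code.
 */
#include <stdio.h>

struct node { struct node *next, *prev; };

static void list_init(struct node *h)        { h->next = h->prev = h; }
static int  list_empty(const struct node *h) { return h->next == h; }

static void list_add_tail(struct node *n, struct node *h)
{
        n->prev = h->prev;
        n->next = h;
        h->prev->next = n;
        h->prev = n;
}

static void list_del(struct node *n)
{
        n->prev->next = n->next;
        n->next->prev = n->prev;
        n->next = n->prev = n;
}

/* Move every entry from 'src' onto the tail of 'dst', leaving 'src' empty. */
static void list_splice_init(struct node *src, struct node *dst)
{
        if (list_empty(src))
                return;
        src->next->prev = dst->prev;
        dst->prev->next = src->next;
        src->prev->next = dst;
        dst->prev = src->prev;
        list_init(src);
}

struct req  { struct node link; int id; };  /* 'link' stays first so the cast
                                               in flush_plug() is valid */
struct plug { struct node list; };

static void flush_plug(struct plug *plug, int depth)
{
        struct node local;                   /* plays the role of LIST_HEAD(list) */

        if (list_empty(&plug->list))         /* a re-entrant call stops here */
                return;

        list_init(&local);
        list_splice_init(&plug->list, &local);  /* plug->list is now empty */

        while (!list_empty(&local)) {
                struct req *rq = (struct req *)local.next;

                list_del(&rq->link);
                printf("depth %d: dispatching request %d\n", depth, rq->id);

                /*
                 * Pretend the driver blocked and the scheduler flushed the
                 * plug again: with the requests already moved to 'local',
                 * this nested call is a cheap no-op instead of a recursion
                 * over the same requests.
                 */
                flush_plug(plug, depth + 1);
        }
}

int main(void)
{
        struct plug plug;
        struct req reqs[3];
        int i;

        list_init(&plug.list);
        for (i = 0; i < 3; i++) {
                reqs[i].id = i;
                list_add_tail(&reqs[i].link, &plug.list);
        }
        flush_plug(&plug, 0);
        return 0;
}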
Diffstat (limited to 'block/blk-core.c')
-rw-r--r--    block/blk-core.c    | 14
1 file changed, 9 insertions(+), 5 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 90f22cc30799..eeaca0998df5 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2673,19 +2673,24 @@ static void flush_plug_list(struct blk_plug *plug)
         struct request_queue *q;
         unsigned long flags;
         struct request *rq;
+        LIST_HEAD(list);
 
         BUG_ON(plug->magic != PLUG_MAGIC);
 
         if (list_empty(&plug->list))
                 return;
 
-        if (plug->should_sort)
-                list_sort(NULL, &plug->list, plug_rq_cmp);
+        list_splice_init(&plug->list, &list);
+
+        if (plug->should_sort) {
+                list_sort(NULL, &list, plug_rq_cmp);
+                plug->should_sort = 0;
+        }
 
         q = NULL;
         local_irq_save(flags);
-        while (!list_empty(&plug->list)) {
-                rq = list_entry_rq(plug->list.next);
+        while (!list_empty(&list)) {
+                rq = list_entry_rq(list.next);
                 list_del_init(&rq->queuelist);
                 BUG_ON(!(rq->cmd_flags & REQ_ON_PLUG));
                 BUG_ON(!rq->q);
@@ -2713,7 +2718,6 @@ static void flush_plug_list(struct blk_plug *plug)
                 spin_unlock(q->queue_lock);
         }
 
-        BUG_ON(!list_empty(&plug->list));
         local_irq_restore(flags);
 }
 