path: root/block/blk-core.c
author    Jens Axboe <jaxboe@fusionio.com>  2011-04-16 07:51:05 -0400
committer Jens Axboe <jaxboe@fusionio.com>  2011-04-16 07:51:05 -0400
commit    49cac01e1fa74174d72adb0e872504a7fefd7c01 (patch)
tree      a1ab1974eceea3179a604413955ad8369ba715d7  /block/blk-core.c
parent    a237c1c5bc5dc5c76a21be922dca4826f3eca8ca (diff)
block: make unplug timer trace event correspond to the schedule() unplug
It's a pretty close match to what we had before: the timer triggering meant that nobody unplugged the plug in due time. In the new scheme, this corresponds closely to the schedule() unplug. It's essentially the difference between an explicit unplug (IO unplug) and an implicit unplug (timer unplug: we scheduled with pending IO queued).

Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
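For context, the two flavors correspond to the two callers of blk_flush_plug_list(). A rough sketch of the call sites in this era of the tree; the function names are real, but the bodies are simplified for illustration and are not part of this patch:

/* Explicit unplug: the task that plugged finishes its plug itself. */
void blk_finish_plug(struct blk_plug *plug)
{
	blk_flush_plug_list(plug, false);	/* from_schedule == false */
	/* ... */
}

/* Implicit unplug: the task blocks with plugged I/O still pending. */
static inline void blk_schedule_flush_plug(struct task_struct *tsk)
{
	if (tsk->plug)
		blk_flush_plug_list(tsk->plug, true);	/* from_schedule == true */
}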
Diffstat (limited to 'block/blk-core.c')
-rw-r--r--  block/blk-core.c | 18 ++++++++++++------
1 file changed, 12 insertions(+), 6 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 3c8121072507..78b7b0cb7216 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2662,17 +2662,23 @@ static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
 	return !(rqa->q <= rqb->q);
 }
 
+/*
+ * If 'from_schedule' is true, then postpone the dispatch of requests
+ * until a safe kblockd context. We do this to avoid accidental large
+ * additional stack usage in driver dispatch, in places where the
+ * original plugger did not intend it.
+ */
 static void queue_unplugged(struct request_queue *q, unsigned int depth,
-			    bool force_kblockd)
+			    bool from_schedule)
 {
-	trace_block_unplug_io(q, depth);
-	__blk_run_queue(q, force_kblockd);
+	trace_block_unplug(q, depth, !from_schedule);
+	__blk_run_queue(q, from_schedule);
 
 	if (q->unplugged_fn)
 		q->unplugged_fn(q);
 }
 
-void blk_flush_plug_list(struct blk_plug *plug, bool force_kblockd)
+void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 {
 	struct request_queue *q;
 	unsigned long flags;
@@ -2707,7 +2713,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool force_kblockd)
 		BUG_ON(!rq->q);
 		if (rq->q != q) {
 			if (q) {
-				queue_unplugged(q, depth, force_kblockd);
+				queue_unplugged(q, depth, from_schedule);
 				spin_unlock(q->queue_lock);
 			}
 			q = rq->q;
@@ -2728,7 +2734,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool force_kblockd)
 	}
 
 	if (q) {
-		queue_unplugged(q, depth, force_kblockd);
+		queue_unplugged(q, depth, from_schedule);
 		spin_unlock(q->queue_lock);
 	}
 
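The third argument to trace_block_unplug() is what lets the tracing side keep emitting the two legacy blktrace actions from a single event. A minimal sketch of how the tracepoint consumer can map the flag, assuming the BLK_TA_UNPLUG_IO and BLK_TA_UNPLUG_TIMER action codes from blktrace; the actual hookup lives outside this diffstat (it is limited to block/blk-core.c), so treat the function body as illustrative:

/*
 * Illustrative only: steering the one block_unplug event to the two
 * legacy blktrace actions. The body is a sketch, not quoted from the
 * commit; only the action codes and the flag semantics are assumed.
 */
static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
				 unsigned int depth, bool explicit)
{
	u32 what;

	if (explicit)
		what = BLK_TA_UNPLUG_IO;	/* plugger unplugged on its own */
	else
		what = BLK_TA_UNPLUG_TIMER;	/* implicit: flushed via schedule() */

	/* ... record 'what' plus the queue depth in the trace buffer ... */
}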