author     Linus Torvalds <torvalds@linux-foundation.org>  2011-04-16 13:33:41 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-04-16 13:33:41 -0400
commit     d733ed6c34be3aef0517a04e4103eed6b369ec50 (patch)
tree       b49723304c48c2f877b9cb284a4b39361934c670
parent     08150c533c57981054324b9e87dbf686006d890f (diff)
parent     49cac01e1fa74174d72adb0e872504a7fefd7c01 (diff)
Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block
* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
  block: make unplug timer trace event correspond to the schedule() unplug
  block: let io_schedule() flush the plug inline
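For context: the plug being flushed here is the per-task on-stack request list from the 2.6.39 plugging rework. A minimal sketch of a submitter, assuming the era's blk_start_plug()/blk_finish_plug() API and the two-argument submit_bio() of the time (bio setup elided):

	struct blk_plug plug;

	blk_start_plug(&plug);		/* requests now collect on current->plug */
	submit_bio(READ, bio);		/* held on the plug list, not yet dispatched */
	blk_finish_plug(&plug);		/* explicit unplug -> blk_flush_plug_list(plug, false) */

After this merge, only such explicit flushes (and io_schedule()) run the queue inline; a flush triggered from schedule() is punted to kblockd, and the trace events now tell the two apart.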
-rw-r--r--  block/blk-core.c              18
-rw-r--r--  include/linux/blkdev.h        13
-rw-r--r--  include/trace/events/block.h  13
-rw-r--r--  kernel/sched.c                 2
-rw-r--r--  kernel/trace/blktrace.c       18
5 files changed, 45 insertions(+), 19 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 3c8121072507..78b7b0cb7216 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2662,17 +2662,23 @@ static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
 	return !(rqa->q <= rqb->q);
 }
 
+/*
+ * If 'from_schedule' is true, then postpone the dispatch of requests
+ * until a safe kblockd context. We do this to avoid accidental big
+ * additional stack usage in driver dispatch, in places where the original
+ * plugger did not intend it.
+ */
 static void queue_unplugged(struct request_queue *q, unsigned int depth,
-			    bool force_kblockd)
+			    bool from_schedule)
 {
-	trace_block_unplug_io(q, depth);
-	__blk_run_queue(q, force_kblockd);
+	trace_block_unplug(q, depth, !from_schedule);
+	__blk_run_queue(q, from_schedule);
 
 	if (q->unplugged_fn)
 		q->unplugged_fn(q);
 }
 
-void blk_flush_plug_list(struct blk_plug *plug, bool force_kblockd)
+void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 {
 	struct request_queue *q;
 	unsigned long flags;
@@ -2707,7 +2713,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool force_kblockd)
 		BUG_ON(!rq->q);
 		if (rq->q != q) {
 			if (q) {
-				queue_unplugged(q, depth, force_kblockd);
+				queue_unplugged(q, depth, from_schedule);
 				spin_unlock(q->queue_lock);
 			}
 			q = rq->q;
@@ -2728,7 +2734,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool force_kblockd)
 	}
 
 	if (q) {
-		queue_unplugged(q, depth, force_kblockd);
+		queue_unplugged(q, depth, from_schedule);
 		spin_unlock(q->queue_lock);
 	}
 
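queue_unplugged() hands from_schedule straight to __blk_run_queue(), which is where the inline-vs-kblockd decision actually lives. A simplified sketch of that decision as it stood in this era (the real function also carries a recursion guard; treat the body as a paraphrase, not a quote):

	void __blk_run_queue(struct request_queue *q, bool force_kblockd)
	{
		if (unlikely(blk_queue_stopped(q)))
			return;

		if (!force_kblockd)
			q->request_fn(q);	/* drive the driver on the caller's stack */
		else			/* defer to kblockd to keep the stack shallow */
			queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
	}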
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 1c76506fcf11..ec0357d8c4a5 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -872,6 +872,14 @@ static inline void blk_flush_plug(struct task_struct *tsk)
 	struct blk_plug *plug = tsk->plug;
 
 	if (plug)
+		blk_flush_plug_list(plug, false);
+}
+
+static inline void blk_schedule_flush_plug(struct task_struct *tsk)
+{
+	struct blk_plug *plug = tsk->plug;
+
+	if (plug)
 		blk_flush_plug_list(plug, true);
 }
 
@@ -1317,6 +1325,11 @@ static inline void blk_flush_plug(struct task_struct *task)
1317{ 1325{
1318} 1326}
1319 1327
1328static inline void blk_schedule_flush_plug(struct task_struct *task)
1329{
1330}
1331
1332
1320static inline bool blk_needs_flush_plug(struct task_struct *tsk) 1333static inline bool blk_needs_flush_plug(struct task_struct *tsk)
1321{ 1334{
1322 return false; 1335 return false;
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
index 006e60b58306..bf366547da25 100644
--- a/include/trace/events/block.h
+++ b/include/trace/events/block.h
@@ -401,9 +401,9 @@ TRACE_EVENT(block_plug,
 
 DECLARE_EVENT_CLASS(block_unplug,
 
-	TP_PROTO(struct request_queue *q, unsigned int depth),
+	TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),
 
-	TP_ARGS(q, depth),
+	TP_ARGS(q, depth, explicit),
 
 	TP_STRUCT__entry(
 		__field( int, nr_rq )
@@ -419,18 +419,19 @@ DECLARE_EVENT_CLASS(block_unplug,
 );
 
 /**
- * block_unplug_io - release of operations requests in request queue
+ * block_unplug - release of operations requests in request queue
  * @q: request queue to unplug
  * @depth: number of requests just added to the queue
+ * @explicit: whether this was an explicit unplug, or one from schedule()
  *
  * Unplug request queue @q because device driver is scheduled to work
  * on elements in the request queue.
  */
-DEFINE_EVENT(block_unplug, block_unplug_io,
+DEFINE_EVENT(block_unplug, block_unplug,
 
-	TP_PROTO(struct request_queue *q, unsigned int depth),
+	TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),
 
-	TP_ARGS(q, depth)
+	TP_ARGS(q, depth, explicit)
 );
 
 /**
diff --git a/kernel/sched.c b/kernel/sched.c
index a187c3fe027b..312f8b95c2d4 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4118,7 +4118,7 @@ need_resched:
 		 */
 		if (blk_needs_flush_plug(prev)) {
 			raw_spin_unlock(&rq->lock);
-			blk_flush_plug(prev);
+			blk_schedule_flush_plug(prev);
 			raw_spin_lock(&rq->lock);
 		}
 	}
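schedule() now defers the flush to kblockd, while io_schedule() keeps flushing inline via blk_flush_plug(): a task about to sleep on I/O wants its own requests issued immediately, and its stack usage at that point is known. A paraphrased sketch of the io_schedule() counterpart after this merge (delay accounting elided; treat the body as an approximation of the 2.6.39 code, not a quote):

	void __sched io_schedule(void)
	{
		struct rq *rq = raw_rq();

		atomic_inc(&rq->nr_iowait);
		blk_flush_plug(current);	/* inline: blk_flush_plug_list(plug, false) */
		current->in_iowait = 1;
		schedule();
		current->in_iowait = 0;
		atomic_dec(&rq->nr_iowait);
	}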
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 3e3970d53d14..6957aa298dfa 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -850,16 +850,21 @@ static void blk_add_trace_plug(void *ignore, struct request_queue *q)
 		__blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL);
 }
 
-static void blk_add_trace_unplug_io(void *ignore, struct request_queue *q,
-				    unsigned int depth)
+static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
+				 unsigned int depth, bool explicit)
 {
 	struct blk_trace *bt = q->blk_trace;
 
 	if (bt) {
 		__be64 rpdu = cpu_to_be64(depth);
+		u32 what;
 
-		__blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_IO, 0,
-				sizeof(rpdu), &rpdu);
+		if (explicit)
+			what = BLK_TA_UNPLUG_IO;
+		else
+			what = BLK_TA_UNPLUG_TIMER;
+
+		__blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu);
 	}
 }
 
@@ -1002,7 +1007,7 @@ static void blk_register_tracepoints(void)
 	WARN_ON(ret);
 	ret = register_trace_block_plug(blk_add_trace_plug, NULL);
 	WARN_ON(ret);
-	ret = register_trace_block_unplug_io(blk_add_trace_unplug_io, NULL);
+	ret = register_trace_block_unplug(blk_add_trace_unplug, NULL);
 	WARN_ON(ret);
 	ret = register_trace_block_split(blk_add_trace_split, NULL);
 	WARN_ON(ret);
@@ -1017,7 +1022,7 @@ static void blk_unregister_tracepoints(void)
 	unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
 	unregister_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
 	unregister_trace_block_split(blk_add_trace_split, NULL);
-	unregister_trace_block_unplug_io(blk_add_trace_unplug_io, NULL);
+	unregister_trace_block_unplug(blk_add_trace_unplug, NULL);
 	unregister_trace_block_plug(blk_add_trace_plug, NULL);
 	unregister_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
 	unregister_trace_block_getrq(blk_add_trace_getrq, NULL);
@@ -1332,6 +1337,7 @@ static const struct {
 	[__BLK_TA_COMPLETE]	= {{  "C", "complete" },   blk_log_with_error },
 	[__BLK_TA_PLUG]		= {{  "P", "plug" },	   blk_log_plug },
 	[__BLK_TA_UNPLUG_IO]	= {{  "U", "unplug_io" },  blk_log_unplug },
+	[__BLK_TA_UNPLUG_TIMER]	= {{ "UT", "unplug_timer" }, blk_log_unplug },
 	[__BLK_TA_INSERT]	= {{  "I", "insert" },	   blk_log_generic },
 	[__BLK_TA_SPLIT]	= {{  "X", "split" },	   blk_log_split },
 	[__BLK_TA_BOUNCE]	= {{  "B", "bounce" },	   blk_log_generic },
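The renamed tracepoint's consumer contract is exactly what blk_add_trace_unplug() shows above: probes now take the extra bool. A minimal out-of-tree sketch of hooking it (module boilerplate elided; the probe name is made up for illustration):

	#include <linux/blkdev.h>
	#include <trace/events/block.h>

	static void probe_unplug(void *ignore, struct request_queue *q,
				 unsigned int depth, bool explicit)
	{
		pr_info("unplug: depth=%u (%s)\n", depth,
			explicit ? "explicit" : "from schedule()");
	}

	/* in module init/exit: */
	register_trace_block_unplug(probe_unplug, NULL);
	unregister_trace_block_unplug(probe_unplug, NULL);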