 block/blk-core.c             | 15
 include/trace/events/block.h | 11
 kernel/trace/blktrace.c      |  6
 3 files changed, 22 insertions(+), 10 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index eeaca0998df5..d20ce1e849c8 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2668,12 +2668,19 @@ static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
 	return !(rqa->q <= rqb->q);
 }
 
+static void queue_unplugged(struct request_queue *q, unsigned int depth)
+{
+	trace_block_unplug_io(q, depth);
+	__blk_run_queue(q, false);
+}
+
 static void flush_plug_list(struct blk_plug *plug)
 {
 	struct request_queue *q;
 	unsigned long flags;
 	struct request *rq;
 	LIST_HEAD(list);
+	unsigned int depth;
 
 	BUG_ON(plug->magic != PLUG_MAGIC);
 
@@ -2688,6 +2695,7 @@ static void flush_plug_list(struct blk_plug *plug)
 	}
 
 	q = NULL;
+	depth = 0;
 	local_irq_save(flags);
 	while (!list_empty(&list)) {
 		rq = list_entry_rq(list.next);
@@ -2696,10 +2704,11 @@ static void flush_plug_list(struct blk_plug *plug)
 		BUG_ON(!rq->q);
 		if (rq->q != q) {
 			if (q) {
-				__blk_run_queue(q, false);
+				queue_unplugged(q, depth);
 				spin_unlock(q->queue_lock);
 			}
 			q = rq->q;
+			depth = 0;
 			spin_lock(q->queue_lock);
 		}
 		rq->cmd_flags &= ~REQ_ON_PLUG;
@@ -2711,10 +2720,12 @@ static void flush_plug_list(struct blk_plug *plug)
 			__elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH);
 		else
 			__elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE);
+
+		depth++;
 	}
 
 	if (q) {
-		__blk_run_queue(q, false);
+		queue_unplugged(q, depth);
 		spin_unlock(q->queue_lock);
 	}
 
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
index 43a985390bb6..006e60b58306 100644
--- a/include/trace/events/block.h
+++ b/include/trace/events/block.h
@@ -401,9 +401,9 @@ TRACE_EVENT(block_plug,
 
 DECLARE_EVENT_CLASS(block_unplug,
 
-	TP_PROTO(struct request_queue *q),
+	TP_PROTO(struct request_queue *q, unsigned int depth),
 
-	TP_ARGS(q),
+	TP_ARGS(q, depth),
 
 	TP_STRUCT__entry(
 		__field( int, nr_rq )
@@ -411,7 +411,7 @@ DECLARE_EVENT_CLASS(block_unplug,
 	),
 
 	TP_fast_assign(
-		__entry->nr_rq = q->rq.count[READ] + q->rq.count[WRITE];
+		__entry->nr_rq = depth;
 		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
 	),
 
@@ -421,15 +421,16 @@ DECLARE_EVENT_CLASS(block_unplug,
 /**
  * block_unplug_io - release of operations requests in request queue
  * @q: request queue to unplug
+ * @depth: number of requests just added to the queue
  *
  * Unplug request queue @q because device driver is scheduled to work
  * on elements in the request queue.
  */
 DEFINE_EVENT(block_unplug, block_unplug_io,
 
-	TP_PROTO(struct request_queue *q),
+	TP_PROTO(struct request_queue *q, unsigned int depth),
 
-	TP_ARGS(q)
+	TP_ARGS(q, depth)
 );
 
 /**
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 824708cbfb7b..3e3970d53d14 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -850,13 +850,13 @@ static void blk_add_trace_plug(void *ignore, struct request_queue *q)
 	__blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL);
 }
 
-static void blk_add_trace_unplug_io(void *ignore, struct request_queue *q)
+static void blk_add_trace_unplug_io(void *ignore, struct request_queue *q,
+				    unsigned int depth)
 {
 	struct blk_trace *bt = q->blk_trace;
 
 	if (bt) {
-		unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
-		__be64 rpdu = cpu_to_be64(pdu);
+		__be64 rpdu = cpu_to_be64(depth);
 
 		__blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_IO, 0,
 				sizeof(rpdu), &rpdu);
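
Hedged sketch, not part of the patch: any probe attached to block_unplug_io now receives the count directly instead of reading q->rq.count[]. The following is written against the updated prototype in the style of blk_add_trace_unplug_io() above; it assumes the data-passing tracepoint registration API of this kernel series and that the tracepoint is reachable from the caller (as it is for built-in code such as blktrace). Function names and messages are hypothetical.

/*
 * Sketch of a consumer of the new depth argument.
 */
#include <linux/module.h>
#include <linux/blkdev.h>
#include <trace/events/block.h>

/* the first argument is the private data pointer given at registration */
static void probe_unplug_io(void *ignore, struct request_queue *q,
			    unsigned int depth)
{
	pr_info("block unplug: %u request(s) queued since last unplug\n",
		depth);
}

static int __init unplug_watch_init(void)
{
	/* NULL private data, matching how blktrace registers its probe */
	return register_trace_block_unplug_io(probe_unplug_io, NULL);
}

static void __exit unplug_watch_exit(void)
{
	unregister_trace_block_unplug_io(probe_unplug_io, NULL);
	tracepoint_synchronize_unregister();
}

module_init(unplug_watch_init);
module_exit(unplug_watch_exit);
MODULE_LICENSE("GPL");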