author    Daniel Stodden <daniel.stodden@citrix.com>    2011-05-28 16:21:10 -0400
committer Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>    2011-06-30 12:48:06 -0400
commit    b4726a9df270859898e254b6eee67a28f38b34d3
tree      2512fcce88e4ad97ea1068864d716fd105d86c39 /drivers/block/xen-blkback/blkback.c
parent    2b727c6300b49352f80f63704bb50c256949e95e
xen/blkback: Don't let in-flight requests defer pending ones.
Running RING_FINAL_CHECK_FOR_REQUESTS from make_response is a bad idea. It means that in-flight I/O is essentially blocking continued batches, which kills throughput on frontends which unplug (or even just notify) early and rightfully assume additional requests will be picked up on time, not synchronously.

Signed-off-by: Daniel Stodden <daniel.stodden@citrix.com>
[v1: Rebased and fixed compile problems]
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
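For context, the final-check macro both re-scans the ring for unconsumed requests and re-arms frontend notifications, closing the wakeup race. The following sketch paraphrases its shape from Xen's include/xen/interface/io/ring.h; it is illustrative, not a verbatim copy:

/*
 * Illustrative paraphrase of RING_FINAL_CHECK_FOR_REQUESTS from
 * include/xen/interface/io/ring.h; not a verbatim copy.
 */
#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do {		\
	/* Fast path: work is already queued, no need to re-arm. */	\
	(_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r);		\
	if (_work_to_do)						\
		break;							\
	/* Ring looks idle: ask the frontend to notify us again. */	\
	(_r)->sring->req_event = (_r)->req_cons + 1;			\
	mb();								\
	/* Re-check to catch a request that raced with the re-arm. */	\
	(_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r);		\
} while (0)

Because this re-arm and re-scan ran only from make_response(), new requests were picked up only as responses drained; the patch hoists the check into a do_block_io_op() loop so the dispatcher keeps consuming batches independently of I/O completion.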
Diffstat (limited to 'drivers/block/xen-blkback/blkback.c')
-rw-r--r--    drivers/block/xen-blkback/blkback.c    36
1 file changed, 19 insertions(+), 17 deletions(-)
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 5cf2993a8338..f0537f3f6366 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -458,7 +458,8 @@ static void end_block_io_op(struct bio *bio, int error)
  * (which has the sectors we want, number of them, grant references, etc),
  * and transmute it to the block API to hand it over to the proper block disk.
  */
-static int do_block_io_op(struct xen_blkif *blkif)
+static int
+__do_block_io_op(struct xen_blkif *blkif)
 {
 	union blkif_back_rings *blk_rings = &blkif->blk_rings;
 	struct blkif_request req;
@@ -515,6 +516,23 @@ static int do_block_io_op(struct xen_blkif *blkif)
 	return more_to_do;
 }
 
+static int
+do_block_io_op(struct xen_blkif *blkif)
+{
+	union blkif_back_rings *blk_rings = &blkif->blk_rings;
+	int more_to_do;
+
+	do {
+		more_to_do = __do_block_io_op(blkif);
+		if (more_to_do)
+			break;
+
+		RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
+	} while (more_to_do);
+
+	return more_to_do;
+}
+
 /*
  * Transmutation of the 'struct blkif_request' to a proper 'struct bio'
  * and call the 'submit_bio' to pass it to the underlying storage.
@@ -700,7 +718,6 @@ static void make_response(struct xen_blkif *blkif, u64 id,
 	struct blkif_response resp;
 	unsigned long flags;
 	union blkif_back_rings *blk_rings = &blkif->blk_rings;
-	int more_to_do = 0;
 	int notify;
 
 	resp.id = id;
@@ -727,22 +744,7 @@ static void make_response(struct xen_blkif *blkif, u64 id,
 	}
 	blk_rings->common.rsp_prod_pvt++;
 	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
-	if (blk_rings->common.rsp_prod_pvt == blk_rings->common.req_cons) {
-		/*
-		 * Tail check for pending requests. Allows frontend to avoid
-		 * notifications if requests are already in flight (lower
-		 * overheads and promotes batching).
-		 */
-		RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
-
-	} else if (RING_HAS_UNCONSUMED_REQUESTS(&blk_rings->common)) {
-		more_to_do = 1;
-	}
-
 	spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
-
-	if (more_to_do)
-		blkif_notify_work(blkif);
 	if (notify)
 		notify_remote_via_irq(blkif->irq);
 }
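For completeness, here is a simplified sketch of how the per-device dispatch thread might drive the new do_block_io_op() loop. The real caller in this file is xen_blkif_schedule(); the name below and the elided details (plug/unplug handling, free-request throttling) are approximations, not the actual implementation:

/*
 * Simplified sketch of the dispatch kthread calling do_block_io_op().
 * dispatch_loop_sketch is a hypothetical name; the real loop in
 * blkback.c is xen_blkif_schedule(), which also handles plugging and
 * pending-request throttling, omitted here for clarity.
 */
static int dispatch_loop_sketch(void *arg)
{
	struct xen_blkif *blkif = arg;

	while (!kthread_should_stop()) {
		/* Sleep until the frontend's event channel kicks us. */
		wait_event_interruptible(blkif->wq,
					 blkif->waiting_reqs ||
					 kthread_should_stop());
		blkif->waiting_reqs = 0;
		smp_mb();	/* clear the flag before scanning the ring */

		/*
		 * With this patch, do_block_io_op() keeps consuming
		 * requests until RING_FINAL_CHECK_FOR_REQUESTS confirms
		 * the ring is empty and notifications are re-armed, so
		 * in-flight I/O no longer gates the next batch.
		 */
		if (do_block_io_op(blkif))
			blkif->waiting_reqs = 1;	/* come back soon */
	}
	return 0;
}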