author	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>	2011-04-26 12:57:59 -0400
committer	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>	2011-04-26 13:01:32 -0400
commit	97961ef46b9b5a6a7c918a38b898a7b3e49869f4 (patch)
tree	5ba59239707b467095a84e743724f29085eb8858 /drivers
parent	8b6bf747d70e5bac1a34c8fd773230e1cfdd7546 (diff)
xen/blkback: Move the plugging/unplugging to a higher level.
We used to do the plug/unplug on submit_bio. But that means that within a stream of WRITE, WRITE, WRITE, ..., WRITE, a single READ could stall the pipeline (the submit_bio could trigger the unplug_fnc to be called and stall/sync while doing the READ). Instead we want to do the unplugging once the whole ring buffer (or as much of it as possible) has been processed. This also eliminates doing a plug/unplug for each request.

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
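For illustration only (not part of the original commit): the change moves from plugging per request to plugging once per batch of ring entries. Below is a minimal sketch of that pattern, assuming the block layer's blk_start_plug()/blk_finish_plug() API; the helper names are hypothetical, not the driver's own code.

	/* Sketch of batch-level plugging; the helpers below are hypothetical. */
	struct blk_plug plug;

	blk_start_plug(&plug);		/* collect bios on the task's plug list */
	while (more_ring_entries())	/* hypothetical: walk the shared ring */
		submit_one_request();	/* hypothetical: ends in submit_bio() */
	blk_finish_plug(&plug);		/* flush the whole batch to the queue */

Holding the plug across the loop means a stray READ in a run of WRITEs no longer forces an unplug in the middle of the batch.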
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/block/xen-blkback/blkback.c	13
1 file changed, 6 insertions(+), 7 deletions(-)
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index c4bc85e69d33..ed85ba94b2e0 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -276,6 +276,8 @@ int xen_blkif_schedule(void *arg)
 	printk(KERN_DEBUG "%s: started\n", current->comm);
 
 	while (!kthread_should_stop()) {
+		struct blk_plug plug;
+
 		if (try_to_freeze())
 			continue;
 		if (unlikely(vbd->size != vbd_sz(vbd)))
@@ -292,9 +294,13 @@ int xen_blkif_schedule(void *arg)
 		blkif->waiting_reqs = 0;
 		smp_mb(); /* clear flag *before* checking for work */
 
+		blk_start_plug(&plug);
+
 		if (do_block_io_op(blkif))
 			blkif->waiting_reqs = 1;
 
+		blk_finish_plug(&plug);
+
 		if (log_stats && time_after(jiffies, blkif->st_print))
 			print_stats(blkif);
 	}
@@ -547,7 +553,6 @@ static void dispatch_rw_block_io(struct blkif_st *blkif,
 	struct bio *biolist[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 	int i, nbio = 0;
 	int operation;
-	struct blk_plug plug;
 
 	switch (req->operation) {
 	case BLKIF_OP_READ:
@@ -660,15 +665,9 @@ static void dispatch_rw_block_io(struct blkif_st *blkif,
 	 */
 	atomic_set(&pending_req->pendcnt, nbio);
 
-	/* Get a reference count for the disk queue and start sending I/O */
-	blk_start_plug(&plug);
-
 	for (i = 0; i < nbio; i++)
 		submit_bio(operation, biolist[i]);
 
-	blk_finish_plug(&plug);
-	/* Let the I/Os go.. */
-
 	if (operation == READ)
 		blkif->st_rd_sect += preq.nr_sects;
 	else if (operation == WRITE || operation == WRITE_BARRIER)