author		Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>	2011-04-27 12:40:11 -0400
committer	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>	2011-04-27 12:40:11 -0400
commit		a19be5f0f073525306f6a4b000d90dc84065ed93 (patch)
tree		3c2b4f2dfda149838f0060d3f38c7494d8612f4d /drivers/block/xen-blkback
parent		013c3ca184851078b9c04744efd4d47e52c6ecf8 (diff)
Revert "xen/blkback: Move the plugging/unplugging to a higher level."
This reverts commit 97961ef46b9b5a6a7c918a38b898a7b3e49869f4 because we lose about 15% performance if we do the unplugging at the end of reading the ring buffer.
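For context, here is a minimal sketch of the on-stack plugging pattern this revert restores. The wrapper name submit_plugged is hypothetical (the driver open-codes this in dispatch_rw_block_io()); biolist, nbio, and operation mirror the names in the diff below, and submit_bio() is the two-argument form used by kernels of this era:

#include <linux/blkdev.h>	/* struct blk_plug, blk_start_plug(), blk_finish_plug() */
#include <linux/fs.h>		/* submit_bio() lives here in kernels of this vintage */

/*
 * Plug tightly around the per-request submit loop: bios queue up on a
 * per-task plug list and are flushed to the disk queue as one batch.
 */
static void submit_plugged(int operation, struct bio **biolist, int nbio)
{
	struct blk_plug plug;	/* lives on the submitter's stack */
	int i;

	blk_start_plug(&plug);

	for (i = 0; i < nbio; i++)
		submit_bio(operation, biolist[i]);

	blk_finish_plug(&plug);	/* let the batched I/Os go */
}

Plugging around the per-request submit loop, rather than around the whole ring-buffer pass as the reverted commit did, is what recovers the roughly 15% noted above.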
Diffstat (limited to 'drivers/block/xen-blkback')
-rw-r--r--	drivers/block/xen-blkback/blkback.c	13
1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 8583b130499a..eb068d0b47ea 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -276,8 +276,6 @@ int xen_blkif_schedule(void *arg)
 	printk(KERN_DEBUG "%s: started\n", current->comm);
 
 	while (!kthread_should_stop()) {
-		struct blk_plug plug;
-
 		if (try_to_freeze())
 			continue;
 		if (unlikely(vbd->size != vbd_sz(vbd)))
@@ -294,13 +292,9 @@ int xen_blkif_schedule(void *arg)
 		blkif->waiting_reqs = 0;
 		smp_mb(); /* clear flag *before* checking for work */
 
-		blk_start_plug(&plug);
-
 		if (do_block_io_op(blkif))
 			blkif->waiting_reqs = 1;
 
-		blk_finish_plug(&plug);
-
 		if (log_stats && time_after(jiffies, blkif->st_print))
 			print_stats(blkif);
 	}
@@ -553,6 +547,7 @@ static void dispatch_rw_block_io(struct blkif_st *blkif,
 	struct bio *biolist[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 	int i, nbio = 0;
 	int operation;
+	struct blk_plug plug;
 
 	switch (req->operation) {
 	case BLKIF_OP_READ:
@@ -665,9 +660,15 @@ static void dispatch_rw_block_io(struct blkif_st *blkif,
 	 */
 	atomic_set(&pending_req->pendcnt, nbio);
 
+	/* Get a reference count for the disk queue and start sending I/O */
+	blk_start_plug(&plug);
+
 	for (i = 0; i < nbio; i++)
 		submit_bio(operation, biolist[i]);
 
+	blk_finish_plug(&plug);
+	/* Let the I/Os go.. */
+
 	if (operation == READ)
 		blkif->st_rd_sect += preq.nr_sects;
 	else if (operation == WRITE || operation == WRITE_BARRIER)