aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorKonrad Rzeszutek Wilk <konrad.wilk@oracle.com>2011-04-18 14:17:49 -0400
committerKonrad Rzeszutek Wilk <konrad.wilk@oracle.com>2011-04-18 14:26:17 -0400
commitd2436eda2e81f1993bfe6349f17f52503bffeff5 (patch)
tree1da4a8c0f15497afeaf955f9af28c816408ad285
parent6fd17b5643bf05c29fc226a5aee96328056fca10 (diff)
block, xen/blkback: remove blk_[get|put]_queue calls.
They were used to check if the queue does not have QUEUE_FLAG_DEAD set. That is not necessary anymore, as the 'submit_io' call ends up doing that for us.

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-rw-r--r--block/blk-core.c2
-rw-r--r--drivers/xen/blkback/blkback.c6
2 files changed, 0 insertions, 8 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 9b60e69a5400..90f22cc30799 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -351,7 +351,6 @@ void blk_put_queue(struct request_queue *q)
 {
 	kobject_put(&q->kobj);
 }
-EXPORT_SYMBOL_GPL(blk_put_queue);
 
 /*
  * Note: If a driver supplied the queue lock, it should not zap that lock
@@ -573,7 +572,6 @@ int blk_get_queue(struct request_queue *q)
 
 	return 1;
 }
-EXPORT_SYMBOL_GPL(blk_get_queue);
 
 static inline void blk_free_request(struct request_queue *q, struct request *rq)
 {
diff --git a/drivers/xen/blkback/blkback.c b/drivers/xen/blkback/blkback.c
index 3751325bfc32..59a2bae0f35e 100644
--- a/drivers/xen/blkback/blkback.c
+++ b/drivers/xen/blkback/blkback.c
@@ -479,7 +479,6 @@ static void dispatch_rw_block_io(struct blkif_st *blkif,
 	int i, nbio = 0;
 	int operation;
 	struct blk_plug plug;
-	struct request_queue *q;
 
 	switch (req->operation) {
 	case BLKIF_OP_READ:
@@ -542,9 +541,6 @@ static void dispatch_rw_block_io(struct blkif_st *blkif,
 			goto fail_response;
 		}
 	}
-	q = bdev_get_queue(preq.bdev);
-	if (!q)
-		goto fail_response;
 	/* If we have failed at this point, we need to undo the M2P override,
 	 * set gnttab_set_unmap_op on all of the grant references and perform
 	 * the hypercall to unmap the grants - that is all done in
@@ -596,7 +592,6 @@ static void dispatch_rw_block_io(struct blkif_st *blkif,
 	atomic_set(&pending_req->pendcnt, nbio);
 
 	/* Get a reference count for the disk queue and start sending I/O */
-	blk_get_queue(q);
 	blk_start_plug(&plug);
 
 	for (i = 0; i < nbio; i++)
@@ -604,7 +599,6 @@ static void dispatch_rw_block_io(struct blkif_st *blkif,
 
 	blk_finish_plug(&plug);
 	/* Let the I/Os go.. */
-	blk_put_queue(q);
 
 	if (operation == READ)
 		blkif->st_rd_sect += preq.nr_sects;