author    Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>    2011-10-12 17:26:47 -0400
committer Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>    2011-11-18 13:28:03 -0500
commit    421463526fd1d8b5cb575baca12667c1005a110b
tree      7ac39116a15841d70126f92a6587972ecdadeaed /drivers/block/xen-blkback
parent    5ea42986694a96542644f9cae8b122d3a00c508f
xen/blkback: Move processing of BLKIF_OP_DISCARD from dispatch_rw_block_io
.. and move it to its own function that will deal with the discard operation.

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
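For orientation, a condensed sketch of the request-dispatch path as it looks after this change (lifted from the __do_block_io_op() hunk below; the surrounding loop and error handling are omitted). Discard requests carry no data segments, so the pending_req is released up front and the request is handed to the new dispatch_discard_io():

	/* Sketch only: condensed from the __do_block_io_op() hunk below. */
	if (unlikely(req.operation == BLKIF_OP_DISCARD)) {
		/* Discards need no pending_req or grant mappings. */
		free_req(pending_req);
		if (dispatch_discard_io(blkif, &req))
			break;
	} else if (dispatch_rw_block_io(blkif, &req, pending_req))
		break;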
Diffstat (limited to 'drivers/block/xen-blkback')
-rw-r--r--  drivers/block/xen-blkback/blkback.c  54
1 file changed, 24 insertions(+), 30 deletions(-)
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 9d2261b02f24..b058de7825f5 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -416,12 +416,16 @@ static int xen_blkbk_map(struct blkif_request *req,
 	return ret;
 }
 
-static void xen_blk_discard(struct xen_blkif *blkif, struct blkif_request *req)
+static int dispatch_discard_io(struct xen_blkif *blkif,
+				struct blkif_request *req)
 {
 	int err = 0;
 	int status = BLKIF_RSP_OKAY;
 	struct block_device *bdev = blkif->vbd.bdev;
 
+	blkif->st_ds_req++;
+
+	xen_blkif_get(blkif);
 	if (blkif->blk_backend_type == BLKIF_BACKEND_PHY) {
 		unsigned long secure = (blkif->vbd.discard_secure &&
 			(req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
@@ -453,6 +457,8 @@ static void xen_blk_discard(struct xen_blkif *blkif, struct blkif_request *req)
 		status = BLKIF_RSP_ERROR;
 
 	make_response(blkif, req->u.discard.id, req->operation, status);
+	xen_blkif_put(blkif);
+	return err;
 }
 
 static void xen_blk_drain_io(struct xen_blkif *blkif)
@@ -576,8 +582,11 @@ __do_block_io_op(struct xen_blkif *blkif)
 
 		/* Apply all sanity checks to /private copy/ of request. */
 		barrier();
-
-		if (dispatch_rw_block_io(blkif, &req, pending_req))
+		if (unlikely(req.operation == BLKIF_OP_DISCARD)) {
+			free_req(pending_req);
+			if (dispatch_discard_io(blkif, &req))
+				break;
+		} else if (dispatch_rw_block_io(blkif, &req, pending_req))
 			break;
 
 		/* Yield point for this unbounded loop. */
@@ -636,24 +645,16 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
 		blkif->st_f_req++;
 		operation = WRITE_FLUSH;
 		break;
-	case BLKIF_OP_DISCARD:
-		blkif->st_ds_req++;
-		operation = REQ_DISCARD;
-		break;
 	default:
 		operation = 0; /* make gcc happy */
 		goto fail_response;
 		break;
 	}
 
-	if (unlikely(operation == REQ_DISCARD))
-		nseg = 0;
-	else
-		/* Check that the number of segments is sane. */
-		nseg = req->u.rw.nr_segments;
+	/* Check that the number of segments is sane. */
+	nseg = req->u.rw.nr_segments;
 
-	if (unlikely(nseg == 0 && operation != WRITE_FLUSH &&
-	     operation != REQ_DISCARD) ||
+	if (unlikely(nseg == 0 && operation != WRITE_FLUSH) ||
 	    unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
 		pr_debug(DRV_PFX "Bad number of segments in request (%d)\n",
 			 nseg);
@@ -714,7 +715,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
 	 * the hypercall to unmap the grants - that is all done in
 	 * xen_blkbk_unmap.
 	 */
-	if (nseg && xen_blkbk_map(req, pending_req, seg))
+	if (xen_blkbk_map(req, pending_req, seg))
 		goto fail_flush;
 
 	/*
@@ -746,23 +747,16 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
 
 	/* This will be hit if the operation was a flush or discard. */
 	if (!bio) {
-		BUG_ON(operation != WRITE_FLUSH && operation != REQ_DISCARD);
+		BUG_ON(operation != WRITE_FLUSH);
 
-		if (operation == WRITE_FLUSH) {
-			bio = bio_alloc(GFP_KERNEL, 0);
-			if (unlikely(bio == NULL))
-				goto fail_put_bio;
+		bio = bio_alloc(GFP_KERNEL, 0);
+		if (unlikely(bio == NULL))
+			goto fail_put_bio;
 
-			biolist[nbio++] = bio;
-			bio->bi_bdev = preq.bdev;
-			bio->bi_private = pending_req;
-			bio->bi_end_io = end_block_io_op;
-		} else if (operation == REQ_DISCARD) {
-			xen_blk_discard(blkif, req);
-			xen_blkif_put(blkif);
-			free_req(pending_req);
-			return 0;
-		}
+		biolist[nbio++] = bio;
+		bio->bi_bdev = preq.bdev;
+		bio->bi_private = pending_req;
+		bio->bi_end_io = end_block_io_op;
 	}
 
 	/*