author		Julien Grall <julien.grall@citrix.com>		2015-08-13 14:23:10 -0400
committer	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>	2016-01-04 12:21:24 -0500
commit		2e073969d57f60fc0b863985779657624cbd4886
tree		49964e6c600fcab04fa359b7a9ee238e8363b253	/drivers/block/xen-blkfront.c
parent		a6e7af1288eeb7fca8361356998d31a92a291531
xen-blkfront: Introduce blkif_ring_get_request
The code to get a request is always the same. Therefore we can factorize
it in a single function.

Signed-off-by: Julien Grall <julien.grall@citrix.com>
Acked-by: Roger Pau Monné <roger.pau@citrix.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
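[Editor's note] Before this change, both submission paths open-coded the same
four steps: reserve the slot at the private producer index, advance that
index, pull an id off the shadow free list, and record the request under that
id. The user-space sketch below models that pattern and the helper that
replaces it. The ring, free list, and all names here (RING_SIZE, ring_model,
ring_slot, ring_get_request) are simplified stand-ins for illustration, not
the kernel's blkfront structures.

	/* Minimal user-space model of the pattern this patch factors out.
	 * All types are hypothetical stand-ins, not the kernel's blkif ones. */
	#include <assert.h>
	#include <stdio.h>

	#define RING_SIZE 8

	struct ring_slot { unsigned long id; /* request payload elided */ };

	struct ring_model {
		struct ring_slot ring[RING_SIZE];
		unsigned int req_prod_pvt;	/* private producer index */
		unsigned long free_head;	/* head of the shadow free list */
		struct {
			unsigned long link;	/* next free id while unused */
			void *request;		/* the request owning this id */
		} shadow[RING_SIZE];
	};

	static void ring_init(struct ring_model *r)
	{
		r->req_prod_pvt = 0;
		r->free_head = 0;
		for (unsigned long i = 0; i < RING_SIZE; i++)
			r->shadow[i].link = i + 1;
	}

	static unsigned long get_id_from_freelist(struct ring_model *r)
	{
		unsigned long id = r->free_head;

		r->free_head = r->shadow[id].link;
		return id;
	}

	/* Mirrors blkif_ring_get_request(): reserve a slot, bump the
	 * producer, take a free id, bind the request to that id, and
	 * stamp the id into the slot. */
	static unsigned long ring_get_request(struct ring_model *r, void *req,
					      struct ring_slot **slot)
	{
		unsigned long id;

		*slot = &r->ring[r->req_prod_pvt % RING_SIZE];
		r->req_prod_pvt++;

		id = get_id_from_freelist(r);
		r->shadow[id].request = req;

		(*slot)->id = id;
		return id;
	}

	int main(void)
	{
		struct ring_model r;
		struct ring_slot *slot;
		int req;	/* stands in for struct request */

		ring_init(&r);
		unsigned long id = ring_get_request(&r, &req, &slot);
		assert(slot->id == id && r.shadow[id].request == &req);
		printf("queued request under id %lu\n", id);
		return 0;
	}

With the helper in place, blkif_queue_discard_req() and blkif_queue_rw_req()
only fill in the operation-specific fields of the returned request, as the
hunks below show.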
Diffstat (limited to 'drivers/block/xen-blkfront.c')
 drivers/block/xen-blkfront.c | 30 +++++++++++++++++++-----------
 1 file changed, 19 insertions(+), 11 deletions(-)
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index ef5ce43e307a..0b32c90ffc3f 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -481,6 +481,23 @@ static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
 	return 0;
 }
 
+static unsigned long blkif_ring_get_request(struct blkfront_ring_info *rinfo,
+					    struct request *req,
+					    struct blkif_request **ring_req)
+{
+	unsigned long id;
+
+	*ring_req = RING_GET_REQUEST(&rinfo->ring, rinfo->ring.req_prod_pvt);
+	rinfo->ring.req_prod_pvt++;
+
+	id = get_id_from_freelist(rinfo);
+	rinfo->shadow[id].request = req;
+
+	(*ring_req)->u.rw.id = id;
+
+	return id;
+}
+
 static int blkif_queue_discard_req(struct request *req, struct blkfront_ring_info *rinfo)
 {
 	struct blkfront_info *info = rinfo->dev_info;
@@ -488,9 +505,7 @@ static int blkif_queue_discard_req(struct request *req, struct blkfront_ring_inf
 	unsigned long id;
 
 	/* Fill out a communications ring structure. */
-	ring_req = RING_GET_REQUEST(&rinfo->ring, rinfo->ring.req_prod_pvt);
-	id = get_id_from_freelist(rinfo);
-	rinfo->shadow[id].request = req;
+	id = blkif_ring_get_request(rinfo, req, &ring_req);
 
 	ring_req->operation = BLKIF_OP_DISCARD;
 	ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
@@ -501,8 +516,6 @@ static int blkif_queue_discard_req(struct request *req, struct blkfront_ring_inf
 	else
 		ring_req->u.discard.flag = 0;
 
-	rinfo->ring.req_prod_pvt++;
-
 	/* Keep a private copy so we can reissue requests when recovering. */
 	rinfo->shadow[id].req = *ring_req;
 
@@ -635,9 +648,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
 	}
 
 	/* Fill out a communications ring structure. */
-	ring_req = RING_GET_REQUEST(&rinfo->ring, rinfo->ring.req_prod_pvt);
-	id = get_id_from_freelist(rinfo);
-	rinfo->shadow[id].request = req;
+	id = blkif_ring_get_request(rinfo, req, &ring_req);
 
 	BUG_ON(info->max_indirect_segments == 0 &&
 	       GREFS(req->nr_phys_segments) > BLKIF_MAX_SEGMENTS_PER_REQUEST);
@@ -650,7 +661,6 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
 	for_each_sg(rinfo->shadow[id].sg, sg, num_sg, i)
 		num_grant += gnttab_count_grant(sg->offset, sg->length);
 
-	ring_req->u.rw.id = id;
 	rinfo->shadow[id].num_sg = num_sg;
 	if (num_grant > BLKIF_MAX_SEGMENTS_PER_REQUEST) {
 		/*
@@ -716,8 +726,6 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
 	if (setup.segments)
 		kunmap_atomic(setup.segments);
 
-	rinfo->ring.req_prod_pvt++;
-
 	/* Keep a private copy so we can reissue requests when recovering. */
 	rinfo->shadow[id].req = *ring_req;
 