author     Roger Pau Monne <roger.pau@citrix.com>            2013-05-02 04:58:50 -0400
committer  Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>    2013-05-08 08:46:51 -0400
commit     b7649158a0d241f8d53d13ff7441858539e16656 (patch)
tree       acee055fae1b48f1c986dec29481d2b999dd3127 /drivers/block/xen-blkfront.c
parent     bb642e8315fd573795e8b6fa9b9629064d73add1 (diff)
xen-blkfront: use a different scatterlist for each request
In blkif_queue_request blkfront iterates over the scatterlist in order to set the segments of the request, and in blkif_completion blkfront iterates over the raw request, which makes it hard to know the exact source and destination memory positions.

This can be solved by allocating a scatterlist for each request, which is kept until the request is finished, allowing us to copy the data back to the original memory without having to iterate over the raw request.

Oracle-Bug: 16660413 - LARGE ASYNCHRONOUS READS APPEAR BROKEN ON 2.6.39-400
CC: stable@vger.kernel.org
Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Reported-and-Tested-by: Anne Milicia <anne.milicia@oracle.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
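The core idea is easier to see in isolation. Below is a minimal userspace sketch of the pattern only, not the driver code; all names (struct seg, struct shadow_req, queue_request, complete_read) are invented for illustration. Each in-flight request owns its own segment list, recorded at submission and walked again at completion, so copying data back to the caller's memory never requires re-iterating over the raw request.

/*
 * Illustrative userspace model only -- not xen-blkfront. It mimics the
 * commit's approach: every in-flight request keeps a private segment
 * list (the per-request "scatterlist"), so completion can copy the
 * backend data back without walking the original request again.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct seg {                     /* stand-in for one scatterlist entry  */
	char   *dest;            /* where the caller wants the data     */
	size_t  len;
};

struct shadow_req {              /* stand-in for struct blk_shadow      */
	struct seg *sg;          /* kept until the request completes    */
	int         nseg;
};

/* Submission: record every segment's destination in the shadow. */
static void queue_request(struct shadow_req *s, char **bufs,
			  size_t *lens, int nseg)
{
	s->sg = calloc(nseg, sizeof(*s->sg));
	s->nseg = nseg;
	for (int i = 0; i < nseg; i++) {
		s->sg[i].dest = bufs[i];
		s->sg[i].len  = lens[i];
	}
}

/* Completion: walk the shadow's own segment list (not the original
 * request) and copy each backend page back to its destination. */
static void complete_read(struct shadow_req *s, char **shared_pages)
{
	for (int i = 0; i < s->nseg; i++)
		memcpy(s->sg[i].dest, shared_pages[i], s->sg[i].len);
	free(s->sg);
	s->sg = NULL;
}

int main(void)
{
	char a[8] = {0}, b[8] = {0};
	char *bufs[]   = { a, b };
	size_t lens[]  = { sizeof(a), sizeof(b) };
	char *shared[] = { "seg-one", "seg-two" };   /* fake backend pages */
	struct shadow_req s;

	queue_request(&s, bufs, lens, 2);
	complete_read(&s, shared);
	printf("%s %s\n", a, b);                     /* prints: seg-one seg-two */
	return 0;
}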
Diffstat (limited to 'drivers/block/xen-blkfront.c')
-rw-r--r--   drivers/block/xen-blkfront.c   43
1 file changed, 18 insertions(+), 25 deletions(-)
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 82d63d5b1750..bac8cf31319b 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -76,6 +76,7 @@ struct blk_shadow {
 	struct request *request;
 	struct grant **grants_used;
 	struct grant **indirect_grants;
+	struct scatterlist *sg;
 };
 
 struct split_bio {
@@ -113,7 +114,6 @@ struct blkfront_info
 	enum blkif_state connected;
 	int ring_ref;
 	struct blkif_front_ring ring;
-	struct scatterlist *sg;
 	unsigned int evtchn, irq;
 	struct request_queue *rq;
 	struct work_struct work;
@@ -438,7 +438,7 @@ static int blkif_queue_request(struct request *req)
 	       req->nr_phys_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
 	BUG_ON(info->max_indirect_segments &&
 	       req->nr_phys_segments > info->max_indirect_segments);
-	nseg = blk_rq_map_sg(req->q, req, info->sg);
+	nseg = blk_rq_map_sg(req->q, req, info->shadow[id].sg);
 	ring_req->u.rw.id = id;
 	if (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST) {
 		/*
@@ -469,7 +469,7 @@ static int blkif_queue_request(struct request *req)
 		}
 		ring_req->u.rw.nr_segments = nseg;
 	}
-	for_each_sg(info->sg, sg, nseg, i) {
+	for_each_sg(info->shadow[id].sg, sg, nseg, i) {
 		fsect = sg->offset >> 9;
 		lsect = fsect + (sg->length >> 9) - 1;
 
@@ -914,8 +914,6 @@ static void blkif_free(struct blkfront_info *info, int suspend)
 	}
 	BUG_ON(info->persistent_gnts_c != 0);
 
-	kfree(info->sg);
-	info->sg = NULL;
 	for (i = 0; i < BLK_RING_SIZE; i++) {
 		/*
 		 * Clear persistent grants present in requests already
@@ -953,6 +951,8 @@ free_shadow:
 		info->shadow[i].grants_used = NULL;
 		kfree(info->shadow[i].indirect_grants);
 		info->shadow[i].indirect_grants = NULL;
+		kfree(info->shadow[i].sg);
+		info->shadow[i].sg = NULL;
 	}
 
 	/* No more gnttab callback work. */
@@ -979,12 +979,9 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
 			     struct blkif_response *bret)
 {
 	int i = 0;
-	struct bio_vec *bvec;
-	struct req_iterator iter;
-	unsigned long flags;
+	struct scatterlist *sg;
 	char *bvec_data;
 	void *shared_data;
-	unsigned int offset = 0;
 	int nseg;
 
 	nseg = s->req.operation == BLKIF_OP_INDIRECT ?
@@ -997,19 +994,16 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
 		 * than PAGE_SIZE, we have to keep track of the current offset,
 		 * to be sure we are copying the data from the right shared page.
 		 */
-		rq_for_each_segment(bvec, s->request, iter) {
-			BUG_ON((bvec->bv_offset + bvec->bv_len) > PAGE_SIZE);
-			if (bvec->bv_offset < offset)
-				i++;
-			BUG_ON(i >= nseg);
+		for_each_sg(s->sg, sg, nseg, i) {
+			BUG_ON(sg->offset + sg->length > PAGE_SIZE);
 			shared_data = kmap_atomic(
 				pfn_to_page(s->grants_used[i]->pfn));
-			bvec_data = bvec_kmap_irq(bvec, &flags);
-			memcpy(bvec_data, shared_data + bvec->bv_offset,
-				bvec->bv_len);
-			bvec_kunmap_irq(bvec_data, &flags);
+			bvec_data = kmap_atomic(sg_page(sg));
+			memcpy(bvec_data + sg->offset,
+			       shared_data + sg->offset,
+			       sg->length);
+			kunmap_atomic(bvec_data);
 			kunmap_atomic(shared_data);
-			offset = bvec->bv_offset + bvec->bv_len;
 		}
 	}
 	/* Add the persistent grant into the list of free grants */
@@ -1656,10 +1650,6 @@ static int blkfront_setup_indirect(struct blkfront_info *info)
 					  xen_blkif_max_segments);
 		segs = info->max_indirect_segments;
 	}
-	info->sg = kzalloc(sizeof(info->sg[0]) * segs, GFP_KERNEL);
-	if (info->sg == NULL)
-		goto out_of_memory;
-	sg_init_table(info->sg, segs);
 
 	err = fill_grant_buffer(info, (segs + INDIRECT_GREFS(segs)) * BLK_RING_SIZE);
 	if (err)
@@ -1669,26 +1659,29 @@ static int blkfront_setup_indirect(struct blkfront_info *info)
 		info->shadow[i].grants_used = kzalloc(
 			sizeof(info->shadow[i].grants_used[0]) * segs,
 			GFP_NOIO);
+		info->shadow[i].sg = kzalloc(sizeof(info->shadow[i].sg[0]) * segs, GFP_NOIO);
 		if (info->max_indirect_segments)
 			info->shadow[i].indirect_grants = kzalloc(
 				sizeof(info->shadow[i].indirect_grants[0]) *
 				INDIRECT_GREFS(segs),
 				GFP_NOIO);
 		if ((info->shadow[i].grants_used == NULL) ||
+			(info->shadow[i].sg == NULL) ||
 		     (info->max_indirect_segments &&
 		     (info->shadow[i].indirect_grants == NULL)))
 			goto out_of_memory;
+		sg_init_table(info->shadow[i].sg, segs);
 	}
 
 
 	return 0;
 
 out_of_memory:
-	kfree(info->sg);
-	info->sg = NULL;
 	for (i = 0; i < BLK_RING_SIZE; i++) {
 		kfree(info->shadow[i].grants_used);
 		info->shadow[i].grants_used = NULL;
+		kfree(info->shadow[i].sg);
+		info->shadow[i].sg = NULL;
 		kfree(info->shadow[i].indirect_grants);
 		info->shadow[i].indirect_grants = NULL;
 	}