author	Roger Pau Monne <roger.pau@citrix.com>	2013-05-02 04:58:50 -0400
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2013-08-11 21:35:24 -0400
commit	7b2b160da7661bb2ade3f924b1bd3e3084e53341 (patch)
tree	c4a6be4110a7572be202ac5a3bc35765c240bfae /drivers/block
parent	d56b0022c1cc6c63ab85f3bebcd8bf3dfe4be5ca (diff)
xen-blkfront: use a different scatterlist for each request
commit b7649158a0d241f8d53d13ff7441858539e16656 upstream.

In blkif_queue_request blkfront iterates over the scatterlist in order
to set the segments of the request, and in blkif_completion blkfront
iterates over the raw request, which makes it hard to know the exact
position of the source and destination memory.

This can be solved by allocating a scatterlist for each request, which
will be kept until the request is finished, allowing us to copy the
data back to the original memory without having to iterate over the
raw request.

Oracle-Bug: 16660413 - LARGE ASYNCHRONOUS READS APPEAR BROKEN ON 2.6.39-400
CC: stable@vger.kernel.org
Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Reported-and-Tested-by: Anne Milicia <anne.milicia@oracle.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers/block')
-rw-r--r--	drivers/block/xen-blkfront.c	36
1 file changed, 17 insertions(+), 19 deletions(-)
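
In essence, the patch moves the scatterlist from the shared blkfront_info into
each blk_shadow slot, so the completion handler can walk the exact sg list that
was built at queue time instead of re-iterating the raw request. The sketch
below condenses that completion-path copy from the hunks that follow; the
standalone helper name copy_read_segments() is hypothetical and only groups the
in-tree logic for illustration, assuming the single-argument kmap_atomic() form
used by this 3.x backport:

/* Hypothetical helper condensing the new blkif_completion() read path:
 * walk the scatterlist stored in the shadow entry and copy each segment
 * from the granted (shared) page back into the request's own pages.
 */
static void copy_read_segments(struct blk_shadow *s)
{
	struct scatterlist *sg;
	void *shared_data;
	char *bvec_data;
	int i, nseg = s->req.u.rw.nr_segments;

	for_each_sg(s->sg, sg, nseg, i) {
		/* each segment is confined to a single granted page */
		BUG_ON(sg->offset + sg->length > PAGE_SIZE);

		shared_data = kmap_atomic(pfn_to_page(s->grants_used[i]->pfn));
		bvec_data = kmap_atomic(sg_page(sg));
		memcpy(bvec_data + sg->offset,
		       shared_data + sg->offset,
		       sg->length);
		kunmap_atomic(bvec_data);
		kunmap_atomic(shared_data);
	}
}

Because the per-request sg array lives in the shadow ring entry, it stays valid
for exactly as long as the request is outstanding, which is why sg_init_table()
is now done once per ring slot in setup_blkring().
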
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index d89ef86220f4..69b45fc97276 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -75,6 +75,7 @@ struct blk_shadow {
 	struct blkif_request req;
 	struct request *request;
 	struct grant *grants_used[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+	struct scatterlist sg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 };
 
 static DEFINE_MUTEX(blkfront_mutex);
@@ -98,7 +99,6 @@ struct blkfront_info
 	enum blkif_state connected;
 	int ring_ref;
 	struct blkif_front_ring ring;
-	struct scatterlist sg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 	unsigned int evtchn, irq;
 	struct request_queue *rq;
 	struct work_struct work;
@@ -422,11 +422,11 @@ static int blkif_queue_request(struct request *req)
 			ring_req->u.discard.flag = 0;
 	} else {
 		ring_req->u.rw.nr_segments = blk_rq_map_sg(req->q, req,
-							   info->sg);
+							   info->shadow[id].sg);
 		BUG_ON(ring_req->u.rw.nr_segments >
 		       BLKIF_MAX_SEGMENTS_PER_REQUEST);
 
-		for_each_sg(info->sg, sg, ring_req->u.rw.nr_segments, i) {
+		for_each_sg(info->shadow[id].sg, sg, ring_req->u.rw.nr_segments, i) {
 			fsect = sg->offset >> 9;
 			lsect = fsect + (sg->length >> 9) - 1;
 
@@ -867,12 +867,12 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
 			     struct blkif_response *bret)
 {
 	int i = 0;
-	struct bio_vec *bvec;
-	struct req_iterator iter;
-	unsigned long flags;
+	struct scatterlist *sg;
 	char *bvec_data;
 	void *shared_data;
-	unsigned int offset = 0;
+	int nseg;
+
+	nseg = s->req.u.rw.nr_segments;
 
 	if (bret->operation == BLKIF_OP_READ) {
 		/*
@@ -881,19 +881,16 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
 		 * than PAGE_SIZE, we have to keep track of the current offset,
 		 * to be sure we are copying the data from the right shared page.
 		 */
-		rq_for_each_segment(bvec, s->request, iter) {
-			BUG_ON((bvec->bv_offset + bvec->bv_len) > PAGE_SIZE);
-			if (bvec->bv_offset < offset)
-				i++;
-			BUG_ON(i >= s->req.u.rw.nr_segments);
+		for_each_sg(s->sg, sg, nseg, i) {
+			BUG_ON(sg->offset + sg->length > PAGE_SIZE);
 			shared_data = kmap_atomic(
 				pfn_to_page(s->grants_used[i]->pfn));
-			bvec_data = bvec_kmap_irq(bvec, &flags);
-			memcpy(bvec_data, shared_data + bvec->bv_offset,
-			       bvec->bv_len);
-			bvec_kunmap_irq(bvec_data, &flags);
+			bvec_data = kmap_atomic(sg_page(sg));
+			memcpy(bvec_data + sg->offset,
+			       shared_data + sg->offset,
+			       sg->length);
+			kunmap_atomic(bvec_data);
 			kunmap_atomic(shared_data);
-			offset = bvec->bv_offset + bvec->bv_len;
 		}
 	}
 	/* Add the persistent grant into the list of free grants */
@@ -1022,7 +1019,7 @@ static int setup_blkring(struct xenbus_device *dev,
 			 struct blkfront_info *info)
 {
 	struct blkif_sring *sring;
-	int err;
+	int err, i;
 
 	info->ring_ref = GRANT_INVALID_REF;
 
@@ -1034,7 +1031,8 @@ static int setup_blkring(struct xenbus_device *dev,
 	SHARED_RING_INIT(sring);
 	FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);
 
-	sg_init_table(info->sg, BLKIF_MAX_SEGMENTS_PER_REQUEST);
+	for (i = 0; i < BLK_RING_SIZE; i++)
+		sg_init_table(info->shadow[i].sg, BLKIF_MAX_SEGMENTS_PER_REQUEST);
 
 	/* Allocate memory for grants */
 	err = fill_grant_buffer(info, BLK_RING_SIZE *