path: root/drivers/block/xen-blkfront.c
author    Jens Axboe <jens.axboe@oracle.com>  2009-02-24 02:10:09 -0500
committer Jens Axboe <jens.axboe@oracle.com>  2009-02-26 04:45:48 -0500
commit    9e973e64ac6dc504e6447d52193d4fff1a670156 (patch)
tree      518cb0f34f9229f7faa81754733ce3926b84625c /drivers/block/xen-blkfront.c
parent    1e42807918d17e8c93bf14fbb74be84b141334c1 (diff)
xen/blkfront: use blk_rq_map_sg to generate ring entries
On occasion, the request will apparently have more segments than we
fit into the ring.  Jens says:

> The second problem is that the block layer then appears to create one
> too many segments, but from the dump it has rq->nr_phys_segments ==
> BLKIF_MAX_SEGMENTS_PER_REQUEST. I suspect the latter is due to
> xen-blkfront not handling the merging on its own. It should check that
> the new page doesn't form part of the previous page. The
> rq_for_each_segment() iterates all single bits in the request, not dma
> segments. The "easiest" way to do this is to call blk_rq_map_sg() and
> then iterate the mapped sg list. That will give you what you are
> looking for.
>
> Here's a test patch, compiles but otherwise untested. I spent more
> time figuring out how to enable XEN than to code it up, so YMMV!
> Probably the sg list wants to be put inside the ring and only
> initialized on allocation, then you can get rid of the sg on stack and
> sg_init_table() loop call in the function. I'll leave that, and the
> testing, to you.

[Moved sg array into info structure, and initialize once. -J]

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
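For reference, the pattern described above -- mapping the whole request into a
preallocated scatterlist with blk_rq_map_sg() and then walking only the merged
DMA segments -- looks roughly like the sketch below. This is not the literal
driver code: the grant-table calls and ring bookkeeping from the hunks that
follow are elided, and the helper name is made up for illustration.

/*
 * Sketch only: a hypothetical helper showing the blk_rq_map_sg()
 * pattern this patch adopts.  The real change lives in
 * blkif_queue_request() in the diff below.
 */
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

static unsigned int map_request_segments(struct request_queue *q,
                                          struct request *req,
                                          struct scatterlist *sglist)
{
        struct scatterlist *sg;
        unsigned int nsegs;
        int i;

        /*
         * blk_rq_map_sg() merges physically adjacent bio_vecs, so nsegs
         * counts DMA segments (at most req->nr_phys_segments) rather than
         * every bio_vec the way rq_for_each_segment() would.
         */
        nsegs = blk_rq_map_sg(q, req, sglist);

        for_each_sg(sglist, sg, nsegs, i) {
                unsigned int fsect = sg->offset >> 9;               /* first 512-byte sector */
                unsigned int lsect = fsect + (sg->length >> 9) - 1; /* last 512-byte sector */

                /* ... grant the frame behind sg_page(sg) and fill ring entry i ... */
        }

        return nsegs;
}

In the patch itself the sg array lives in struct blkfront_info and is
initialized once with sg_init_table() in setup_blkring(), so no per-request
allocation or on-stack list is needed.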
Diffstat (limited to 'drivers/block/xen-blkfront.c')
-rw-r--r--   drivers/block/xen-blkfront.c   30
1 file changed, 15 insertions, 15 deletions
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 918ef725de41..b6c8ce254359 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -40,6 +40,7 @@
 #include <linux/hdreg.h>
 #include <linux/cdrom.h>
 #include <linux/module.h>
+#include <linux/scatterlist.h>
 
 #include <xen/xenbus.h>
 #include <xen/grant_table.h>
@@ -82,6 +83,7 @@ struct blkfront_info
 	enum blkif_state connected;
 	int ring_ref;
 	struct blkif_front_ring ring;
+	struct scatterlist sg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 	unsigned int evtchn, irq;
 	struct request_queue *rq;
 	struct work_struct work;
@@ -204,12 +206,11 @@ static int blkif_queue_request(struct request *req)
 	struct blkfront_info *info = req->rq_disk->private_data;
 	unsigned long buffer_mfn;
 	struct blkif_request *ring_req;
-	struct req_iterator iter;
-	struct bio_vec *bvec;
 	unsigned long id;
 	unsigned int fsect, lsect;
-	int ref;
+	int i, ref;
 	grant_ref_t gref_head;
+	struct scatterlist *sg;
 
 	if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
 		return 1;
@@ -238,12 +239,13 @@ static int blkif_queue_request(struct request *req)
 	if (blk_barrier_rq(req))
 		ring_req->operation = BLKIF_OP_WRITE_BARRIER;
 
-	ring_req->nr_segments = 0;
-	rq_for_each_segment(bvec, req, iter) {
-		BUG_ON(ring_req->nr_segments == BLKIF_MAX_SEGMENTS_PER_REQUEST);
-		buffer_mfn = pfn_to_mfn(page_to_pfn(bvec->bv_page));
-		fsect = bvec->bv_offset >> 9;
-		lsect = fsect + (bvec->bv_len >> 9) - 1;
+	ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
+	BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
+
+	for_each_sg(info->sg, sg, ring_req->nr_segments, i) {
+		buffer_mfn = pfn_to_mfn(page_to_pfn(sg_page(sg)));
+		fsect = sg->offset >> 9;
+		lsect = fsect + (sg->length >> 9) - 1;
 		/* install a grant reference. */
 		ref = gnttab_claim_grant_reference(&gref_head);
 		BUG_ON(ref == -ENOSPC);
@@ -254,16 +256,12 @@ static int blkif_queue_request(struct request *req)
 				buffer_mfn,
 				rq_data_dir(req) );
 
-		info->shadow[id].frame[ring_req->nr_segments] =
-				mfn_to_pfn(buffer_mfn);
-
-		ring_req->seg[ring_req->nr_segments] =
+		info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn);
+		ring_req->seg[i] =
 				(struct blkif_request_segment) {
 					.gref = ref,
 					.first_sect = fsect,
 					.last_sect = lsect };
-
-		ring_req->nr_segments++;
 	}
 
 	info->ring.req_prod_pvt++;
@@ -622,6 +620,8 @@ static int setup_blkring(struct xenbus_device *dev,
 	SHARED_RING_INIT(sring);
 	FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);
 
+	sg_init_table(info->sg, BLKIF_MAX_SEGMENTS_PER_REQUEST);
+
 	err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring));
 	if (err < 0) {
 		free_page((unsigned long)sring);