diff options
author | Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> | 2011-04-15 11:35:13 -0400 |
---|---|---|
committer | Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> | 2011-04-15 11:35:13 -0400 |
commit | 1a95fe6e42cefc52c62c471ad87d7fe8643231df (patch) | |
tree | c291f0877992d08f8129e56a8e58dfbfbb00c072 /drivers/xen/blkback/blkback.c | |
parent | b0aef17924a06646403cae8eecf6c73219a63c19 (diff) |
xen/blkback: Shuffle code around (vbd_translate moved higher).
We move the chunk of code dealing with mapping pages granted
by the guest into a new xen_blk_map_buf function. We also move
the vbd_translate call so it is done much earlier.
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Diffstat (limited to 'drivers/xen/blkback/blkback.c')
-rw-r--r-- | drivers/xen/blkback/blkback.c | 129 |
1 file changed, 70 insertions, 59 deletions
diff --git a/drivers/xen/blkback/blkback.c b/drivers/xen/blkback/blkback.c index f282463d7b5c..211b2005f963 100644 --- a/drivers/xen/blkback/blkback.c +++ b/drivers/xen/blkback/blkback.c | |||
@@ -241,6 +241,10 @@ int blkif_schedule(void *arg) | |||
241 | return 0; | 241 | return 0; |
242 | } | 242 | } |
243 | 243 | ||
244 | struct seg_buf { | ||
245 | unsigned long buf; | ||
246 | unsigned int nsec; | ||
247 | }; | ||
244 | /* | 248 | /* |
245 | * Unmap the grant references, and also remove the M2P over-rides | 249 | * Unmap the grant references, and also remove the M2P over-rides |
246 | * used in the 'pending_req'. | 250 | * used in the 'pending_req'. |
@@ -278,6 +282,62 @@ static void fast_flush_area(struct pending_req *req) | |||
278 | } | 282 | } |
279 | } | 283 | } |
280 | } | 284 | } |
285 | static int xen_blk_map_buf(struct blkif_request *req, struct pending_req *pending_req, | ||
286 | struct seg_buf seg[]) | ||
287 | { | ||
288 | struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST]; | ||
289 | int i; | ||
290 | int nseg = req->nr_segments; | ||
291 | int ret = 0; | ||
292 | /* Fill out preq.nr_sects with proper amount of sectors, and setup | ||
293 | * assign map[..] with the PFN of the page in our domain with the | ||
294 | * corresponding grant reference for each page. | ||
295 | */ | ||
296 | for (i = 0; i < nseg; i++) { | ||
297 | uint32_t flags; | ||
298 | |||
299 | flags = GNTMAP_host_map; | ||
300 | if (pending_req->operation != BLKIF_OP_READ) | ||
301 | flags |= GNTMAP_readonly; | ||
302 | gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags, | ||
303 | req->u.rw.seg[i].gref, pending_req->blkif->domid); | ||
304 | } | ||
305 | |||
306 | ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nseg); | ||
307 | BUG_ON(ret); | ||
308 | |||
309 | /* Now swizzel the MFN in our domain with the MFN from the other domain | ||
310 | * so that when we access vaddr(pending_req,i) it has the contents of | ||
311 | * the page from the other domain. | ||
312 | */ | ||
313 | for (i = 0; i < nseg; i++) { | ||
314 | if (unlikely(map[i].status != 0)) { | ||
315 | DPRINTK("invalid buffer -- could not remap it\n"); | ||
316 | map[i].handle = BLKBACK_INVALID_HANDLE; | ||
317 | ret |= 1; | ||
318 | } | ||
319 | |||
320 | pending_handle(pending_req, i) = map[i].handle; | ||
321 | |||
322 | if (ret) | ||
323 | continue; | ||
324 | |||
325 | ret = m2p_add_override(PFN_DOWN(map[i].dev_bus_addr), | ||
326 | blkbk->pending_page(pending_req, i), false); | ||
327 | if (ret) { | ||
328 | printk(KERN_ALERT "Failed to install M2P override for"\ | ||
329 | " %lx (ret: %d)\n", (unsigned long) | ||
330 | map[i].dev_bus_addr, ret); | ||
331 | /* We could switch over to GNTTABOP_copy */ | ||
332 | continue; | ||
333 | } | ||
334 | |||
335 | seg[i].buf = map[i].dev_bus_addr | | ||
336 | (req->u.rw.seg[i].first_sect << 9); | ||
337 | } | ||
338 | return ret; | ||
339 | } | ||
340 | |||
281 | /* | 341 | /* |
282 | * Completion callback on the bio's. Called as bh->b_end_io() | 342 | * Completion callback on the bio's. Called as bh->b_end_io() |
283 | */ | 343 | */ |
@@ -411,15 +471,12 @@ static void dispatch_rw_block_io(struct blkif_st *blkif, | |||
411 | struct blkif_request *req, | 471 | struct blkif_request *req, |
412 | struct pending_req *pending_req) | 472 | struct pending_req *pending_req) |
413 | { | 473 | { |
414 | struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST]; | ||
415 | struct phys_req preq; | 474 | struct phys_req preq; |
416 | struct { | 475 | struct seg_buf seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; |
417 | unsigned long buf; unsigned int nsec; | ||
418 | } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; | ||
419 | unsigned int nseg; | 476 | unsigned int nseg; |
420 | struct bio *bio = NULL; | 477 | struct bio *bio = NULL; |
421 | struct bio *biolist[BLKIF_MAX_SEGMENTS_PER_REQUEST]; | 478 | struct bio *biolist[BLKIF_MAX_SEGMENTS_PER_REQUEST]; |
422 | int ret, i, nbio = 0; | 479 | int i, nbio = 0; |
423 | int operation; | 480 | int operation; |
424 | struct blk_plug plug; | 481 | struct blk_plug plug; |
425 | struct request_queue *q; | 482 | struct request_queue *q; |
@@ -444,6 +501,7 @@ static void dispatch_rw_block_io(struct blkif_st *blkif, | |||
444 | if (unlikely(nseg == 0 && operation != WRITE_BARRIER) || | 501 | if (unlikely(nseg == 0 && operation != WRITE_BARRIER) || |
445 | unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) { | 502 | unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) { |
446 | DPRINTK("Bad number of segments in request (%d)\n", nseg); | 503 | DPRINTK("Bad number of segments in request (%d)\n", nseg); |
504 | /* Haven't submitted any bio's yet. */ | ||
447 | goto fail_response; | 505 | goto fail_response; |
448 | } | 506 | } |
449 | 507 | ||
@@ -456,77 +514,30 @@ static void dispatch_rw_block_io(struct blkif_st *blkif, | |||
456 | pending_req->operation = req->operation; | 514 | pending_req->operation = req->operation; |
457 | pending_req->status = BLKIF_RSP_OKAY; | 515 | pending_req->status = BLKIF_RSP_OKAY; |
458 | pending_req->nr_pages = nseg; | 516 | pending_req->nr_pages = nseg; |
459 | |||
460 | /* Fill out preq.nr_sects with proper amount of sectors, and setup | ||
461 | * assign map[..] with the PFN of the page in our domain with the | ||
462 | * corresponding grant reference for each page. | ||
463 | */ | ||
464 | for (i = 0; i < nseg; i++) { | 517 | for (i = 0; i < nseg; i++) { |
465 | uint32_t flags; | ||
466 | |||
467 | seg[i].nsec = req->u.rw.seg[i].last_sect - | 518 | seg[i].nsec = req->u.rw.seg[i].last_sect - |
468 | req->u.rw.seg[i].first_sect + 1; | 519 | req->u.rw.seg[i].first_sect + 1; |
469 | if ((req->u.rw.seg[i].last_sect >= (PAGE_SIZE >> 9)) || | 520 | if ((req->u.rw.seg[i].last_sect >= (PAGE_SIZE >> 9)) || |
470 | (req->u.rw.seg[i].last_sect < req->u.rw.seg[i].first_sect)) | 521 | (req->u.rw.seg[i].last_sect < req->u.rw.seg[i].first_sect)) |
471 | goto fail_response; | 522 | goto fail_response; |
472 | preq.nr_sects += seg[i].nsec; | 523 | preq.nr_sects += seg[i].nsec; |
473 | |||
474 | flags = GNTMAP_host_map; | ||
475 | if (operation != READ) | ||
476 | flags |= GNTMAP_readonly; | ||
477 | gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags, | ||
478 | req->u.rw.seg[i].gref, blkif->domid); | ||
479 | } | 524 | } |
480 | 525 | ||
481 | ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nseg); | 526 | if (vbd_translate(&preq, blkif, operation) != 0) { |
482 | BUG_ON(ret); | 527 | DPRINTK("access denied: %s of [%llu,%llu] on dev=%04x\n", |
483 | 528 | operation == READ ? "read" : "write", | |
484 | /* Now swizzel the MFN in our domain with the MFN from the other domain | 529 | preq.sector_number, |
485 | * so that when we access vaddr(pending_req,i) it has the contents of | 530 | preq.sector_number + preq.nr_sects, preq.dev); |
486 | * the page from the other domain. | 531 | goto fail_response; |
487 | */ | ||
488 | for (i = 0; i < nseg; i++) { | ||
489 | if (unlikely(map[i].status != 0)) { | ||
490 | DPRINTK("invalid buffer -- could not remap it\n"); | ||
491 | map[i].handle = BLKBACK_INVALID_HANDLE; | ||
492 | ret |= 1; | ||
493 | } | ||
494 | |||
495 | pending_handle(pending_req, i) = map[i].handle; | ||
496 | |||
497 | if (ret) | ||
498 | continue; | ||
499 | |||
500 | ret = m2p_add_override(PFN_DOWN(map[i].dev_bus_addr), | ||
501 | blkbk->pending_page(pending_req, i), false); | ||
502 | if (ret) { | ||
503 | printk(KERN_ALERT "Failed to install M2P override for"\ | ||
504 | " %lx (ret: %d)\n", (unsigned long) | ||
505 | map[i].dev_bus_addr, ret); | ||
506 | /* We could switch over to GNTTABOP_copy */ | ||
507 | continue; | ||
508 | } | ||
509 | |||
510 | seg[i].buf = map[i].dev_bus_addr | | ||
511 | (req->u.rw.seg[i].first_sect << 9); | ||
512 | } | 532 | } |
513 | |||
514 | /* If we have failed at this point, we need to undo the M2P override, | 533 | /* If we have failed at this point, we need to undo the M2P override, |
515 | * set gnttab_set_unmap_op on all of the grant references and perform | 534 | * set gnttab_set_unmap_op on all of the grant references and perform |
516 | * the hypercall to unmap the grants - that is all done in | 535 | * the hypercall to unmap the grants - that is all done in |
517 | * fast_flush_area. | 536 | * fast_flush_area. |
518 | */ | 537 | */ |
519 | if (ret) | 538 | if (xen_blk_map_buf(req, pending_req, seg)) |
520 | goto fail_flush; | 539 | goto fail_flush; |
521 | 540 | ||
522 | if (vbd_translate(&preq, blkif, operation) != 0) { | ||
523 | DPRINTK("access denied: %s of [%llu,%llu] on dev=%04x\n", | ||
524 | operation == READ ? "read" : "write", | ||
525 | preq.sector_number, | ||
526 | preq.sector_number + preq.nr_sects, preq.dev); | ||
527 | goto fail_flush; | ||
528 | } | ||
529 | |||
530 | /* This corresponding blkif_put is done in __end_block_io_op */ | 541 | /* This corresponding blkif_put is done in __end_block_io_op */ |
531 | blkif_get(blkif); | 542 | blkif_get(blkif); |
532 | 543 | ||