Diffstat (limited to 'drivers/block/xen-blkback/blkback.c')
-rw-r--r--  drivers/block/xen-blkback/blkback.c | 130
1 file changed, 109 insertions(+), 21 deletions(-)
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 1540792b1e54..15ec4db194d1 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -39,6 +39,9 @@
 #include <linux/list.h>
 #include <linux/delay.h>
 #include <linux/freezer.h>
+#include <linux/loop.h>
+#include <linux/falloc.h>
+#include <linux/fs.h>
 
 #include <xen/events.h>
 #include <xen/page.h>
@@ -258,13 +261,16 @@ irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
 
 static void print_stats(struct xen_blkif *blkif)
 {
-	pr_info("xen-blkback (%s): oo %3d | rd %4d | wr %4d | f %4d\n",
+	pr_info("xen-blkback (%s): oo %3d | rd %4d | wr %4d | f %4d"
+		" | ds %4d\n",
 		current->comm, blkif->st_oo_req,
-		blkif->st_rd_req, blkif->st_wr_req, blkif->st_f_req);
+		blkif->st_rd_req, blkif->st_wr_req,
+		blkif->st_f_req, blkif->st_ds_req);
 	blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
 	blkif->st_rd_req = 0;
 	blkif->st_wr_req = 0;
 	blkif->st_oo_req = 0;
+	blkif->st_ds_req = 0;
 }
 
 int xen_blkif_schedule(void *arg)
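Note on the split format string above: C merges adjacent string literals at compile time, so the two pr_info() fragments form a single format string. A minimal standalone illustration (plain userspace C, not part of the patch):

	#include <stdio.h>

	int main(void)
	{
		/* The two adjacent literals are concatenated by the
		 * compiler into one format string, exactly as in the
		 * split pr_info() call in print_stats(). */
		printf("oo %3d | rd %4d | wr %4d | f %4d"
		       " | ds %4d\n", 1, 2, 3, 4, 5);
		return 0;
	}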
@@ -410,6 +416,59 @@ static int xen_blkbk_map(struct blkif_request *req,
 	return ret;
 }
 
+static void xen_blk_discard(struct xen_blkif *blkif, struct blkif_request *req)
+{
+	int err = 0;
+	int status = BLKIF_RSP_OKAY;
+	struct block_device *bdev = blkif->vbd.bdev;
+
+	if (blkif->blk_backend_type == BLKIF_BACKEND_PHY)
+		/* just forward the discard request */
+		err = blkdev_issue_discard(bdev,
+				req->u.discard.sector_number,
+				req->u.discard.nr_sectors,
+				GFP_KERNEL, 0);
+	else if (blkif->blk_backend_type == BLKIF_BACKEND_FILE) {
+		/* punch a hole in the backing file */
+		struct loop_device *lo = bdev->bd_disk->private_data;
+		struct file *file = lo->lo_backing_file;
+
+		if (file->f_op->fallocate)
+			err = file->f_op->fallocate(file,
+				FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE,
+				req->u.discard.sector_number << 9,
+				req->u.discard.nr_sectors << 9);
+		else
+			err = -EOPNOTSUPP;
+	} else
+		err = -EOPNOTSUPP;
+
+	if (err == -EOPNOTSUPP) {
+		pr_debug(DRV_PFX "discard op failed, not supported\n");
+		status = BLKIF_RSP_EOPNOTSUPP;
+	} else if (err)
+		status = BLKIF_RSP_ERROR;
+
+	make_response(blkif, req->id, req->operation, status);
+}
+
+static void xen_blk_drain_io(struct xen_blkif *blkif)
+{
+	atomic_set(&blkif->drain, 1);
+	do {
+		/* The initial value is one, and one refcnt taken at the
+		 * start of the xen_blkif_schedule thread. */
+		if (atomic_read(&blkif->refcnt) <= 2)
+			break;
+		wait_for_completion_interruptible_timeout(
+				&blkif->drain_complete, HZ);
+
+		if (!atomic_read(&blkif->drain))
+			break;
+	} while (!kthread_should_stop());
+	atomic_set(&blkif->drain, 0);
+}
+
 /*
  * Completion callback on the bio's. Called as bh->b_end_io()
  */
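The fallocate path above converts the request's 512-byte sector units into byte offsets with "<< 9" (2^9 = 512). A hedged userspace sketch of the same hole-punching call, assuming an open file descriptor fd; the helper name and parameters are illustrative, not from the patch:

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <linux/falloc.h>

	/* Punch a hole over [sector, sector + nr_sectors), given in
	 * 512-byte sectors. FALLOC_FL_KEEP_SIZE leaves the file length
	 * unchanged, matching the semantics the backend relies on. */
	static int punch_sectors(int fd, unsigned long long sector,
				 unsigned long long nr_sectors)
	{
		return fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
				 (off_t)(sector << 9),
				 (off_t)(nr_sectors << 9));
	}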
@@ -422,6 +481,11 @@ static void __end_block_io_op(struct pending_req *pending_req, int error)
 		pr_debug(DRV_PFX "flush diskcache op failed, not supported\n");
 		xen_blkbk_flush_diskcache(XBT_NIL, pending_req->blkif->be, 0);
 		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
+	} else if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
+		    (error == -EOPNOTSUPP)) {
+		pr_debug(DRV_PFX "write barrier op failed, not supported\n");
+		xen_blkbk_barrier(XBT_NIL, pending_req->blkif->be, 0);
+		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
 	} else if (error) {
 		pr_debug(DRV_PFX "Buffer not up-to-date at end of operation,"
 			 " error=%d\n", error);
@@ -438,6 +502,10 @@ static void __end_block_io_op(struct pending_req *pending_req, int error)
 		make_response(pending_req->blkif, pending_req->id,
 			      pending_req->operation, pending_req->status);
 		xen_blkif_put(pending_req->blkif);
+		if (atomic_read(&pending_req->blkif->refcnt) <= 2) {
+			if (atomic_read(&pending_req->blkif->drain))
+				complete(&pending_req->blkif->drain_complete);
+		}
 		free_req(pending_req);
 	}
 }
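Taken together, xen_blk_drain_io() and this completion hook form a simple drain: every in-flight request holds a reference, the drainer waits until the count falls back to its idle value (one base reference plus the xen_blkif_schedule thread's), and the last completer signals it. A generic userspace sketch of the same pattern with POSIX threads; the type, names, and the idle value of 2 mirror the code above but are illustrative, not kernel code:

	#include <pthread.h>
	#include <stdatomic.h>

	struct drainable {
		atomic_int refcnt;             /* idle value: 2 */
		atomic_int draining;
		pthread_mutex_t lock;
		pthread_cond_t drain_complete;
	};

	/* Drainer side: mirrors xen_blk_drain_io(). */
	static void drain_io(struct drainable *d)
	{
		atomic_store(&d->draining, 1);
		pthread_mutex_lock(&d->lock);
		while (atomic_load(&d->refcnt) > 2)
			pthread_cond_wait(&d->drain_complete, &d->lock);
		pthread_mutex_unlock(&d->lock);
		atomic_store(&d->draining, 0);
	}

	/* Completion side: mirrors the put + complete() above. */
	static void end_io(struct drainable *d)
	{
		int newcnt = atomic_fetch_sub(&d->refcnt, 1) - 1;

		if (newcnt <= 2 && atomic_load(&d->draining)) {
			pthread_mutex_lock(&d->lock);
			pthread_cond_signal(&d->drain_complete);
			pthread_mutex_unlock(&d->lock);
		}
	}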
@@ -532,7 +600,6 @@ do_block_io_op(struct xen_blkif *blkif)
 
 	return more_to_do;
 }
-
 /*
  * Transmutation of the 'struct blkif_request' to a proper 'struct bio'
  * and call the 'submit_bio' to pass it to the underlying storage.
@@ -549,6 +616,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
 	int i, nbio = 0;
 	int operation;
 	struct blk_plug plug;
+	bool drain = false;
 
 	switch (req->operation) {
 	case BLKIF_OP_READ:
@@ -559,11 +627,16 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
 		blkif->st_wr_req++;
 		operation = WRITE_ODIRECT;
 		break;
+	case BLKIF_OP_WRITE_BARRIER:
+		drain = true;
 	case BLKIF_OP_FLUSH_DISKCACHE:
 		blkif->st_f_req++;
 		operation = WRITE_FLUSH;
 		break;
-	case BLKIF_OP_WRITE_BARRIER:
+	case BLKIF_OP_DISCARD:
+		blkif->st_ds_req++;
+		operation = REQ_DISCARD;
+		break;
 	default:
 		operation = 0; /* make gcc happy */
 		goto fail_response;
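The missing break after "drain = true" is deliberate: a write barrier is handled as "drain all in-flight I/O, then issue a flush", so BLKIF_OP_WRITE_BARRIER falls through into the BLKIF_OP_FLUSH_DISKCACHE case to pick up operation = WRITE_FLUSH.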
@@ -572,7 +645,8 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
 
 	/* Check that the number of segments is sane. */
 	nseg = req->nr_segments;
-	if (unlikely(nseg == 0 && operation != WRITE_FLUSH) ||
+	if (unlikely(nseg == 0 && operation != WRITE_FLUSH &&
+				operation != REQ_DISCARD) ||
 	    unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
 		pr_debug(DRV_PFX "Bad number of segments in request (%d)\n",
 			 nseg);
@@ -621,16 +695,25 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
 		}
 	}
 
+	/* Wait on all outstanding I/O's and once that has been completed
+	 * issue the WRITE_FLUSH.
+	 */
+	if (drain)
+		xen_blk_drain_io(pending_req->blkif);
+
 	/*
 	 * If we have failed at this point, we need to undo the M2P override,
 	 * set gnttab_set_unmap_op on all of the grant references and perform
 	 * the hypercall to unmap the grants - that is all done in
 	 * xen_blkbk_unmap.
 	 */
-	if (xen_blkbk_map(req, pending_req, seg))
+	if (operation != REQ_DISCARD && xen_blkbk_map(req, pending_req, seg))
 		goto fail_flush;
 
-	/* This corresponding xen_blkif_put is done in __end_block_io_op */
+	/*
+	 * This corresponding xen_blkif_put is done in __end_block_io_op, or
+	 * below (in "!bio") if we are handling a BLKIF_OP_DISCARD.
+	 */
 	xen_blkif_get(blkif);
 
 	for (i = 0; i < nseg; i++) {
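Note that xen_blk_drain_io() runs before this request's own bios are submitted: the backend first waits for every in-flight request to complete and only then issues the WRITE_FLUSH, which together reproduce the ordering-plus-durability guarantee that BLKIF_OP_WRITE_BARRIER promises the frontend.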
@@ -654,18 +737,25 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
 		preq.sector_number += seg[i].nsec;
 	}
 
-	/* This will be hit if the operation was a flush. */
+	/* This will be hit if the operation was a flush or discard. */
 	if (!bio) {
-		BUG_ON(operation != WRITE_FLUSH);
+		BUG_ON(operation != WRITE_FLUSH && operation != REQ_DISCARD);
 
-		bio = bio_alloc(GFP_KERNEL, 0);
-		if (unlikely(bio == NULL))
-			goto fail_put_bio;
+		if (operation == WRITE_FLUSH) {
+			bio = bio_alloc(GFP_KERNEL, 0);
+			if (unlikely(bio == NULL))
+				goto fail_put_bio;
 
-		biolist[nbio++] = bio;
-		bio->bi_bdev    = preq.bdev;
-		bio->bi_private = pending_req;
-		bio->bi_end_io  = end_block_io_op;
+			biolist[nbio++] = bio;
+			bio->bi_bdev    = preq.bdev;
+			bio->bi_private = pending_req;
+			bio->bi_end_io  = end_block_io_op;
+		} else if (operation == REQ_DISCARD) {
+			xen_blk_discard(blkif, req);
+			xen_blkif_put(blkif);
+			free_req(pending_req);
+			return 0;
+		}
 	}
 
 	/*
@@ -685,7 +775,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
 
 	if (operation == READ)
 		blkif->st_rd_sect += preq.nr_sects;
-	else if (operation == WRITE || operation == WRITE_FLUSH)
+	else if (operation & WRITE)
 		blkif->st_wr_sect += preq.nr_sects;
 
 	return 0;
@@ -765,9 +855,9 @@ static int __init xen_blkif_init(void)
 
 	mmap_pages = xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST;
 
-	blkbk->pending_reqs          = kmalloc(sizeof(blkbk->pending_reqs[0]) *
+	blkbk->pending_reqs          = kzalloc(sizeof(blkbk->pending_reqs[0]) *
 					xen_blkif_reqs, GFP_KERNEL);
-	blkbk->pending_grant_handles = kzalloc(sizeof(blkbk->pending_grant_handles[0]) *
+	blkbk->pending_grant_handles = kmalloc(sizeof(blkbk->pending_grant_handles[0]) *
 					mmap_pages, GFP_KERNEL);
 	blkbk->pending_pages         = kzalloc(sizeof(blkbk->pending_pages[0]) *
 					mmap_pages, GFP_KERNEL);
@@ -790,8 +880,6 @@ static int __init xen_blkif_init(void)
 	if (rc)
 		goto failed_init;
 
-	memset(blkbk->pending_reqs, 0, sizeof(blkbk->pending_reqs));
-
 	INIT_LIST_HEAD(&blkbk->pending_free);
 	spin_lock_init(&blkbk->pending_free_lock);
 	init_waitqueue_head(&blkbk->pending_free_wq);
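The kmalloc() to kzalloc() switch for pending_reqs in the previous hunk makes the memset() removed here unnecessary, and also quietly fixes it: blkbk->pending_reqs is a pointer, so sizeof(blkbk->pending_reqs) is the pointer size, not the size of the allocation, and the memset only zeroed the first few bytes. A standalone illustration of that sizeof pitfall (not from the patch):

	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		int *arr = malloc(100 * sizeof(arr[0]));

		/* sizeof(arr) is the size of the pointer (typically 8),
		 * not the 400-byte buffer it points to, so a
		 * memset(arr, 0, sizeof(arr)) would zero almost nothing. */
		printf("sizeof(arr) = %zu, buffer = %zu\n",
		       sizeof(arr), 100 * sizeof(arr[0]));
		free(arr);
		return 0;
	}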
