diff options
author | Javier González <jg@lightnvm.io> | 2017-04-15 14:55:37 -0400 |
---|---|---|
committer | Jens Axboe <axboe@fb.com> | 2017-04-16 12:06:25 -0400 |
commit | 17912c49edfa6ab552329bf63d1b757eb874673b (patch) | |
tree | c523ceb84476721084dfb4f68cac0496c8cbb383 /drivers/lightnvm/core.c | |
parent | 2849a7becb9586a09553680a8b752fdb517b26e3 (diff) |
lightnvm: submit erases using the I/O path
Until now erases have been submitted as synchronous commands through a
dedicated erase function. In order to enable targets implementing
asynchronous erases, refactor the erase path so that it uses the normal
async I/O submission functions. If a target requires sync I/O, it can
implement it internally. Also, adapt rrpc to use the new erase path.
Signed-off-by: Javier González <javier@cnexlabs.com>
Fixed spelling error.
Signed-off-by: Matias Bjørling <matias@cnexlabs.com>
Signed-off-by: Matias Bjørling <matias@cnexlabs.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'drivers/lightnvm/core.c')
-rw-r--r-- | drivers/lightnvm/core.c | 54 |
1 file changed, 33 insertions, 21 deletions
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c index 5262ba66a7a7..95105c47e082 100644 --- a/drivers/lightnvm/core.c +++ b/drivers/lightnvm/core.c | |||
@@ -590,11 +590,11 @@ int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas, | |||
590 | 590 | ||
591 | memset(&rqd, 0, sizeof(struct nvm_rq)); | 591 | memset(&rqd, 0, sizeof(struct nvm_rq)); |
592 | 592 | ||
593 | nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas, 1); | 593 | nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas, 1); |
594 | nvm_rq_tgt_to_dev(tgt_dev, &rqd); | 594 | nvm_rq_tgt_to_dev(tgt_dev, &rqd); |
595 | 595 | ||
596 | ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type); | 596 | ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type); |
597 | nvm_free_rqd_ppalist(dev, &rqd); | 597 | nvm_free_rqd_ppalist(tgt_dev, &rqd); |
598 | if (ret) { | 598 | if (ret) { |
599 | pr_err("nvm: failed bb mark\n"); | 599 | pr_err("nvm: failed bb mark\n"); |
600 | return -EINVAL; | 600 | return -EINVAL; |
@@ -626,34 +626,45 @@ int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd) | |||
626 | } | 626 | } |
627 | EXPORT_SYMBOL(nvm_submit_io); | 627 | EXPORT_SYMBOL(nvm_submit_io); |
628 | 628 | ||
629 | int nvm_erase_blk(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas, int flags) | 629 | static void nvm_end_io_sync(struct nvm_rq *rqd) |
630 | { | 630 | { |
631 | struct nvm_dev *dev = tgt_dev->parent; | 631 | struct completion *waiting = rqd->private; |
632 | struct nvm_rq rqd; | ||
633 | int ret; | ||
634 | 632 | ||
635 | if (!dev->ops->erase_block) | 633 | complete(waiting); |
636 | return 0; | 634 | } |
637 | 635 | ||
638 | nvm_map_to_dev(tgt_dev, ppas); | 636 | int nvm_erase_sync(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas, |
637 | int nr_ppas) | ||
638 | { | ||
639 | struct nvm_geo *geo = &tgt_dev->geo; | ||
640 | struct nvm_rq rqd; | ||
641 | int ret; | ||
642 | DECLARE_COMPLETION_ONSTACK(wait); | ||
639 | 643 | ||
640 | memset(&rqd, 0, sizeof(struct nvm_rq)); | 644 | memset(&rqd, 0, sizeof(struct nvm_rq)); |
641 | 645 | ||
642 | ret = nvm_set_rqd_ppalist(dev, &rqd, ppas, 1, 1); | 646 | rqd.opcode = NVM_OP_ERASE; |
647 | rqd.end_io = nvm_end_io_sync; | ||
648 | rqd.private = &wait; | ||
649 | rqd.flags = geo->plane_mode >> 1; | ||
650 | |||
651 | ret = nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas, 1); | ||
643 | if (ret) | 652 | if (ret) |
644 | return ret; | 653 | return ret; |
645 | 654 | ||
646 | nvm_rq_tgt_to_dev(tgt_dev, &rqd); | 655 | ret = nvm_submit_io(tgt_dev, &rqd); |
647 | 656 | if (ret) { | |
648 | rqd.flags = flags; | 657 | pr_err("rrpr: erase I/O submission failed: %d\n", ret); |
649 | 658 | goto free_ppa_list; | |
650 | ret = dev->ops->erase_block(dev, &rqd); | 659 | } |
660 | wait_for_completion_io(&wait); | ||
651 | 661 | ||
652 | nvm_free_rqd_ppalist(dev, &rqd); | 662 | free_ppa_list: |
663 | nvm_free_rqd_ppalist(tgt_dev, &rqd); | ||
653 | 664 | ||
654 | return ret; | 665 | return ret; |
655 | } | 666 | } |
656 | EXPORT_SYMBOL(nvm_erase_blk); | 667 | EXPORT_SYMBOL(nvm_erase_sync); |
657 | 668 | ||
658 | int nvm_get_l2p_tbl(struct nvm_tgt_dev *tgt_dev, u64 slba, u32 nlb, | 669 | int nvm_get_l2p_tbl(struct nvm_tgt_dev *tgt_dev, u64 slba, u32 nlb, |
659 | nvm_l2p_update_fn *update_l2p, void *priv) | 670 | nvm_l2p_update_fn *update_l2p, void *priv) |
@@ -732,10 +743,11 @@ void nvm_put_area(struct nvm_tgt_dev *tgt_dev, sector_t begin) | |||
732 | } | 743 | } |
733 | EXPORT_SYMBOL(nvm_put_area); | 744 | EXPORT_SYMBOL(nvm_put_area); |
734 | 745 | ||
735 | int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd, | 746 | int nvm_set_rqd_ppalist(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd, |
736 | const struct ppa_addr *ppas, int nr_ppas, int vblk) | 747 | const struct ppa_addr *ppas, int nr_ppas, int vblk) |
737 | { | 748 | { |
738 | struct nvm_geo *geo = &dev->geo; | 749 | struct nvm_dev *dev = tgt_dev->parent; |
750 | struct nvm_geo *geo = &tgt_dev->geo; | ||
739 | int i, plane_cnt, pl_idx; | 751 | int i, plane_cnt, pl_idx; |
740 | struct ppa_addr ppa; | 752 | struct ppa_addr ppa; |
741 | 753 | ||
@@ -773,12 +785,12 @@ int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd, | |||
773 | } | 785 | } |
774 | EXPORT_SYMBOL(nvm_set_rqd_ppalist); | 786 | EXPORT_SYMBOL(nvm_set_rqd_ppalist); |
775 | 787 | ||
776 | void nvm_free_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd) | 788 | void nvm_free_rqd_ppalist(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd) |
777 | { | 789 | { |
778 | if (!rqd->ppa_list) | 790 | if (!rqd->ppa_list) |
779 | return; | 791 | return; |
780 | 792 | ||
781 | nvm_dev_dma_free(dev, rqd->ppa_list, rqd->dma_ppa_list); | 793 | nvm_dev_dma_free(tgt_dev->parent, rqd->ppa_list, rqd->dma_ppa_list); |
782 | } | 794 | } |
783 | EXPORT_SYMBOL(nvm_free_rqd_ppalist); | 795 | EXPORT_SYMBOL(nvm_free_rqd_ppalist); |
784 | 796 | ||