diff options
author | Matias Bjørling <matias@cnexlabs.com> | 2017-01-31 07:17:10 -0500 |
---|---|---|
committer | Jens Axboe <axboe@fb.com> | 2017-01-31 10:32:13 -0500 |
commit | 10995c3dc9d7f47b92ff3e74b4bd191ddb7991ff (patch) | |
tree | 8e2f54e1d53cafa64e6c52ce283ddc402a90b6d1 /drivers/lightnvm/core.c | |
parent | ade69e2432b795c76653e1dfa09c684549826a50 (diff) |
lightnvm: collapse nvm_erase_ppa and nvm_erase_blk
After gennvm and core have been merged, there are no more callers to
nvm_erase_ppa. Therefore, collapse the device-specific and
target-specific erase functions.
Signed-off-by: Matias Bjørling <matias@cnexlabs.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'drivers/lightnvm/core.c')
-rw-r--r-- | drivers/lightnvm/core.c | 57 |
1 file changed, 26 insertions, 31 deletions
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c index e9a495650dd0..a4e2e3b01ae4 100644 --- a/drivers/lightnvm/core.c +++ b/drivers/lightnvm/core.c | |||
@@ -683,12 +683,34 @@ int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd) | |||
683 | } | 683 | } |
684 | EXPORT_SYMBOL(nvm_submit_io); | 684 | EXPORT_SYMBOL(nvm_submit_io); |
685 | 685 | ||
686 | int nvm_erase_blk(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p, int flags) | 686 | int nvm_erase_blk(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas, int flags) |
687 | { | 687 | { |
688 | /* Convert address space */ | 688 | struct nvm_dev *dev = tgt_dev->parent; |
689 | nvm_map_to_dev(tgt_dev, p); | 689 | struct nvm_rq rqd; |
690 | int ret; | ||
691 | |||
692 | if (!dev->ops->erase_block) | ||
693 | return 0; | ||
694 | |||
695 | ret = nvm_map_to_dev(tgt_dev, ppas); | ||
696 | if (ret) | ||
697 | return ret; | ||
698 | |||
699 | memset(&rqd, 0, sizeof(struct nvm_rq)); | ||
690 | 700 | ||
691 | return nvm_erase_ppa(tgt_dev->parent, p, 1, flags); | 701 | ret = nvm_set_rqd_ppalist(dev, &rqd, ppas, 1, 1); |
702 | if (ret) | ||
703 | return ret; | ||
704 | |||
705 | nvm_generic_to_addr_mode(dev, &rqd); | ||
706 | |||
707 | rqd.flags = flags; | ||
708 | |||
709 | ret = dev->ops->erase_block(dev, &rqd); | ||
710 | |||
711 | nvm_free_rqd_ppalist(dev, &rqd); | ||
712 | |||
713 | return ret; | ||
692 | } | 714 | } |
693 | EXPORT_SYMBOL(nvm_erase_blk); | 715 | EXPORT_SYMBOL(nvm_erase_blk); |
694 | 716 | ||
@@ -847,33 +869,6 @@ void nvm_free_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd) | |||
847 | } | 869 | } |
848 | EXPORT_SYMBOL(nvm_free_rqd_ppalist); | 870 | EXPORT_SYMBOL(nvm_free_rqd_ppalist); |
849 | 871 | ||
850 | int nvm_erase_ppa(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas, | ||
851 | int flags) | ||
852 | { | ||
853 | struct nvm_rq rqd; | ||
854 | int ret; | ||
855 | |||
856 | if (!dev->ops->erase_block) | ||
857 | return 0; | ||
858 | |||
859 | memset(&rqd, 0, sizeof(struct nvm_rq)); | ||
860 | |||
861 | ret = nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas, 1); | ||
862 | if (ret) | ||
863 | return ret; | ||
864 | |||
865 | nvm_generic_to_addr_mode(dev, &rqd); | ||
866 | |||
867 | rqd.flags = flags; | ||
868 | |||
869 | ret = dev->ops->erase_block(dev, &rqd); | ||
870 | |||
871 | nvm_free_rqd_ppalist(dev, &rqd); | ||
872 | |||
873 | return ret; | ||
874 | } | ||
875 | EXPORT_SYMBOL(nvm_erase_ppa); | ||
876 | |||
877 | void nvm_end_io(struct nvm_rq *rqd, int error) | 872 | void nvm_end_io(struct nvm_rq *rqd, int error) |
878 | { | 873 | { |
879 | struct nvm_tgt_dev *tgt_dev = rqd->dev; | 874 | struct nvm_tgt_dev *tgt_dev = rqd->dev; |