about summary refs log tree commit diff stats
path: root/drivers/lightnvm/core.c
diff options
context:
space:
mode:
authorRakesh Pandit <rakesh@tuxera.com>2017-10-13 08:46:31 -0400
committerJens Axboe <axboe@kernel.dk>2017-10-13 10:34:57 -0400
commiteb6f168f97438bf1cac8b9b1301c662eace9e39f (patch)
tree7b999617ac8a977b4c10f8e16786c281c537da9a /drivers/lightnvm/core.c
parentef56b9ce562753cacf518f081a4ff3227efdab25 (diff)
lightnvm: remove stale extern and unused exported symbols
Not all exported symbols are being used outside core and there were some
stale entries in lightnvm.h

Signed-off-by: Rakesh Pandit <rakesh@tuxera.com>
Signed-off-by: Matias Bjørling <m@bjorling.me>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'drivers/lightnvm/core.c')
-rw-r--r--drivers/lightnvm/core.c129
1 files changed, 64 insertions, 65 deletions
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 231c92899431..0e5f77234c79 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -226,6 +226,24 @@ static const struct block_device_operations nvm_fops = {
226 .owner = THIS_MODULE, 226 .owner = THIS_MODULE,
227}; 227};
228 228
229static struct nvm_tgt_type *nvm_find_target_type(const char *name, int lock)
230{
231 struct nvm_tgt_type *tmp, *tt = NULL;
232
233 if (lock)
234 down_write(&nvm_tgtt_lock);
235
236 list_for_each_entry(tmp, &nvm_tgt_types, list)
237 if (!strcmp(name, tmp->name)) {
238 tt = tmp;
239 break;
240 }
241
242 if (lock)
243 up_write(&nvm_tgtt_lock);
244 return tt;
245}
246
229static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create) 247static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
230{ 248{
231 struct nvm_ioctl_create_simple *s = &create->conf.s; 249 struct nvm_ioctl_create_simple *s = &create->conf.s;
@@ -549,25 +567,6 @@ void nvm_part_to_tgt(struct nvm_dev *dev, sector_t *entries,
549} 567}
550EXPORT_SYMBOL(nvm_part_to_tgt); 568EXPORT_SYMBOL(nvm_part_to_tgt);
551 569
552struct nvm_tgt_type *nvm_find_target_type(const char *name, int lock)
553{
554 struct nvm_tgt_type *tmp, *tt = NULL;
555
556 if (lock)
557 down_write(&nvm_tgtt_lock);
558
559 list_for_each_entry(tmp, &nvm_tgt_types, list)
560 if (!strcmp(name, tmp->name)) {
561 tt = tmp;
562 break;
563 }
564
565 if (lock)
566 up_write(&nvm_tgtt_lock);
567 return tt;
568}
569EXPORT_SYMBOL(nvm_find_target_type);
570
571int nvm_register_tgt_type(struct nvm_tgt_type *tt) 570int nvm_register_tgt_type(struct nvm_tgt_type *tt)
572{ 571{
573 int ret = 0; 572 int ret = 0;
@@ -619,6 +618,52 @@ static struct nvm_dev *nvm_find_nvm_dev(const char *name)
619 return NULL; 618 return NULL;
620} 619}
621 620
621static int nvm_set_rqd_ppalist(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
622 const struct ppa_addr *ppas, int nr_ppas)
623{
624 struct nvm_dev *dev = tgt_dev->parent;
625 struct nvm_geo *geo = &tgt_dev->geo;
626 int i, plane_cnt, pl_idx;
627 struct ppa_addr ppa;
628
629 if (geo->plane_mode == NVM_PLANE_SINGLE && nr_ppas == 1) {
630 rqd->nr_ppas = nr_ppas;
631 rqd->ppa_addr = ppas[0];
632
633 return 0;
634 }
635
636 rqd->nr_ppas = nr_ppas;
637 rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
638 if (!rqd->ppa_list) {
639 pr_err("nvm: failed to allocate dma memory\n");
640 return -ENOMEM;
641 }
642
643 plane_cnt = geo->plane_mode;
644 rqd->nr_ppas *= plane_cnt;
645
646 for (i = 0; i < nr_ppas; i++) {
647 for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
648 ppa = ppas[i];
649 ppa.g.pl = pl_idx;
650 rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppa;
651 }
652 }
653
654 return 0;
655}
656
657static void nvm_free_rqd_ppalist(struct nvm_tgt_dev *tgt_dev,
658 struct nvm_rq *rqd)
659{
660 if (!rqd->ppa_list)
661 return;
662
663 nvm_dev_dma_free(tgt_dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
664}
665
666
622int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas, 667int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
623 int nr_ppas, int type) 668 int nr_ppas, int type)
624{ 669{
@@ -792,52 +837,6 @@ void nvm_put_area(struct nvm_tgt_dev *tgt_dev, sector_t begin)
792} 837}
793EXPORT_SYMBOL(nvm_put_area); 838EXPORT_SYMBOL(nvm_put_area);
794 839
795int nvm_set_rqd_ppalist(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
796 const struct ppa_addr *ppas, int nr_ppas)
797{
798 struct nvm_dev *dev = tgt_dev->parent;
799 struct nvm_geo *geo = &tgt_dev->geo;
800 int i, plane_cnt, pl_idx;
801 struct ppa_addr ppa;
802
803 if (geo->plane_mode == NVM_PLANE_SINGLE && nr_ppas == 1) {
804 rqd->nr_ppas = nr_ppas;
805 rqd->ppa_addr = ppas[0];
806
807 return 0;
808 }
809
810 rqd->nr_ppas = nr_ppas;
811 rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
812 if (!rqd->ppa_list) {
813 pr_err("nvm: failed to allocate dma memory\n");
814 return -ENOMEM;
815 }
816
817 plane_cnt = geo->plane_mode;
818 rqd->nr_ppas *= plane_cnt;
819
820 for (i = 0; i < nr_ppas; i++) {
821 for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
822 ppa = ppas[i];
823 ppa.g.pl = pl_idx;
824 rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppa;
825 }
826 }
827
828 return 0;
829}
830EXPORT_SYMBOL(nvm_set_rqd_ppalist);
831
832void nvm_free_rqd_ppalist(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
833{
834 if (!rqd->ppa_list)
835 return;
836
837 nvm_dev_dma_free(tgt_dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
838}
839EXPORT_SYMBOL(nvm_free_rqd_ppalist);
840
841void nvm_end_io(struct nvm_rq *rqd) 840void nvm_end_io(struct nvm_rq *rqd)
842{ 841{
843 struct nvm_tgt_dev *tgt_dev = rqd->dev; 842 struct nvm_tgt_dev *tgt_dev = rqd->dev;