about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorJavier González <jg@lightnvm.io>2017-04-15 14:55:37 -0400
committerJens Axboe <axboe@fb.com>2017-04-16 12:06:25 -0400
commit17912c49edfa6ab552329bf63d1b757eb874673b (patch)
treec523ceb84476721084dfb4f68cac0496c8cbb383
parent2849a7becb9586a09553680a8b752fdb517b26e3 (diff)
lightnvm: submit erases using the I/O path
Until now erases have been submitted as synchronous commands through a dedicated erase function. In order to enable targets implementing asynchronous erases, refactor the erase path so that it uses the normal async I/O submission functions. If a target requires sync I/O, it can implement it internally.

Also, adapt rrpc to use the new erase path.

Signed-off-by: Javier González <javier@cnexlabs.com>

Fixed spelling error.

Signed-off-by: Matias Bjørling <matias@cnexlabs.com>
Signed-off-by: Matias Bjørling <matias@cnexlabs.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
-rw-r--r--drivers/lightnvm/core.c54
-rw-r--r--drivers/lightnvm/rrpc.c3
-rw-r--r--drivers/nvme/host/lightnvm.c32
-rw-r--r--include/linux/lightnvm.h8
4 files changed, 47 insertions, 50 deletions
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 5262ba66a7a7..95105c47e082 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -590,11 +590,11 @@ int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
590 590
591 memset(&rqd, 0, sizeof(struct nvm_rq)); 591 memset(&rqd, 0, sizeof(struct nvm_rq));
592 592
593 nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas, 1); 593 nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas, 1);
594 nvm_rq_tgt_to_dev(tgt_dev, &rqd); 594 nvm_rq_tgt_to_dev(tgt_dev, &rqd);
595 595
596 ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type); 596 ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
597 nvm_free_rqd_ppalist(dev, &rqd); 597 nvm_free_rqd_ppalist(tgt_dev, &rqd);
598 if (ret) { 598 if (ret) {
599 pr_err("nvm: failed bb mark\n"); 599 pr_err("nvm: failed bb mark\n");
600 return -EINVAL; 600 return -EINVAL;
@@ -626,34 +626,45 @@ int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
626} 626}
627EXPORT_SYMBOL(nvm_submit_io); 627EXPORT_SYMBOL(nvm_submit_io);
628 628
629int nvm_erase_blk(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas, int flags) 629static void nvm_end_io_sync(struct nvm_rq *rqd)
630{ 630{
631 struct nvm_dev *dev = tgt_dev->parent; 631 struct completion *waiting = rqd->private;
632 struct nvm_rq rqd;
633 int ret;
634 632
635 if (!dev->ops->erase_block) 633 complete(waiting);
636 return 0; 634}
637 635
638 nvm_map_to_dev(tgt_dev, ppas); 636int nvm_erase_sync(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
637 int nr_ppas)
638{
639 struct nvm_geo *geo = &tgt_dev->geo;
640 struct nvm_rq rqd;
641 int ret;
642 DECLARE_COMPLETION_ONSTACK(wait);
639 643
640 memset(&rqd, 0, sizeof(struct nvm_rq)); 644 memset(&rqd, 0, sizeof(struct nvm_rq));
641 645
642 ret = nvm_set_rqd_ppalist(dev, &rqd, ppas, 1, 1); 646 rqd.opcode = NVM_OP_ERASE;
647 rqd.end_io = nvm_end_io_sync;
648 rqd.private = &wait;
649 rqd.flags = geo->plane_mode >> 1;
650
651 ret = nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas, 1);
643 if (ret) 652 if (ret)
644 return ret; 653 return ret;
645 654
646 nvm_rq_tgt_to_dev(tgt_dev, &rqd); 655 ret = nvm_submit_io(tgt_dev, &rqd);
647 656 if (ret) {
648 rqd.flags = flags; 657 pr_err("rrpr: erase I/O submission failed: %d\n", ret);
649 658 goto free_ppa_list;
650 ret = dev->ops->erase_block(dev, &rqd); 659 }
660 wait_for_completion_io(&wait);
651 661
652 nvm_free_rqd_ppalist(dev, &rqd); 662free_ppa_list:
663 nvm_free_rqd_ppalist(tgt_dev, &rqd);
653 664
654 return ret; 665 return ret;
655} 666}
656EXPORT_SYMBOL(nvm_erase_blk); 667EXPORT_SYMBOL(nvm_erase_sync);
657 668
658int nvm_get_l2p_tbl(struct nvm_tgt_dev *tgt_dev, u64 slba, u32 nlb, 669int nvm_get_l2p_tbl(struct nvm_tgt_dev *tgt_dev, u64 slba, u32 nlb,
659 nvm_l2p_update_fn *update_l2p, void *priv) 670 nvm_l2p_update_fn *update_l2p, void *priv)
@@ -732,10 +743,11 @@ void nvm_put_area(struct nvm_tgt_dev *tgt_dev, sector_t begin)
732} 743}
733EXPORT_SYMBOL(nvm_put_area); 744EXPORT_SYMBOL(nvm_put_area);
734 745
735int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd, 746int nvm_set_rqd_ppalist(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
736 const struct ppa_addr *ppas, int nr_ppas, int vblk) 747 const struct ppa_addr *ppas, int nr_ppas, int vblk)
737{ 748{
738 struct nvm_geo *geo = &dev->geo; 749 struct nvm_dev *dev = tgt_dev->parent;
750 struct nvm_geo *geo = &tgt_dev->geo;
739 int i, plane_cnt, pl_idx; 751 int i, plane_cnt, pl_idx;
740 struct ppa_addr ppa; 752 struct ppa_addr ppa;
741 753
@@ -773,12 +785,12 @@ int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd,
773} 785}
774EXPORT_SYMBOL(nvm_set_rqd_ppalist); 786EXPORT_SYMBOL(nvm_set_rqd_ppalist);
775 787
776void nvm_free_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd) 788void nvm_free_rqd_ppalist(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
777{ 789{
778 if (!rqd->ppa_list) 790 if (!rqd->ppa_list)
779 return; 791 return;
780 792
781 nvm_dev_dma_free(dev, rqd->ppa_list, rqd->dma_ppa_list); 793 nvm_dev_dma_free(tgt_dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
782} 794}
783EXPORT_SYMBOL(nvm_free_rqd_ppalist); 795EXPORT_SYMBOL(nvm_free_rqd_ppalist);
784 796
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index e68efbcf1188..4e4c2997337c 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -414,7 +414,6 @@ static void rrpc_block_gc(struct work_struct *work)
414 struct rrpc *rrpc = gcb->rrpc; 414 struct rrpc *rrpc = gcb->rrpc;
415 struct rrpc_block *rblk = gcb->rblk; 415 struct rrpc_block *rblk = gcb->rblk;
416 struct rrpc_lun *rlun = rblk->rlun; 416 struct rrpc_lun *rlun = rblk->rlun;
417 struct nvm_tgt_dev *dev = rrpc->dev;
418 struct ppa_addr ppa; 417 struct ppa_addr ppa;
419 418
420 mempool_free(gcb, rrpc->gcb_pool); 419 mempool_free(gcb, rrpc->gcb_pool);
@@ -430,7 +429,7 @@ static void rrpc_block_gc(struct work_struct *work)
430 ppa.g.lun = rlun->bppa.g.lun; 429 ppa.g.lun = rlun->bppa.g.lun;
431 ppa.g.blk = rblk->id; 430 ppa.g.blk = rblk->id;
432 431
433 if (nvm_erase_blk(dev, &ppa, 0)) 432 if (nvm_erase_sync(rrpc->dev, &ppa, 1))
434 goto put_back; 433 goto put_back;
435 434
436 rrpc_put_blk(rrpc, rblk); 435 rrpc_put_blk(rrpc, rblk);
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index fd9895423f55..4ea9c93fbbe0 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -510,12 +510,16 @@ static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
510 } 510 }
511 rq->cmd_flags &= ~REQ_FAILFAST_DRIVER; 511 rq->cmd_flags &= ~REQ_FAILFAST_DRIVER;
512 512
513 rq->ioprio = bio_prio(bio); 513 if (bio) {
514 if (bio_has_data(bio)) 514 rq->ioprio = bio_prio(bio);
515 rq->nr_phys_segments = bio_phys_segments(q, bio); 515 rq->__data_len = bio->bi_iter.bi_size;
516 516 rq->bio = rq->biotail = bio;
517 rq->__data_len = bio->bi_iter.bi_size; 517 if (bio_has_data(bio))
518 rq->bio = rq->biotail = bio; 518 rq->nr_phys_segments = bio_phys_segments(q, bio);
519 } else {
520 rq->ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
521 rq->__data_len = 0;
522 }
519 523
520 nvme_nvm_rqtocmd(rq, rqd, ns, cmd); 524 nvme_nvm_rqtocmd(rq, rqd, ns, cmd);
521 525
@@ -526,21 +530,6 @@ static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
526 return 0; 530 return 0;
527} 531}
528 532
529static int nvme_nvm_erase_block(struct nvm_dev *dev, struct nvm_rq *rqd)
530{
531 struct request_queue *q = dev->q;
532 struct nvme_ns *ns = q->queuedata;
533 struct nvme_nvm_command c = {};
534
535 c.erase.opcode = NVM_OP_ERASE;
536 c.erase.nsid = cpu_to_le32(ns->ns_id);
537 c.erase.spba = cpu_to_le64(rqd->ppa_addr.ppa);
538 c.erase.length = cpu_to_le16(rqd->nr_ppas - 1);
539 c.erase.control = cpu_to_le16(rqd->flags);
540
541 return nvme_submit_sync_cmd(q, (struct nvme_command *)&c, NULL, 0);
542}
543
544static void *nvme_nvm_create_dma_pool(struct nvm_dev *nvmdev, char *name) 533static void *nvme_nvm_create_dma_pool(struct nvm_dev *nvmdev, char *name)
545{ 534{
546 struct nvme_ns *ns = nvmdev->q->queuedata; 535 struct nvme_ns *ns = nvmdev->q->queuedata;
@@ -576,7 +565,6 @@ static struct nvm_dev_ops nvme_nvm_dev_ops = {
576 .set_bb_tbl = nvme_nvm_set_bb_tbl, 565 .set_bb_tbl = nvme_nvm_set_bb_tbl,
577 566
578 .submit_io = nvme_nvm_submit_io, 567 .submit_io = nvme_nvm_submit_io,
579 .erase_block = nvme_nvm_erase_block,
580 568
581 .create_dma_pool = nvme_nvm_create_dma_pool, 569 .create_dma_pool = nvme_nvm_create_dma_pool,
582 .destroy_dma_pool = nvme_nvm_destroy_dma_pool, 570 .destroy_dma_pool = nvme_nvm_destroy_dma_pool,
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index ca45e4a088a9..e11163f9b3b7 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -56,7 +56,6 @@ typedef int (nvm_get_l2p_tbl_fn)(struct nvm_dev *, u64, u32,
56typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, u8 *); 56typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, u8 *);
57typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct ppa_addr *, int, int); 57typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct ppa_addr *, int, int);
58typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *); 58typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
59typedef int (nvm_erase_blk_fn)(struct nvm_dev *, struct nvm_rq *);
60typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *); 59typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *);
61typedef void (nvm_destroy_dma_pool_fn)(void *); 60typedef void (nvm_destroy_dma_pool_fn)(void *);
62typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t, 61typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t,
@@ -70,7 +69,6 @@ struct nvm_dev_ops {
70 nvm_op_set_bb_fn *set_bb_tbl; 69 nvm_op_set_bb_fn *set_bb_tbl;
71 70
72 nvm_submit_io_fn *submit_io; 71 nvm_submit_io_fn *submit_io;
73 nvm_erase_blk_fn *erase_block;
74 72
75 nvm_create_dma_pool_fn *create_dma_pool; 73 nvm_create_dma_pool_fn *create_dma_pool;
76 nvm_destroy_dma_pool_fn *destroy_dma_pool; 74 nvm_destroy_dma_pool_fn *destroy_dma_pool;
@@ -479,10 +477,10 @@ extern int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr *,
479 int, int); 477 int, int);
480extern int nvm_max_phys_sects(struct nvm_tgt_dev *); 478extern int nvm_max_phys_sects(struct nvm_tgt_dev *);
481extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *); 479extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *);
482extern int nvm_set_rqd_ppalist(struct nvm_dev *, struct nvm_rq *, 480extern int nvm_erase_sync(struct nvm_tgt_dev *, struct ppa_addr *, int);
481extern int nvm_set_rqd_ppalist(struct nvm_tgt_dev *, struct nvm_rq *,
483 const struct ppa_addr *, int, int); 482 const struct ppa_addr *, int, int);
484extern void nvm_free_rqd_ppalist(struct nvm_dev *, struct nvm_rq *); 483extern void nvm_free_rqd_ppalist(struct nvm_tgt_dev *, struct nvm_rq *);
485extern int nvm_erase_blk(struct nvm_tgt_dev *, struct ppa_addr *, int);
486extern int nvm_get_l2p_tbl(struct nvm_tgt_dev *, u64, u32, nvm_l2p_update_fn *, 484extern int nvm_get_l2p_tbl(struct nvm_tgt_dev *, u64, u32, nvm_l2p_update_fn *,
487 void *); 485 void *);
488extern int nvm_get_area(struct nvm_tgt_dev *, sector_t *, sector_t); 486extern int nvm_get_area(struct nvm_tgt_dev *, sector_t *, sector_t);