author     Matias Bjørling <mb@lightnvm.io>    2018-03-29 18:05:04 -0400
committer  Jens Axboe <axboe@kernel.dk>        2018-03-29 19:29:09 -0400
commit     89a09c5643e01f5e5d3c5f2e720053473a60a90b (patch)
tree       4e3c80694912bc336479de619de61a0d3f5d8837 /drivers/lightnvm
parent     af569398c390810fca773c903a85b71dfd870bb0 (diff)

lightnvm: remove nvm_dev_ops->max_phys_sect

The value of max_phys_sect is always static. Instead of defining it in
the nvm_dev_ops structure, declare it as a global value.

Signed-off-by: Matias Bjørling <mb@lightnvm.io>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
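The header side of the change is outside this diffstat (which is limited to drivers/lightnvm), but the shape of it is easy to sketch: drop the per-driver field from nvm_dev_ops and bound vector commands by one shared constant. The following is a minimal, hypothetical illustration, not the kernel header; the value 64 is an assumption (the limit the removed field reported for these devices), and the real definition lives in include/linux/lightnvm.h.

    /* Illustrative sketch only -- not the kernel header. */
    #include <stdio.h>

    #define NVM_MAX_VLBA 64             /* assumed global cap per vector command */

    /* Before: each driver reported its own (always static) limit. */
    struct old_nvm_dev_ops {
            unsigned int max_phys_sect; /* field removed by this patch */
    };

    /* After: callers compare against the shared constant instead. */
    static unsigned int clamp_ppas(unsigned int nr_ppas)
    {
            return nr_ppas > NVM_MAX_VLBA ? NVM_MAX_VLBA : nr_ppas;
    }

    int main(void)
    {
            printf("%u\n", clamp_ppas(300));    /* prints 64 */
            return 0;
    }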
Diffstat (limited to 'drivers/lightnvm')
-rw-r--r--  drivers/lightnvm/core.c           28
-rw-r--r--  drivers/lightnvm/pblk-init.c       9
-rw-r--r--  drivers/lightnvm/pblk-recovery.c   8
3 files changed, 13 insertions, 32 deletions
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index a59ad29600c3..9704db219866 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -407,7 +407,8 @@ static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
 	tdisk->private_data = targetdata;
 	tqueue->queuedata = targetdata;
 
-	blk_queue_max_hw_sectors(tqueue, 8 * dev->ops->max_phys_sect);
+	blk_queue_max_hw_sectors(tqueue,
+				(dev->geo.sec_size >> 9) * NVM_MAX_VLBA);
 
 	set_capacity(tdisk, tt->capacity(targetdata));
 	add_disk(tdisk);
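The hunk above replaces the fixed 8 * max_phys_sect budget with one derived from the device geometry. As a sanity check of the arithmetic only, and under the assumption of a 4096-byte sector and NVM_MAX_VLBA == 64 (the value max_phys_sect used to report here), the queue limit works out the same:

    #include <assert.h>

    /* Hypothetical values, used only to show the arithmetic. */
    #define SEC_SIZE        4096U   /* assumed dev->geo.sec_size */
    #define NVM_MAX_VLBA    64U     /* assumed vector-command cap */
    #define MAX_PHYS_SECT   64U     /* what the removed ops field reported */

    int main(void)
    {
            unsigned int old_limit = 8 * MAX_PHYS_SECT;              /* 512 x 512 B */
            unsigned int new_limit = (SEC_SIZE >> 9) * NVM_MAX_VLBA; /* 8 * 64      */

            assert(old_limit == new_limit); /* both 512 sectors, i.e. 256 KiB */
            return 0;
    }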
@@ -719,7 +720,7 @@ int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
 	struct nvm_rq rqd;
 	int ret;
 
-	if (nr_ppas > dev->ops->max_phys_sect) {
+	if (nr_ppas > NVM_MAX_VLBA) {
 		pr_err("nvm: unable to update all blocks atomically\n");
 		return -EINVAL;
 	}
@@ -740,14 +741,6 @@ int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
 }
 EXPORT_SYMBOL(nvm_set_tgt_bb_tbl);
 
-int nvm_max_phys_sects(struct nvm_tgt_dev *tgt_dev)
-{
-	struct nvm_dev *dev = tgt_dev->parent;
-
-	return dev->ops->max_phys_sect;
-}
-EXPORT_SYMBOL(nvm_max_phys_sects);
-
 int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
 {
 	struct nvm_dev *dev = tgt_dev->parent;
@@ -965,17 +958,10 @@ int nvm_register(struct nvm_dev *dev)
 	if (!dev->q || !dev->ops)
 		return -EINVAL;
 
-	if (dev->ops->max_phys_sect > 256) {
-		pr_info("nvm: max sectors supported is 256.\n");
-		return -EINVAL;
-	}
-
-	if (dev->ops->max_phys_sect > 1) {
-		dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist");
-		if (!dev->dma_pool) {
-			pr_err("nvm: could not create dma pool\n");
-			return -ENOMEM;
-		}
+	dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist");
+	if (!dev->dma_pool) {
+		pr_err("nvm: could not create dma pool\n");
+		return -ENOMEM;
 	}
 
 	ret = nvm_init(dev);
diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c
index 141036bd6afa..43b835678f48 100644
--- a/drivers/lightnvm/pblk-init.c
+++ b/drivers/lightnvm/pblk-init.c
@@ -260,8 +260,7 @@ static int pblk_core_init(struct pblk *pblk)
 		return -ENOMEM;
 
 	/* Internal bios can be at most the sectors signaled by the device. */
-	pblk->page_bio_pool = mempool_create_page_pool(nvm_max_phys_sects(dev),
-									0);
+	pblk->page_bio_pool = mempool_create_page_pool(NVM_MAX_VLBA, 0);
 	if (!pblk->page_bio_pool)
 		goto free_global_caches;
 
@@ -716,12 +715,12 @@ static int pblk_lines_init(struct pblk *pblk)
 
 	pblk->min_write_pgs = geo->sec_per_pl * (geo->sec_size / PAGE_SIZE);
 	max_write_ppas = pblk->min_write_pgs * geo->all_luns;
-	pblk->max_write_pgs = (max_write_ppas < nvm_max_phys_sects(dev)) ?
-				max_write_ppas : nvm_max_phys_sects(dev);
+	pblk->max_write_pgs = min_t(int, max_write_ppas, NVM_MAX_VLBA);
 	pblk_set_sec_per_write(pblk, pblk->min_write_pgs);
 
 	if (pblk->max_write_pgs > PBLK_MAX_REQ_ADDRS) {
-		pr_err("pblk: cannot support device max_phys_sect\n");
+		pr_err("pblk: vector list too big(%u > %u)\n",
+				pblk->max_write_pgs, PBLK_MAX_REQ_ADDRS);
 		return -EINVAL;
 	}
 
diff --git a/drivers/lightnvm/pblk-recovery.c b/drivers/lightnvm/pblk-recovery.c
index e75a1af2eebe..aaab9a5c17cc 100644
--- a/drivers/lightnvm/pblk-recovery.c
+++ b/drivers/lightnvm/pblk-recovery.c
@@ -21,17 +21,15 @@ void pblk_submit_rec(struct work_struct *work)
 	struct pblk_rec_ctx *recovery =
 			container_of(work, struct pblk_rec_ctx, ws_rec);
 	struct pblk *pblk = recovery->pblk;
-	struct nvm_tgt_dev *dev = pblk->dev;
 	struct nvm_rq *rqd = recovery->rqd;
 	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
-	int max_secs = nvm_max_phys_sects(dev);
 	struct bio *bio;
 	unsigned int nr_rec_secs;
 	unsigned int pgs_read;
 	int ret;
 
 	nr_rec_secs = bitmap_weight((unsigned long int *)&rqd->ppa_status,
-							max_secs);
+							NVM_MAX_VLBA);
 
 	bio = bio_alloc(GFP_KERNEL, nr_rec_secs);
 
@@ -74,8 +72,6 @@ int pblk_recov_setup_rq(struct pblk *pblk, struct pblk_c_ctx *c_ctx,
 		       struct pblk_rec_ctx *recovery, u64 *comp_bits,
 		       unsigned int comp)
 {
-	struct nvm_tgt_dev *dev = pblk->dev;
-	int max_secs = nvm_max_phys_sects(dev);
 	struct nvm_rq *rec_rqd;
 	struct pblk_c_ctx *rec_ctx;
 	int nr_entries = c_ctx->nr_valid + c_ctx->nr_padded;
@@ -86,7 +82,7 @@ int pblk_recov_setup_rq(struct pblk *pblk, struct pblk_c_ctx *c_ctx,
 	/* Copy completion bitmap, but exclude the first X completed entries */
 	bitmap_shift_right((unsigned long int *)&rec_rqd->ppa_status,
 			   (unsigned long int *)comp_bits,
-			   comp, max_secs);
+			   comp, NVM_MAX_VLBA);
 
 	/* Save the context for the entries that need to be re-written and
 	 * update current context with the completed entries.