author     Igor Konopko <igor.j.konopko@intel.com>    2018-12-11 14:16:26 -0500
committer  Jens Axboe <axboe@kernel.dk>               2018-12-11 14:22:35 -0500
commit     55d8ec35398e7ab001989473cf6ed6f40b5ef4a6 (patch)
tree       69558b01d608060e20b49d32edd5d9e6ce8c5e40 /drivers/lightnvm/pblk-init.c
parent     a16816b9e462e8ee86a908606bde54b53cfeca80 (diff)
lightnvm: pblk: support packed metadata
pblk performs recovery of open lines by storing the LBA in the per-LBA metadata field, so recovery only works for drives that have this field.

This patch adds support for packed metadata, which stores the l2p mapping for open lines in the last sector of every write unit, and enables drives without per-IO metadata to recover open lines.

After this patch, drives with an OOB size smaller than 16B will use packed metadata, while drives with a metadata size larger than 16B will continue to use the device's per-IO metadata.

Reviewed-by: Javier González <javier@cnexlabs.com>
Signed-off-by: Igor Konopko <igor.j.konopko@intel.com>
Signed-off-by: Matias Bjørling <mb@lightnvm.io>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
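The packed metadata idea can be illustrated with a small standalone sketch. This is plain userspace C, not the pblk implementation; the structure name, sector size and write-unit size below are made-up assumptions. The point it shows is that every write unit gives up its last sector to hold the LBAs of the data sectors written before it, which is exactly the information recovery needs to rebuild the L2P mapping of an open line on a drive without an OOB area.

	/* Illustrative sketch of packed metadata layout (assumed sizes). */
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define SEC_SIZE	4096	/* assumed sector size */
	#define WS_OPT		8	/* assumed optimal write size, in sectors */

	struct packed_meta_sec {
		uint64_t lba[WS_OPT - 1];	/* one LBA per data sector in the unit */
	};

	static void pack_write_unit(uint8_t unit[WS_OPT][SEC_SIZE],
				    const uint64_t *lbas)
	{
		struct packed_meta_sec meta;
		int i;

		/* WS_OPT - 1 sectors carry user data; record their LBAs */
		for (i = 0; i < WS_OPT - 1; i++)
			meta.lba[i] = lbas[i];

		/* the last sector of the unit holds the packed L2P mapping */
		memset(unit[WS_OPT - 1], 0, SEC_SIZE);
		memcpy(unit[WS_OPT - 1], &meta, sizeof(meta));
	}

	int main(void)
	{
		static uint8_t unit[WS_OPT][SEC_SIZE];
		uint64_t lbas[WS_OPT - 1] = { 100, 101, 102, 103, 104, 105, 106 };

		pack_write_unit(unit, lbas);
		printf("data sectors per write unit: %d, metadata sectors: 1\n",
		       WS_OPT - 1);
		return 0;
	}

During recovery, reading back the last sector of each write unit of an open line yields the LBAs of the preceding data sectors, mirroring what per-sector OOB metadata would otherwise provide.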
Diffstat (limited to 'drivers/lightnvm/pblk-init.c')
-rw-r--r--  drivers/lightnvm/pblk-init.c  | 38
1 file changed, 33 insertions(+), 5 deletions(-)
diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c
index e8055b796381..f9a3e47b6a93 100644
--- a/drivers/lightnvm/pblk-init.c
+++ b/drivers/lightnvm/pblk-init.c
@@ -399,6 +399,7 @@ static int pblk_core_init(struct pblk *pblk)
 	pblk->nr_flush_rst = 0;
 
 	pblk->min_write_pgs = geo->ws_opt;
+	pblk->min_write_pgs_data = pblk->min_write_pgs;
 	max_write_ppas = pblk->min_write_pgs * geo->all_luns;
 	pblk->max_write_pgs = min_t(int, max_write_ppas, NVM_MAX_VLBA);
 	pblk->max_write_pgs = min_t(int, pblk->max_write_pgs,
@@ -406,9 +407,35 @@ static int pblk_core_init(struct pblk *pblk)
 	pblk_set_sec_per_write(pblk, pblk->min_write_pgs);
 
 	pblk->oob_meta_size = geo->sos;
-	if (pblk->oob_meta_size < sizeof(struct pblk_sec_meta)) {
-		pblk_err(pblk, "Unsupported metadata size\n");
-		return -EINVAL;
+	if (!pblk_is_oob_meta_supported(pblk)) {
+		/* For drives which does not have OOB metadata feature
+		 * in order to support recovery feature we need to use
+		 * so called packed metadata. Packed metada will store
+		 * the same information as OOB metadata (l2p table mapping,
+		 * but in the form of the single page at the end of
+		 * every write request.
+		 */
+		if (pblk->min_write_pgs
+			* sizeof(struct pblk_sec_meta) > PAGE_SIZE) {
+			/* We want to keep all the packed metadata on single
+			 * page per write requests. So we need to ensure that
+			 * it will fit.
+			 *
+			 * This is more like sanity check, since there is
+			 * no device with such a big minimal write size
+			 * (above 1 metabytes).
+			 */
+			pblk_err(pblk, "Not supported min write size\n");
+			return -EINVAL;
+		}
+		/* For packed meta approach we do some simplification.
+		 * On read path we always issue requests which size
+		 * equal to max_write_pgs, with all pages filled with
+		 * user payload except of last one page which will be
+		 * filled with packed metadata.
+		 */
+		pblk->max_write_pgs = pblk->min_write_pgs;
+		pblk->min_write_pgs_data = pblk->min_write_pgs - 1;
 	}
 
 	pblk->pad_dist = kcalloc(pblk->min_write_pgs - 1, sizeof(atomic64_t),
@@ -641,7 +668,7 @@ static int pblk_set_provision(struct pblk *pblk, int nr_free_chks)
 	struct pblk_line_meta *lm = &pblk->lm;
 	struct nvm_geo *geo = &dev->geo;
 	sector_t provisioned;
-	int sec_meta, blk_meta;
+	int sec_meta, blk_meta, clba;
 	int minimum;
 
 	if (geo->op == NVM_TARGET_DEFAULT_OP)
@@ -682,7 +709,8 @@ static int pblk_set_provision(struct pblk *pblk, int nr_free_chks)
 	sec_meta = (lm->smeta_sec + lm->emeta_sec[0]) * l_mg->nr_free_lines;
 	blk_meta = DIV_ROUND_UP(sec_meta, geo->clba);
 
-	pblk->capacity = (provisioned - blk_meta) * geo->clba;
+	clba = (geo->clba / pblk->min_write_pgs) * pblk->min_write_pgs_data;
+	pblk->capacity = (provisioned - blk_meta) * clba;
 
 	atomic_set(&pblk->rl.free_blocks, nr_free_chks);
 	atomic_set(&pblk->rl.free_user_blocks, nr_free_chks);
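A worked example of the capacity adjustment in pblk_set_provision() above, using made-up geometry values (the clba and write-size numbers are not taken from any real drive): because one sector per write unit now carries packed metadata, the usable sectors per chunk shrink from clba to (clba / min_write_pgs) * (min_write_pgs - 1).

	/* Illustrative capacity arithmetic for the packed-metadata case. */
	#include <stdio.h>

	int main(void)
	{
		int clba = 4096;		/* assumed sectors per chunk (geo->clba) */
		int min_write_pgs = 8;		/* assumed optimal write size (geo->ws_opt) */
		int min_write_pgs_data = min_write_pgs - 1;	/* one sector reserved per unit */

		int usable = (clba / min_write_pgs) * min_write_pgs_data;

		/* 4096 / 8 = 512 write units, 7 data sectors each -> 3584 sectors */
		printf("usable sectors per chunk: %d of %d\n", usable, clba);
		return 0;
	}

Drives with a usable OOB area keep min_write_pgs_data equal to min_write_pgs, so for them the computed clba degenerates to geo->clba and the exported capacity is unchanged.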