author		Matias Bjørling <m@bjorling.me>		2016-05-06 14:03:08 -0400
committer	Jens Axboe <axboe@fb.com>		2016-05-06 14:51:10 -0400
commit		a63d5cf2031cc84443440caf32c175b3548ac6b8
tree		a2b6b098163bdeaf048f32a446301c99d1f025d2
parent		5ebc7d9fe13ff9bd3622d0be3cd39c8751459be6
lightnvm: move responsibility for bad blk mgmt to target
We move the responsibility for managing the persistent bad block table to
the target. The target may choose to mark a block bad or to retry writing
to it. Nevertheless, it should be the target that makes the decision,
not the media manager.
Signed-off-by: Matias Bjørling <m@bjorling.me>
Signed-off-by: Jens Axboe <axboe@fb.com>
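
With this split, a target's write-completion path, rather than gennvm, decides when a failing block is persisted to the device bad block table. A rough sketch of what that could look like on the target side follows; my_tgt_end_io() and my_tgt_retry_write() are hypothetical names, and only the NVM_RSP_ERR_FAILWRITE check and the dev->ops->set_bb_tbl(dev, rqd, 1) call are taken from the code this patch removes from gennvm:

/*
 * Hypothetical target-side completion hook illustrating the new split:
 * the media manager only updates its in-memory block state, while the
 * target decides whether to retry the write or to persist the bad
 * block mark.
 */
static void my_tgt_end_io(struct nvm_rq *rqd)
{
	struct nvm_dev *dev = rqd->dev;

	if (rqd->error == NVM_RSP_ERR_FAILWRITE) {
		/* hypothetical: try rewriting the data elsewhere first */
		if (my_tgt_retry_write(rqd))
			return;

		/* give up on the block: persist it in the bad block table */
		if (dev->ops->set_bb_tbl)
			dev->ops->set_bb_tbl(dev, rqd, 1);
	}

	/* ...normal completion handling... */
}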
 drivers/lightnvm/gennvm.c | 35
 1 file changed, 16 insertions(+), 19 deletions(-)
diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
index 9c6b141606e9..89b880a25cc6 100644
--- a/drivers/lightnvm/gennvm.c
+++ b/drivers/lightnvm/gennvm.c
@@ -419,6 +419,9 @@ static void gennvm_blk_set_type(struct nvm_dev *dev, struct ppa_addr *ppa,
 	struct gen_lun *lun;
 	struct nvm_block *blk;
 
+	pr_debug("gennvm: ppa (ch: %u lun: %u blk: %u pg: %u) -> %u\n",
+			ppa->g.ch, ppa->g.lun, ppa->g.blk, ppa->g.pg, type);
+
 	if (unlikely(ppa->g.ch > dev->nr_chnls ||
 				ppa->g.lun > dev->luns_per_chnl ||
 				ppa->g.blk > dev->blks_per_lun)) {
@@ -437,39 +440,33 @@ static void gennvm_blk_set_type(struct nvm_dev *dev, struct ppa_addr *ppa,
 		blk->state = type;
 	}
 
-/* mark block bad. It is expected the target recover from the error. */
+/*
+ * mark block bad in gennvm. It is expected that the target recovers separately
+ */
 static void gennvm_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd)
 {
-	int i;
-
-	if (!dev->ops->set_bb_tbl)
-		return;
-
-	if (dev->ops->set_bb_tbl(dev, rqd, 1))
-		return;
+	int bit = -1;
+	int max_secs = dev->ops->max_phys_sect;
+	void *comp_bits = &rqd->ppa_status;
 
 	nvm_addr_to_generic_mode(dev, rqd);
 
 	/* look up blocks and mark them as bad */
-	if (rqd->nr_pages > 1)
-		for (i = 0; i < rqd->nr_pages; i++)
-			gennvm_blk_set_type(dev, &rqd->ppa_list[i],
-						NVM_BLK_ST_BAD);
-	else
+	if (rqd->nr_pages == 1) {
 		gennvm_blk_set_type(dev, &rqd->ppa_addr, NVM_BLK_ST_BAD);
+		return;
+	}
+
+	while ((bit = find_next_bit(comp_bits, max_secs, bit + 1)) < max_secs)
+		gennvm_blk_set_type(dev, &rqd->ppa_list[bit], NVM_BLK_ST_BAD);
 }
 
 static void gennvm_end_io(struct nvm_rq *rqd)
 {
 	struct nvm_tgt_instance *ins = rqd->ins;
 
-	switch (rqd->error) {
-	case NVM_RSP_SUCCESS:
-	case NVM_RSP_ERR_EMPTYPAGE:
-		break;
-	case NVM_RSP_ERR_FAILWRITE:
+	if (rqd->error == NVM_RSP_ERR_FAILWRITE)
 		gennvm_mark_blk_bad(rqd->dev, rqd);
-	}
 
 	ins->tt->end_io(rqd);
 }
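
For multi-sector requests, the reworked gennvm_mark_blk_bad() treats rqd->ppa_status as a per-sector completion bitmap: find_next_bit() visits only the set bits, so only the blocks behind PPAs whose writes actually failed are marked bad. A minimal sketch of that walk with an assumed bitmap value (the request size and failed sectors below are illustrative, not taken from the patch):

	/* assume an 8-sector request where sectors 2 and 5 failed to write */
	unsigned long ppa_status = BIT(2) | BIT(5);
	int max_secs = 8;
	int bit = -1;

	while ((bit = find_next_bit(&ppa_status, max_secs, bit + 1)) < max_secs)
		/* rqd->ppa_list[bit] would be the failed PPA whose block is marked bad */
		pr_debug("sector %d failed, mark its block bad\n", bit);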