summaryrefslogtreecommitdiffstats
path: root/drivers/lightnvm
diff options
context:
space:
mode:
authorJavier González <jg@lightnvm.io>2016-11-28 16:39:04 -0500
committerJens Axboe <axboe@fb.com>2016-11-29 14:12:51 -0500
commitde93434fcf74d41754a48e45365a5914e00bc0be (patch)
tree9eae34dfc2ddf9fa9fad129fb78e1f00e90277ae /drivers/lightnvm
parent98379a12c54974ee5856dcf81781a5dc845505c3 (diff)
lightnvm: remove gen_lun abstraction
The gen_lun abstraction in the generic media manager was conceived on the assumption that a single target would be instantiated on top of it. This has complicated target design to implement multi-instances. Remove this abstraction and move its logic to nvm_lun, which manages physical lun geometry and operations. Signed-off-by: Javier González <javier@cnexlabs.com> Signed-off-by: Matias Bjørling <m@bjorling.me> Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'drivers/lightnvm')
-rw-r--r--drivers/lightnvm/gennvm.c85
-rw-r--r--drivers/lightnvm/gennvm.h16
2 files changed, 43 insertions, 58 deletions
diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
index aee5b722ba63..3572ebbb50ce 100644
--- a/drivers/lightnvm/gennvm.c
+++ b/drivers/lightnvm/gennvm.c
@@ -223,13 +223,13 @@ static void gen_put_area(struct nvm_dev *dev, sector_t begin)
223static void gen_blocks_free(struct nvm_dev *dev) 223static void gen_blocks_free(struct nvm_dev *dev)
224{ 224{
225 struct gen_dev *gn = dev->mp; 225 struct gen_dev *gn = dev->mp;
226 struct gen_lun *lun; 226 struct nvm_lun *lun;
227 int i; 227 int i;
228 228
229 gen_for_each_lun(gn, lun, i) { 229 gen_for_each_lun(gn, lun, i) {
230 if (!lun->vlun.blocks) 230 if (!lun->blocks)
231 break; 231 break;
232 vfree(lun->vlun.blocks); 232 vfree(lun->blocks);
233 } 233 }
234} 234}
235 235
@@ -242,24 +242,24 @@ static void gen_luns_free(struct nvm_dev *dev)
242 242
243static int gen_luns_init(struct nvm_dev *dev, struct gen_dev *gn) 243static int gen_luns_init(struct nvm_dev *dev, struct gen_dev *gn)
244{ 244{
245 struct gen_lun *lun; 245 struct nvm_lun *lun;
246 int i; 246 int i;
247 247
248 gn->luns = kcalloc(dev->nr_luns, sizeof(struct gen_lun), GFP_KERNEL); 248 gn->luns = kcalloc(dev->nr_luns, sizeof(struct nvm_lun), GFP_KERNEL);
249 if (!gn->luns) 249 if (!gn->luns)
250 return -ENOMEM; 250 return -ENOMEM;
251 251
252 gen_for_each_lun(gn, lun, i) { 252 gen_for_each_lun(gn, lun, i) {
253 spin_lock_init(&lun->vlun.lock);
254 INIT_LIST_HEAD(&lun->free_list); 253 INIT_LIST_HEAD(&lun->free_list);
255 INIT_LIST_HEAD(&lun->used_list); 254 INIT_LIST_HEAD(&lun->used_list);
256 INIT_LIST_HEAD(&lun->bb_list); 255 INIT_LIST_HEAD(&lun->bb_list);
257 256
258 lun->reserved_blocks = 2; /* for GC only */ 257 spin_lock_init(&lun->lock);
259 lun->vlun.id = i; 258
260 lun->vlun.lun_id = i % dev->luns_per_chnl; 259 lun->id = i;
261 lun->vlun.chnl_id = i / dev->luns_per_chnl; 260 lun->lun_id = i % dev->luns_per_chnl;
262 lun->vlun.nr_free_blocks = dev->blks_per_lun; 261 lun->chnl_id = i / dev->luns_per_chnl;
262 lun->nr_free_blocks = dev->blks_per_lun;
263 } 263 }
264 return 0; 264 return 0;
265} 265}
@@ -268,7 +268,7 @@ static int gen_block_bb(struct gen_dev *gn, struct ppa_addr ppa,
268 u8 *blks, int nr_blks) 268 u8 *blks, int nr_blks)
269{ 269{
270 struct nvm_dev *dev = gn->dev; 270 struct nvm_dev *dev = gn->dev;
271 struct gen_lun *lun; 271 struct nvm_lun *lun;
272 struct nvm_block *blk; 272 struct nvm_block *blk;
273 int i; 273 int i;
274 274
@@ -282,9 +282,10 @@ static int gen_block_bb(struct gen_dev *gn, struct ppa_addr ppa,
282 if (blks[i] == NVM_BLK_T_FREE) 282 if (blks[i] == NVM_BLK_T_FREE)
283 continue; 283 continue;
284 284
285 blk = &lun->vlun.blocks[i]; 285 blk = &lun->blocks[i];
286 list_move_tail(&blk->list, &lun->bb_list); 286 list_move_tail(&blk->list, &lun->bb_list);
287 lun->vlun.nr_free_blocks--; 287 blk->state = NVM_BLK_ST_BAD;
288 lun->nr_free_blocks--;
288 } 289 }
289 290
290 return 0; 291 return 0;
@@ -295,7 +296,7 @@ static int gen_block_map(u64 slba, u32 nlb, __le64 *entries, void *private)
295 struct nvm_dev *dev = private; 296 struct nvm_dev *dev = private;
296 struct gen_dev *gn = dev->mp; 297 struct gen_dev *gn = dev->mp;
297 u64 elba = slba + nlb; 298 u64 elba = slba + nlb;
298 struct gen_lun *lun; 299 struct nvm_lun *lun;
299 struct nvm_block *blk; 300 struct nvm_block *blk;
300 u64 i; 301 u64 i;
301 int lun_id; 302 int lun_id;
@@ -326,7 +327,7 @@ static int gen_block_map(u64 slba, u32 nlb, __le64 *entries, void *private)
326 327
327 /* Calculate block offset into lun */ 328 /* Calculate block offset into lun */
328 pba = pba - (dev->sec_per_lun * lun_id); 329 pba = pba - (dev->sec_per_lun * lun_id);
329 blk = &lun->vlun.blocks[div_u64(pba, dev->sec_per_blk)]; 330 blk = &lun->blocks[div_u64(pba, dev->sec_per_blk)];
330 331
331 if (!blk->state) { 332 if (!blk->state) {
332 /* at this point, we don't know anything about the 333 /* at this point, we don't know anything about the
@@ -335,7 +336,7 @@ static int gen_block_map(u64 slba, u32 nlb, __le64 *entries, void *private)
335 */ 336 */
336 list_move_tail(&blk->list, &lun->used_list); 337 list_move_tail(&blk->list, &lun->used_list);
337 blk->state = NVM_BLK_ST_TGT; 338 blk->state = NVM_BLK_ST_TGT;
338 lun->vlun.nr_free_blocks--; 339 lun->nr_free_blocks--;
339 } 340 }
340 } 341 }
341 342
@@ -344,7 +345,7 @@ static int gen_block_map(u64 slba, u32 nlb, __le64 *entries, void *private)
344 345
345static int gen_blocks_init(struct nvm_dev *dev, struct gen_dev *gn) 346static int gen_blocks_init(struct nvm_dev *dev, struct gen_dev *gn)
346{ 347{
347 struct gen_lun *lun; 348 struct nvm_lun *lun;
348 struct nvm_block *block; 349 struct nvm_block *block;
349 sector_t lun_iter, blk_iter, cur_block_id = 0; 350 sector_t lun_iter, blk_iter, cur_block_id = 0;
350 int ret, nr_blks; 351 int ret, nr_blks;
@@ -356,19 +357,19 @@ static int gen_blocks_init(struct nvm_dev *dev, struct gen_dev *gn)
356 return -ENOMEM; 357 return -ENOMEM;
357 358
358 gen_for_each_lun(gn, lun, lun_iter) { 359 gen_for_each_lun(gn, lun, lun_iter) {
359 lun->vlun.blocks = vzalloc(sizeof(struct nvm_block) * 360 lun->blocks = vzalloc(sizeof(struct nvm_block) *
360 dev->blks_per_lun); 361 dev->blks_per_lun);
361 if (!lun->vlun.blocks) { 362 if (!lun->blocks) {
362 kfree(blks); 363 kfree(blks);
363 return -ENOMEM; 364 return -ENOMEM;
364 } 365 }
365 366
366 for (blk_iter = 0; blk_iter < dev->blks_per_lun; blk_iter++) { 367 for (blk_iter = 0; blk_iter < dev->blks_per_lun; blk_iter++) {
367 block = &lun->vlun.blocks[blk_iter]; 368 block = &lun->blocks[blk_iter];
368 369
369 INIT_LIST_HEAD(&block->list); 370 INIT_LIST_HEAD(&block->list);
370 371
371 block->lun = &lun->vlun; 372 block->lun = lun;
372 block->id = cur_block_id++; 373 block->id = cur_block_id++;
373 374
374 list_add_tail(&block->list, &lun->free_list); 375 list_add_tail(&block->list, &lun->free_list);
@@ -378,8 +379,8 @@ static int gen_blocks_init(struct nvm_dev *dev, struct gen_dev *gn)
378 struct ppa_addr ppa; 379 struct ppa_addr ppa;
379 380
380 ppa.ppa = 0; 381 ppa.ppa = 0;
381 ppa.g.ch = lun->vlun.chnl_id; 382 ppa.g.ch = lun->chnl_id;
382 ppa.g.lun = lun->vlun.lun_id; 383 ppa.g.lun = lun->lun_id;
383 384
384 ret = nvm_get_bb_tbl(dev, ppa, blks); 385 ret = nvm_get_bb_tbl(dev, ppa, blks);
385 if (ret) 386 if (ret)
@@ -468,41 +469,39 @@ static void gen_unregister(struct nvm_dev *dev)
468} 469}
469 470
470static struct nvm_block *gen_get_blk(struct nvm_dev *dev, 471static struct nvm_block *gen_get_blk(struct nvm_dev *dev,
471 struct nvm_lun *vlun, unsigned long flags) 472 struct nvm_lun *lun, unsigned long flags)
472{ 473{
473 struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun);
474 struct nvm_block *blk = NULL; 474 struct nvm_block *blk = NULL;
475 int is_gc = flags & NVM_IOTYPE_GC; 475 int is_gc = flags & NVM_IOTYPE_GC;
476 476
477 spin_lock(&vlun->lock); 477 spin_lock(&lun->lock);
478 if (list_empty(&lun->free_list)) { 478 if (list_empty(&lun->free_list)) {
479 pr_err_ratelimited("gen: lun %u have no free pages available", 479 pr_err_ratelimited("gen: lun %u have no free pages available",
480 lun->vlun.id); 480 lun->id);
481 goto out; 481 goto out;
482 } 482 }
483 483
484 if (!is_gc && lun->vlun.nr_free_blocks < lun->reserved_blocks) 484 if (!is_gc && lun->nr_free_blocks < lun->reserved_blocks)
485 goto out; 485 goto out;
486 486
487 blk = list_first_entry(&lun->free_list, struct nvm_block, list); 487 blk = list_first_entry(&lun->free_list, struct nvm_block, list);
488 488
489 list_move_tail(&blk->list, &lun->used_list); 489 list_move_tail(&blk->list, &lun->used_list);
490 blk->state = NVM_BLK_ST_TGT; 490 blk->state = NVM_BLK_ST_TGT;
491 lun->vlun.nr_free_blocks--; 491 lun->nr_free_blocks--;
492out: 492out:
493 spin_unlock(&vlun->lock); 493 spin_unlock(&lun->lock);
494 return blk; 494 return blk;
495} 495}
496 496
497static void gen_put_blk(struct nvm_dev *dev, struct nvm_block *blk) 497static void gen_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
498{ 498{
499 struct nvm_lun *vlun = blk->lun; 499 struct nvm_lun *lun = blk->lun;
500 struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun);
501 500
502 spin_lock(&vlun->lock); 501 spin_lock(&lun->lock);
503 if (blk->state & NVM_BLK_ST_TGT) { 502 if (blk->state & NVM_BLK_ST_TGT) {
504 list_move_tail(&blk->list, &lun->free_list); 503 list_move_tail(&blk->list, &lun->free_list);
505 lun->vlun.nr_free_blocks++; 504 lun->nr_free_blocks++;
506 blk->state = NVM_BLK_ST_FREE; 505 blk->state = NVM_BLK_ST_FREE;
507 } else if (blk->state & NVM_BLK_ST_BAD) { 506 } else if (blk->state & NVM_BLK_ST_BAD) {
508 list_move_tail(&blk->list, &lun->bb_list); 507 list_move_tail(&blk->list, &lun->bb_list);
@@ -513,13 +512,13 @@ static void gen_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
513 blk->id, blk->state); 512 blk->id, blk->state);
514 list_move_tail(&blk->list, &lun->bb_list); 513 list_move_tail(&blk->list, &lun->bb_list);
515 } 514 }
516 spin_unlock(&vlun->lock); 515 spin_unlock(&lun->lock);
517} 516}
518 517
519static void gen_mark_blk(struct nvm_dev *dev, struct ppa_addr ppa, int type) 518static void gen_mark_blk(struct nvm_dev *dev, struct ppa_addr ppa, int type)
520{ 519{
521 struct gen_dev *gn = dev->mp; 520 struct gen_dev *gn = dev->mp;
522 struct gen_lun *lun; 521 struct nvm_lun *lun;
523 struct nvm_block *blk; 522 struct nvm_block *blk;
524 523
525 pr_debug("gen: ppa (ch: %u lun: %u blk: %u pg: %u) -> %u\n", 524 pr_debug("gen: ppa (ch: %u lun: %u blk: %u pg: %u) -> %u\n",
@@ -537,7 +536,7 @@ static void gen_mark_blk(struct nvm_dev *dev, struct ppa_addr ppa, int type)
537 } 536 }
538 537
539 lun = &gn->luns[(dev->luns_per_chnl * ppa.g.ch) + ppa.g.lun]; 538 lun = &gn->luns[(dev->luns_per_chnl * ppa.g.ch) + ppa.g.lun];
540 blk = &lun->vlun.blocks[ppa.g.blk]; 539 blk = &lun->blocks[ppa.g.blk];
541 540
542 /* will be moved to bb list on put_blk from target */ 541 /* will be moved to bb list on put_blk from target */
543 blk->state = type; 542 blk->state = type;
@@ -587,23 +586,23 @@ static struct nvm_lun *gen_get_lun(struct nvm_dev *dev, int lunid)
587 if (unlikely(lunid >= dev->nr_luns)) 586 if (unlikely(lunid >= dev->nr_luns))
588 return NULL; 587 return NULL;
589 588
590 return &gn->luns[lunid].vlun; 589 return &gn->luns[lunid];
591} 590}
592 591
593static void gen_lun_info_print(struct nvm_dev *dev) 592static void gen_lun_info_print(struct nvm_dev *dev)
594{ 593{
595 struct gen_dev *gn = dev->mp; 594 struct gen_dev *gn = dev->mp;
596 struct gen_lun *lun; 595 struct nvm_lun *lun;
597 unsigned int i; 596 unsigned int i;
598 597
599 598
600 gen_for_each_lun(gn, lun, i) { 599 gen_for_each_lun(gn, lun, i) {
601 spin_lock(&lun->vlun.lock); 600 spin_lock(&lun->lock);
602 601
603 pr_info("%s: lun%8u\t%u\n", dev->name, i, 602 pr_info("%s: lun%8u\t%u\n", dev->name, i,
604 lun->vlun.nr_free_blocks); 603 lun->nr_free_blocks);
605 604
606 spin_unlock(&lun->vlun.lock); 605 spin_unlock(&lun->lock);
607 } 606 }
608} 607}
609 608
diff --git a/drivers/lightnvm/gennvm.h b/drivers/lightnvm/gennvm.h
index 8ecfa817d21d..d167f391fbae 100644
--- a/drivers/lightnvm/gennvm.h
+++ b/drivers/lightnvm/gennvm.h
@@ -20,25 +20,11 @@
20 20
21#include <linux/lightnvm.h> 21#include <linux/lightnvm.h>
22 22
23struct gen_lun {
24 struct nvm_lun vlun;
25
26 int reserved_blocks;
27 /* lun block lists */
28 struct list_head used_list; /* In-use blocks */
29 struct list_head free_list; /* Not used blocks i.e. released
30 * and ready for use
31 */
32 struct list_head bb_list; /* Bad blocks. Mutually exclusive with
33 * free_list and used_list
34 */
35};
36
37struct gen_dev { 23struct gen_dev {
38 struct nvm_dev *dev; 24 struct nvm_dev *dev;
39 25
40 int nr_luns; 26 int nr_luns;
41 struct gen_lun *luns; 27 struct nvm_lun *luns;
42 struct list_head area_list; 28 struct list_head area_list;
43 29
44 struct mutex lock; 30 struct mutex lock;