aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorWenwei Tao <ww.tao0320@gmail.com>2016-01-12 01:49:25 -0500
committerJens Axboe <axboe@fb.com>2016-01-12 10:21:16 -0500
commitb262924be03d5d2ae735bc9a4b37eb2c613f61f8 (patch)
treed9128e64b6cd635084725a75d295c8aeb6ed6a35
parentd0ca798f960ad7d86f5186fe312c131d00563eb7 (diff)
lightnvm: fix locking and mempool in rrpc_lun_gc
This patch fixes two issues in rrpc_lun_gc: 1. prio_list is protected by rrpc_lun's lock, not nvm_lun's, so acquire rlun's lock instead of lun's before operating on the list. 2. We delete the block from prio_list before allocating the gcb, but gcb allocation may fail, and we end up without putting it back on the list; this means the block will never get reclaimed in the future. To solve this issue, delete the block after gcb allocation. Signed-off-by: Wenwei Tao <ww.tao0320@gmail.com> Signed-off-by: Matias Bjørling <m@bjorling.me> Signed-off-by: Jens Axboe <axboe@fb.com>
-rw-r--r--drivers/lightnvm/rrpc.c12
1 files changed, 6 insertions, 6 deletions
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index bee2352fcce1..745acd9db523 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -445,7 +445,7 @@ static void rrpc_lun_gc(struct work_struct *work)
445 if (nr_blocks_need < rrpc->nr_luns) 445 if (nr_blocks_need < rrpc->nr_luns)
446 nr_blocks_need = rrpc->nr_luns; 446 nr_blocks_need = rrpc->nr_luns;
447 447
448 spin_lock(&lun->lock); 448 spin_lock(&rlun->lock);
449 while (nr_blocks_need > lun->nr_free_blocks && 449 while (nr_blocks_need > lun->nr_free_blocks &&
450 !list_empty(&rlun->prio_list)) { 450 !list_empty(&rlun->prio_list)) {
451 struct rrpc_block *rblock = block_prio_find_max(rlun); 451 struct rrpc_block *rblock = block_prio_find_max(rlun);
@@ -454,16 +454,16 @@ static void rrpc_lun_gc(struct work_struct *work)
454 if (!rblock->nr_invalid_pages) 454 if (!rblock->nr_invalid_pages)
455 break; 455 break;
456 456
457 gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
458 if (!gcb)
459 break;
460
457 list_del_init(&rblock->prio); 461 list_del_init(&rblock->prio);
458 462
459 BUG_ON(!block_is_full(rrpc, rblock)); 463 BUG_ON(!block_is_full(rrpc, rblock));
460 464
461 pr_debug("rrpc: selected block '%lu' for GC\n", block->id); 465 pr_debug("rrpc: selected block '%lu' for GC\n", block->id);
462 466
463 gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
464 if (!gcb)
465 break;
466
467 gcb->rrpc = rrpc; 467 gcb->rrpc = rrpc;
468 gcb->rblk = rblock; 468 gcb->rblk = rblock;
469 INIT_WORK(&gcb->ws_gc, rrpc_block_gc); 469 INIT_WORK(&gcb->ws_gc, rrpc_block_gc);
@@ -472,7 +472,7 @@ static void rrpc_lun_gc(struct work_struct *work)
472 472
473 nr_blocks_need--; 473 nr_blocks_need--;
474 } 474 }
475 spin_unlock(&lun->lock); 475 spin_unlock(&rlun->lock);
476 476
477 /* TODO: Hint that request queue can be started again */ 477 /* TODO: Hint that request queue can be started again */
478} 478}