summaryrefslogtreecommitdiffstats
path: root/drivers/lightnvm/gennvm.c
diff options
context:
space:
mode:
authorJavier González <jg@lightnvm.io>2016-01-12 01:49:33 -0500
committerJens Axboe <axboe@fb.com>2016-01-12 10:21:17 -0500
commitff0e498bfa185fad5e86c4c7a2db4f9648d2344f (patch)
tree52cf32454a651b0f90e81987353dc1ce0fbad5a7 /drivers/lightnvm/gennvm.c
parentb5d4acd4cbf5029a2616084d9e9f392046d53a37 (diff)
lightnvm: manage open and closed blocks separately
LightNVM targets need to know the state of the flash block when doing flash optimizations. An example is implementing a write buffer to respect the flash page size. Currently, block state is not accounted for; the media manager only differentiates among free, bad and in-use blocks. This patch adds the logic in the generic media manager to enable targets to manage open and closed blocks separately, and it implements such management in rrpc. It also adds a set of flags to describe the state of the block (open, closed, free, bad). In order to avoid taking two locks (nvm_lun and rrpc_lun) consecutively, we introduce lockless get_/put_block primitives so that the open and closed list locks and future common logic are handled within the nvm_lun lock. Signed-off-by: Javier González <javier@cnexlabs.com> Signed-off-by: Matias Bjørling <m@bjorling.me> Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'drivers/lightnvm/gennvm.c')
-rw-r--r--drivers/lightnvm/gennvm.c99
1 file changed, 62 insertions, 37 deletions
diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
index 4c15846b327f..7fb725b16148 100644
--- a/drivers/lightnvm/gennvm.c
+++ b/drivers/lightnvm/gennvm.c
@@ -60,7 +60,8 @@ static int gennvm_luns_init(struct nvm_dev *dev, struct gen_nvm *gn)
60 lun->vlun.lun_id = i % dev->luns_per_chnl; 60 lun->vlun.lun_id = i % dev->luns_per_chnl;
61 lun->vlun.chnl_id = i / dev->luns_per_chnl; 61 lun->vlun.chnl_id = i / dev->luns_per_chnl;
62 lun->vlun.nr_free_blocks = dev->blks_per_lun; 62 lun->vlun.nr_free_blocks = dev->blks_per_lun;
63 lun->vlun.nr_inuse_blocks = 0; 63 lun->vlun.nr_open_blocks = 0;
64 lun->vlun.nr_closed_blocks = 0;
64 lun->vlun.nr_bad_blocks = 0; 65 lun->vlun.nr_bad_blocks = 0;
65 } 66 }
66 return 0; 67 return 0;
@@ -134,15 +135,15 @@ static int gennvm_block_map(u64 slba, u32 nlb, __le64 *entries, void *private)
134 pba = pba - (dev->sec_per_lun * lun_id); 135 pba = pba - (dev->sec_per_lun * lun_id);
135 blk = &lun->vlun.blocks[div_u64(pba, dev->sec_per_blk)]; 136 blk = &lun->vlun.blocks[div_u64(pba, dev->sec_per_blk)];
136 137
137 if (!blk->type) { 138 if (!blk->state) {
138 /* at this point, we don't know anything about the 139 /* at this point, we don't know anything about the
139 * block. It's up to the FTL on top to re-etablish the 140 * block. It's up to the FTL on top to re-etablish the
140 * block state 141 * block state. The block is assumed to be open.
141 */ 142 */
142 list_move_tail(&blk->list, &lun->used_list); 143 list_move_tail(&blk->list, &lun->used_list);
143 blk->type = 1; 144 blk->state = NVM_BLK_ST_OPEN;
144 lun->vlun.nr_free_blocks--; 145 lun->vlun.nr_free_blocks--;
145 lun->vlun.nr_inuse_blocks++; 146 lun->vlun.nr_open_blocks++;
146 } 147 }
147 } 148 }
148 149
@@ -256,14 +257,14 @@ static void gennvm_unregister(struct nvm_dev *dev)
256 module_put(THIS_MODULE); 257 module_put(THIS_MODULE);
257} 258}
258 259
259static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev, 260static struct nvm_block *gennvm_get_blk_unlocked(struct nvm_dev *dev,
260 struct nvm_lun *vlun, unsigned long flags) 261 struct nvm_lun *vlun, unsigned long flags)
261{ 262{
262 struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun); 263 struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun);
263 struct nvm_block *blk = NULL; 264 struct nvm_block *blk = NULL;
264 int is_gc = flags & NVM_IOTYPE_GC; 265 int is_gc = flags & NVM_IOTYPE_GC;
265 266
266 spin_lock(&vlun->lock); 267 assert_spin_locked(&vlun->lock);
267 268
268 if (list_empty(&lun->free_list)) { 269 if (list_empty(&lun->free_list)) {
269 pr_err_ratelimited("gennvm: lun %u have no free pages available", 270 pr_err_ratelimited("gennvm: lun %u have no free pages available",
@@ -276,44 +277,63 @@ static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev,
276 277
277 blk = list_first_entry(&lun->free_list, struct nvm_block, list); 278 blk = list_first_entry(&lun->free_list, struct nvm_block, list);
278 list_move_tail(&blk->list, &lun->used_list); 279 list_move_tail(&blk->list, &lun->used_list);
279 blk->type = 1; 280 blk->state = NVM_BLK_ST_OPEN;
280 281
281 lun->vlun.nr_free_blocks--; 282 lun->vlun.nr_free_blocks--;
282 lun->vlun.nr_inuse_blocks++; 283 lun->vlun.nr_open_blocks++;
283 284
284out: 285out:
286 return blk;
287}
288
289static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev,
290 struct nvm_lun *vlun, unsigned long flags)
291{
292 struct nvm_block *blk;
293
294 spin_lock(&vlun->lock);
295 blk = gennvm_get_blk_unlocked(dev, vlun, flags);
285 spin_unlock(&vlun->lock); 296 spin_unlock(&vlun->lock);
286 return blk; 297 return blk;
287} 298}
288 299
289static void gennvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk) 300static void gennvm_put_blk_unlocked(struct nvm_dev *dev, struct nvm_block *blk)
290{ 301{
291 struct nvm_lun *vlun = blk->lun; 302 struct nvm_lun *vlun = blk->lun;
292 struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun); 303 struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun);
293 304
294 spin_lock(&vlun->lock); 305 assert_spin_locked(&vlun->lock);
295 306
296 switch (blk->type) { 307 if (blk->state & NVM_BLK_ST_OPEN) {
297 case 1:
298 list_move_tail(&blk->list, &lun->free_list); 308 list_move_tail(&blk->list, &lun->free_list);
309 lun->vlun.nr_open_blocks--;
299 lun->vlun.nr_free_blocks++; 310 lun->vlun.nr_free_blocks++;
300 lun->vlun.nr_inuse_blocks--; 311 blk->state = NVM_BLK_ST_FREE;
301 blk->type = 0; 312 } else if (blk->state & NVM_BLK_ST_CLOSED) {
302 break; 313 list_move_tail(&blk->list, &lun->free_list);
303 case 2: 314 lun->vlun.nr_closed_blocks--;
315 lun->vlun.nr_free_blocks++;
316 blk->state = NVM_BLK_ST_FREE;
317 } else if (blk->state & NVM_BLK_ST_BAD) {
304 list_move_tail(&blk->list, &lun->bb_list); 318 list_move_tail(&blk->list, &lun->bb_list);
305 lun->vlun.nr_bad_blocks++; 319 lun->vlun.nr_bad_blocks++;
306 lun->vlun.nr_inuse_blocks--; 320 blk->state = NVM_BLK_ST_BAD;
307 break; 321 } else {
308 default:
309 WARN_ON_ONCE(1); 322 WARN_ON_ONCE(1);
310 pr_err("gennvm: erroneous block type (%lu -> %u)\n", 323 pr_err("gennvm: erroneous block type (%lu -> %u)\n",
311 blk->id, blk->type); 324 blk->id, blk->state);
312 list_move_tail(&blk->list, &lun->bb_list); 325 list_move_tail(&blk->list, &lun->bb_list);
313 lun->vlun.nr_bad_blocks++; 326 lun->vlun.nr_bad_blocks++;
314 lun->vlun.nr_inuse_blocks--; 327 blk->state = NVM_BLK_ST_BAD;
315 } 328 }
329}
316 330
331static void gennvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
332{
333 struct nvm_lun *vlun = blk->lun;
334
335 spin_lock(&vlun->lock);
336 gennvm_put_blk_unlocked(dev, blk);
317 spin_unlock(&vlun->lock); 337 spin_unlock(&vlun->lock);
318} 338}
319 339
@@ -339,7 +359,7 @@ static void gennvm_blk_set_type(struct nvm_dev *dev, struct ppa_addr *ppa,
339 blk = &lun->vlun.blocks[ppa->g.blk]; 359 blk = &lun->vlun.blocks[ppa->g.blk];
340 360
341 /* will be moved to bb list on put_blk from target */ 361 /* will be moved to bb list on put_blk from target */
342 blk->type = type; 362 blk->state = type;
343} 363}
344 364
345/* mark block bad. It is expected the target recover from the error. */ 365/* mark block bad. It is expected the target recover from the error. */
@@ -358,9 +378,10 @@ static void gennvm_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd)
358 /* look up blocks and mark them as bad */ 378 /* look up blocks and mark them as bad */
359 if (rqd->nr_pages > 1) 379 if (rqd->nr_pages > 1)
360 for (i = 0; i < rqd->nr_pages; i++) 380 for (i = 0; i < rqd->nr_pages; i++)
361 gennvm_blk_set_type(dev, &rqd->ppa_list[i], 2); 381 gennvm_blk_set_type(dev, &rqd->ppa_list[i],
382 NVM_BLK_ST_BAD);
362 else 383 else
363 gennvm_blk_set_type(dev, &rqd->ppa_addr, 2); 384 gennvm_blk_set_type(dev, &rqd->ppa_addr, NVM_BLK_ST_BAD);
364} 385}
365 386
366static void gennvm_end_io(struct nvm_rq *rqd) 387static void gennvm_end_io(struct nvm_rq *rqd)
@@ -416,10 +437,11 @@ static void gennvm_lun_info_print(struct nvm_dev *dev)
416 gennvm_for_each_lun(gn, lun, i) { 437 gennvm_for_each_lun(gn, lun, i) {
417 spin_lock(&lun->vlun.lock); 438 spin_lock(&lun->vlun.lock);
418 439
419 pr_info("%s: lun%8u\t%u\t%u\t%u\n", 440 pr_info("%s: lun%8u\t%u\t%u\t%u\t%u\n",
420 dev->name, i, 441 dev->name, i,
421 lun->vlun.nr_free_blocks, 442 lun->vlun.nr_free_blocks,
422 lun->vlun.nr_inuse_blocks, 443 lun->vlun.nr_open_blocks,
444 lun->vlun.nr_closed_blocks,
423 lun->vlun.nr_bad_blocks); 445 lun->vlun.nr_bad_blocks);
424 446
425 spin_unlock(&lun->vlun.lock); 447 spin_unlock(&lun->vlun.lock);
@@ -427,20 +449,23 @@ static void gennvm_lun_info_print(struct nvm_dev *dev)
427} 449}
428 450
429static struct nvmm_type gennvm = { 451static struct nvmm_type gennvm = {
430 .name = "gennvm", 452 .name = "gennvm",
431 .version = {0, 1, 0}, 453 .version = {0, 1, 0},
454
455 .register_mgr = gennvm_register,
456 .unregister_mgr = gennvm_unregister,
432 457
433 .register_mgr = gennvm_register, 458 .get_blk_unlocked = gennvm_get_blk_unlocked,
434 .unregister_mgr = gennvm_unregister, 459 .put_blk_unlocked = gennvm_put_blk_unlocked,
435 460
436 .get_blk = gennvm_get_blk, 461 .get_blk = gennvm_get_blk,
437 .put_blk = gennvm_put_blk, 462 .put_blk = gennvm_put_blk,
438 463
439 .submit_io = gennvm_submit_io, 464 .submit_io = gennvm_submit_io,
440 .erase_blk = gennvm_erase_blk, 465 .erase_blk = gennvm_erase_blk,
441 466
442 .get_lun = gennvm_get_lun, 467 .get_lun = gennvm_get_lun,
443 .lun_info_print = gennvm_lun_info_print, 468 .lun_info_print = gennvm_lun_info_print,
444}; 469};
445 470
446static int __init gennvm_module_init(void) 471static int __init gennvm_module_init(void)