summaryrefslogtreecommitdiffstats
path: root/drivers/lightnvm
diff options
context:
space:
mode:
authorJavier González <jg@lightnvm.io>2016-01-12 01:49:33 -0500
committerJens Axboe <axboe@fb.com>2016-01-12 10:21:17 -0500
commitff0e498bfa185fad5e86c4c7a2db4f9648d2344f (patch)
tree52cf32454a651b0f90e81987353dc1ce0fbad5a7 /drivers/lightnvm
parentb5d4acd4cbf5029a2616084d9e9f392046d53a37 (diff)
lightnvm: manage open and closed blocks separately
LightNVM targets need to know the state of the flash block when doing flash optimizations. An example is implementing a write buffer to respect the flash page size. Currently, block state is not accounted for; the media manager only differentiates among free, bad and in-use blocks. This patch adds the logic in the generic media manager to enable targets to manage blocks as open and closed separately, and it implements such management in rrpc. It also adds a set of flags to describe the state of the block (open, closed, free, bad). In order to avoid taking two locks (nvm_lun and rrpc_lun) consecutively, we introduce lockless get_/put_block primitives so that the open and closed list locks and future common logic are handled within the nvm_lun lock. Signed-off-by: Javier González <javier@cnexlabs.com> Signed-off-by: Matias Bjørling <m@bjorling.me> Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'drivers/lightnvm')
-rw-r--r--drivers/lightnvm/core.c14
-rw-r--r--drivers/lightnvm/gennvm.c99
-rw-r--r--drivers/lightnvm/rrpc.c38
-rw-r--r--drivers/lightnvm/rrpc.h12
4 files changed, 120 insertions, 43 deletions
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index dc83e010d084..e5e396338319 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -167,6 +167,20 @@ static struct nvm_dev *nvm_find_nvm_dev(const char *name)
167 return NULL; 167 return NULL;
168} 168}
169 169
170struct nvm_block *nvm_get_blk_unlocked(struct nvm_dev *dev, struct nvm_lun *lun,
171 unsigned long flags)
172{
173 return dev->mt->get_blk_unlocked(dev, lun, flags);
174}
175EXPORT_SYMBOL(nvm_get_blk_unlocked);
176
177/* Assumes that all valid pages have already been moved on release to bm */
178void nvm_put_blk_unlocked(struct nvm_dev *dev, struct nvm_block *blk)
179{
180 return dev->mt->put_blk_unlocked(dev, blk);
181}
182EXPORT_SYMBOL(nvm_put_blk_unlocked);
183
170struct nvm_block *nvm_get_blk(struct nvm_dev *dev, struct nvm_lun *lun, 184struct nvm_block *nvm_get_blk(struct nvm_dev *dev, struct nvm_lun *lun,
171 unsigned long flags) 185 unsigned long flags)
172{ 186{
diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
index 4c15846b327f..7fb725b16148 100644
--- a/drivers/lightnvm/gennvm.c
+++ b/drivers/lightnvm/gennvm.c
@@ -60,7 +60,8 @@ static int gennvm_luns_init(struct nvm_dev *dev, struct gen_nvm *gn)
60 lun->vlun.lun_id = i % dev->luns_per_chnl; 60 lun->vlun.lun_id = i % dev->luns_per_chnl;
61 lun->vlun.chnl_id = i / dev->luns_per_chnl; 61 lun->vlun.chnl_id = i / dev->luns_per_chnl;
62 lun->vlun.nr_free_blocks = dev->blks_per_lun; 62 lun->vlun.nr_free_blocks = dev->blks_per_lun;
63 lun->vlun.nr_inuse_blocks = 0; 63 lun->vlun.nr_open_blocks = 0;
64 lun->vlun.nr_closed_blocks = 0;
64 lun->vlun.nr_bad_blocks = 0; 65 lun->vlun.nr_bad_blocks = 0;
65 } 66 }
66 return 0; 67 return 0;
@@ -134,15 +135,15 @@ static int gennvm_block_map(u64 slba, u32 nlb, __le64 *entries, void *private)
134 pba = pba - (dev->sec_per_lun * lun_id); 135 pba = pba - (dev->sec_per_lun * lun_id);
135 blk = &lun->vlun.blocks[div_u64(pba, dev->sec_per_blk)]; 136 blk = &lun->vlun.blocks[div_u64(pba, dev->sec_per_blk)];
136 137
137 if (!blk->type) { 138 if (!blk->state) {
138 /* at this point, we don't know anything about the 139 /* at this point, we don't know anything about the
139 * block. It's up to the FTL on top to re-etablish the 140 * block. It's up to the FTL on top to re-etablish the
140 * block state 141 * block state. The block is assumed to be open.
141 */ 142 */
142 list_move_tail(&blk->list, &lun->used_list); 143 list_move_tail(&blk->list, &lun->used_list);
143 blk->type = 1; 144 blk->state = NVM_BLK_ST_OPEN;
144 lun->vlun.nr_free_blocks--; 145 lun->vlun.nr_free_blocks--;
145 lun->vlun.nr_inuse_blocks++; 146 lun->vlun.nr_open_blocks++;
146 } 147 }
147 } 148 }
148 149
@@ -256,14 +257,14 @@ static void gennvm_unregister(struct nvm_dev *dev)
256 module_put(THIS_MODULE); 257 module_put(THIS_MODULE);
257} 258}
258 259
259static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev, 260static struct nvm_block *gennvm_get_blk_unlocked(struct nvm_dev *dev,
260 struct nvm_lun *vlun, unsigned long flags) 261 struct nvm_lun *vlun, unsigned long flags)
261{ 262{
262 struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun); 263 struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun);
263 struct nvm_block *blk = NULL; 264 struct nvm_block *blk = NULL;
264 int is_gc = flags & NVM_IOTYPE_GC; 265 int is_gc = flags & NVM_IOTYPE_GC;
265 266
266 spin_lock(&vlun->lock); 267 assert_spin_locked(&vlun->lock);
267 268
268 if (list_empty(&lun->free_list)) { 269 if (list_empty(&lun->free_list)) {
269 pr_err_ratelimited("gennvm: lun %u have no free pages available", 270 pr_err_ratelimited("gennvm: lun %u have no free pages available",
@@ -276,44 +277,63 @@ static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev,
276 277
277 blk = list_first_entry(&lun->free_list, struct nvm_block, list); 278 blk = list_first_entry(&lun->free_list, struct nvm_block, list);
278 list_move_tail(&blk->list, &lun->used_list); 279 list_move_tail(&blk->list, &lun->used_list);
279 blk->type = 1; 280 blk->state = NVM_BLK_ST_OPEN;
280 281
281 lun->vlun.nr_free_blocks--; 282 lun->vlun.nr_free_blocks--;
282 lun->vlun.nr_inuse_blocks++; 283 lun->vlun.nr_open_blocks++;
283 284
284out: 285out:
286 return blk;
287}
288
289static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev,
290 struct nvm_lun *vlun, unsigned long flags)
291{
292 struct nvm_block *blk;
293
294 spin_lock(&vlun->lock);
295 blk = gennvm_get_blk_unlocked(dev, vlun, flags);
285 spin_unlock(&vlun->lock); 296 spin_unlock(&vlun->lock);
286 return blk; 297 return blk;
287} 298}
288 299
289static void gennvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk) 300static void gennvm_put_blk_unlocked(struct nvm_dev *dev, struct nvm_block *blk)
290{ 301{
291 struct nvm_lun *vlun = blk->lun; 302 struct nvm_lun *vlun = blk->lun;
292 struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun); 303 struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun);
293 304
294 spin_lock(&vlun->lock); 305 assert_spin_locked(&vlun->lock);
295 306
296 switch (blk->type) { 307 if (blk->state & NVM_BLK_ST_OPEN) {
297 case 1:
298 list_move_tail(&blk->list, &lun->free_list); 308 list_move_tail(&blk->list, &lun->free_list);
309 lun->vlun.nr_open_blocks--;
299 lun->vlun.nr_free_blocks++; 310 lun->vlun.nr_free_blocks++;
300 lun->vlun.nr_inuse_blocks--; 311 blk->state = NVM_BLK_ST_FREE;
301 blk->type = 0; 312 } else if (blk->state & NVM_BLK_ST_CLOSED) {
302 break; 313 list_move_tail(&blk->list, &lun->free_list);
303 case 2: 314 lun->vlun.nr_closed_blocks--;
315 lun->vlun.nr_free_blocks++;
316 blk->state = NVM_BLK_ST_FREE;
317 } else if (blk->state & NVM_BLK_ST_BAD) {
304 list_move_tail(&blk->list, &lun->bb_list); 318 list_move_tail(&blk->list, &lun->bb_list);
305 lun->vlun.nr_bad_blocks++; 319 lun->vlun.nr_bad_blocks++;
306 lun->vlun.nr_inuse_blocks--; 320 blk->state = NVM_BLK_ST_BAD;
307 break; 321 } else {
308 default:
309 WARN_ON_ONCE(1); 322 WARN_ON_ONCE(1);
310 pr_err("gennvm: erroneous block type (%lu -> %u)\n", 323 pr_err("gennvm: erroneous block type (%lu -> %u)\n",
311 blk->id, blk->type); 324 blk->id, blk->state);
312 list_move_tail(&blk->list, &lun->bb_list); 325 list_move_tail(&blk->list, &lun->bb_list);
313 lun->vlun.nr_bad_blocks++; 326 lun->vlun.nr_bad_blocks++;
314 lun->vlun.nr_inuse_blocks--; 327 blk->state = NVM_BLK_ST_BAD;
315 } 328 }
329}
316 330
331static void gennvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
332{
333 struct nvm_lun *vlun = blk->lun;
334
335 spin_lock(&vlun->lock);
336 gennvm_put_blk_unlocked(dev, blk);
317 spin_unlock(&vlun->lock); 337 spin_unlock(&vlun->lock);
318} 338}
319 339
@@ -339,7 +359,7 @@ static void gennvm_blk_set_type(struct nvm_dev *dev, struct ppa_addr *ppa,
339 blk = &lun->vlun.blocks[ppa->g.blk]; 359 blk = &lun->vlun.blocks[ppa->g.blk];
340 360
341 /* will be moved to bb list on put_blk from target */ 361 /* will be moved to bb list on put_blk from target */
342 blk->type = type; 362 blk->state = type;
343} 363}
344 364
345/* mark block bad. It is expected the target recover from the error. */ 365/* mark block bad. It is expected the target recover from the error. */
@@ -358,9 +378,10 @@ static void gennvm_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd)
358 /* look up blocks and mark them as bad */ 378 /* look up blocks and mark them as bad */
359 if (rqd->nr_pages > 1) 379 if (rqd->nr_pages > 1)
360 for (i = 0; i < rqd->nr_pages; i++) 380 for (i = 0; i < rqd->nr_pages; i++)
361 gennvm_blk_set_type(dev, &rqd->ppa_list[i], 2); 381 gennvm_blk_set_type(dev, &rqd->ppa_list[i],
382 NVM_BLK_ST_BAD);
362 else 383 else
363 gennvm_blk_set_type(dev, &rqd->ppa_addr, 2); 384 gennvm_blk_set_type(dev, &rqd->ppa_addr, NVM_BLK_ST_BAD);
364} 385}
365 386
366static void gennvm_end_io(struct nvm_rq *rqd) 387static void gennvm_end_io(struct nvm_rq *rqd)
@@ -416,10 +437,11 @@ static void gennvm_lun_info_print(struct nvm_dev *dev)
416 gennvm_for_each_lun(gn, lun, i) { 437 gennvm_for_each_lun(gn, lun, i) {
417 spin_lock(&lun->vlun.lock); 438 spin_lock(&lun->vlun.lock);
418 439
419 pr_info("%s: lun%8u\t%u\t%u\t%u\n", 440 pr_info("%s: lun%8u\t%u\t%u\t%u\t%u\n",
420 dev->name, i, 441 dev->name, i,
421 lun->vlun.nr_free_blocks, 442 lun->vlun.nr_free_blocks,
422 lun->vlun.nr_inuse_blocks, 443 lun->vlun.nr_open_blocks,
444 lun->vlun.nr_closed_blocks,
423 lun->vlun.nr_bad_blocks); 445 lun->vlun.nr_bad_blocks);
424 446
425 spin_unlock(&lun->vlun.lock); 447 spin_unlock(&lun->vlun.lock);
@@ -427,20 +449,23 @@ static void gennvm_lun_info_print(struct nvm_dev *dev)
427} 449}
428 450
429static struct nvmm_type gennvm = { 451static struct nvmm_type gennvm = {
430 .name = "gennvm", 452 .name = "gennvm",
431 .version = {0, 1, 0}, 453 .version = {0, 1, 0},
454
455 .register_mgr = gennvm_register,
456 .unregister_mgr = gennvm_unregister,
432 457
433 .register_mgr = gennvm_register, 458 .get_blk_unlocked = gennvm_get_blk_unlocked,
434 .unregister_mgr = gennvm_unregister, 459 .put_blk_unlocked = gennvm_put_blk_unlocked,
435 460
436 .get_blk = gennvm_get_blk, 461 .get_blk = gennvm_get_blk,
437 .put_blk = gennvm_put_blk, 462 .put_blk = gennvm_put_blk,
438 463
439 .submit_io = gennvm_submit_io, 464 .submit_io = gennvm_submit_io,
440 .erase_blk = gennvm_erase_blk, 465 .erase_blk = gennvm_erase_blk,
441 466
442 .get_lun = gennvm_get_lun, 467 .get_lun = gennvm_get_lun,
443 .lun_info_print = gennvm_lun_info_print, 468 .lun_info_print = gennvm_lun_info_print,
444}; 469};
445 470
446static int __init gennvm_module_init(void) 471static int __init gennvm_module_init(void)
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index 280350c24cec..d8c75958ced3 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -179,16 +179,23 @@ static void rrpc_set_lun_cur(struct rrpc_lun *rlun, struct rrpc_block *rblk)
179static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun, 179static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
180 unsigned long flags) 180 unsigned long flags)
181{ 181{
182 struct nvm_lun *lun = rlun->parent;
182 struct nvm_block *blk; 183 struct nvm_block *blk;
183 struct rrpc_block *rblk; 184 struct rrpc_block *rblk;
184 185
185 blk = nvm_get_blk(rrpc->dev, rlun->parent, flags); 186 spin_lock(&lun->lock);
186 if (!blk) 187 blk = nvm_get_blk_unlocked(rrpc->dev, rlun->parent, flags);
188 if (!blk) {
189 pr_err("nvm: rrpc: cannot get new block from media manager\n");
190 spin_unlock(&lun->lock);
187 return NULL; 191 return NULL;
192 }
188 193
189 rblk = &rlun->blocks[blk->id]; 194 rblk = &rlun->blocks[blk->id];
190 blk->priv = rblk; 195 list_add_tail(&rblk->list, &rlun->open_list);
196 spin_unlock(&lun->lock);
191 197
198 blk->priv = rblk;
192 bitmap_zero(rblk->invalid_pages, rrpc->dev->pgs_per_blk); 199 bitmap_zero(rblk->invalid_pages, rrpc->dev->pgs_per_blk);
193 rblk->next_page = 0; 200 rblk->next_page = 0;
194 rblk->nr_invalid_pages = 0; 201 rblk->nr_invalid_pages = 0;
@@ -199,7 +206,13 @@ static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
199 206
200static void rrpc_put_blk(struct rrpc *rrpc, struct rrpc_block *rblk) 207static void rrpc_put_blk(struct rrpc *rrpc, struct rrpc_block *rblk)
201{ 208{
202 nvm_put_blk(rrpc->dev, rblk->parent); 209 struct rrpc_lun *rlun = rblk->rlun;
210 struct nvm_lun *lun = rlun->parent;
211
212 spin_lock(&lun->lock);
213 nvm_put_blk_unlocked(rrpc->dev, rblk->parent);
214 list_del(&rblk->list);
215 spin_unlock(&lun->lock);
203} 216}
204 217
205static void rrpc_put_blks(struct rrpc *rrpc) 218static void rrpc_put_blks(struct rrpc *rrpc)
@@ -653,8 +666,20 @@ static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
653 lun = rblk->parent->lun; 666 lun = rblk->parent->lun;
654 667
655 cmnt_size = atomic_inc_return(&rblk->data_cmnt_size); 668 cmnt_size = atomic_inc_return(&rblk->data_cmnt_size);
656 if (unlikely(cmnt_size == rrpc->dev->pgs_per_blk)) 669 if (unlikely(cmnt_size == rrpc->dev->pgs_per_blk)) {
670 struct nvm_block *blk = rblk->parent;
671 struct rrpc_lun *rlun = rblk->rlun;
672
673 spin_lock(&lun->lock);
674 lun->nr_open_blocks--;
675 lun->nr_closed_blocks++;
676 blk->state &= ~NVM_BLK_ST_OPEN;
677 blk->state |= NVM_BLK_ST_CLOSED;
678 list_move_tail(&rblk->list, &rlun->closed_list);
679 spin_unlock(&lun->lock);
680
657 rrpc_run_gc(rrpc, rblk); 681 rrpc_run_gc(rrpc, rblk);
682 }
658 } 683 }
659} 684}
660 685
@@ -1134,6 +1159,9 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
1134 rlun->rrpc = rrpc; 1159 rlun->rrpc = rrpc;
1135 rlun->parent = lun; 1160 rlun->parent = lun;
1136 INIT_LIST_HEAD(&rlun->prio_list); 1161 INIT_LIST_HEAD(&rlun->prio_list);
1162 INIT_LIST_HEAD(&rlun->open_list);
1163 INIT_LIST_HEAD(&rlun->closed_list);
1164
1137 INIT_WORK(&rlun->ws_gc, rrpc_lun_gc); 1165 INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
1138 spin_lock_init(&rlun->lock); 1166 spin_lock_init(&rlun->lock);
1139 1167
diff --git a/drivers/lightnvm/rrpc.h b/drivers/lightnvm/rrpc.h
index 7c5fa4dd9722..ef13ac7700c8 100644
--- a/drivers/lightnvm/rrpc.h
+++ b/drivers/lightnvm/rrpc.h
@@ -56,6 +56,7 @@ struct rrpc_block {
56 struct nvm_block *parent; 56 struct nvm_block *parent;
57 struct rrpc_lun *rlun; 57 struct rrpc_lun *rlun;
58 struct list_head prio; 58 struct list_head prio;
59 struct list_head list;
59 60
60#define MAX_INVALID_PAGES_STORAGE 8 61#define MAX_INVALID_PAGES_STORAGE 8
61 /* Bitmap for invalid page intries */ 62 /* Bitmap for invalid page intries */
@@ -74,7 +75,16 @@ struct rrpc_lun {
74 struct nvm_lun *parent; 75 struct nvm_lun *parent;
75 struct rrpc_block *cur, *gc_cur; 76 struct rrpc_block *cur, *gc_cur;
76 struct rrpc_block *blocks; /* Reference to block allocation */ 77 struct rrpc_block *blocks; /* Reference to block allocation */
77 struct list_head prio_list; /* Blocks that may be GC'ed */ 78
79 struct list_head prio_list; /* Blocks that may be GC'ed */
80 struct list_head open_list; /* In-use open blocks. These are blocks
81 * that can be both written to and read
82 * from
83 */
84 struct list_head closed_list; /* In-use closed blocks. These are
85 * blocks that can _only_ be read from
86 */
87
78 struct work_struct ws_gc; 88 struct work_struct ws_gc;
79 89
80 spinlock_t lock; 90 spinlock_t lock;