about summary refs log tree commit diff stats
path: root/drivers
diff options
context:
space:
mode:
authorMatias Bjørling <m@bjorling.me>2016-07-07 03:54:15 -0400
committerJens Axboe <axboe@fb.com>2016-07-07 10:51:52 -0400
commit5e60edb7dcedd6bdcf946ba765f51e8d363d65a8 (patch)
treeece79aec7846bd55953168a0f0da90603ccc1088 /drivers
parent077d2389994197277f4f7662b13d11b2f67646a7 (diff)
lightnvm: rename gennvm and update description
The generic manager should be called the general media manager, and instead of using the rather long name of "gennvm" in front of each data structure, use "gen" instead to shorten it. Update the description of the media manager as well to make the media manager's purpose clearer.

Signed-off-by: Matias Bjørling <m@bjorling.me>
Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/lightnvm/Kconfig10
-rw-r--r--drivers/lightnvm/gennvm.c184
-rw-r--r--drivers/lightnvm/gennvm.h7
3 files changed, 102 insertions, 99 deletions
diff --git a/drivers/lightnvm/Kconfig b/drivers/lightnvm/Kconfig
index 85a339030e4b..61c68a1f054a 100644
--- a/drivers/lightnvm/Kconfig
+++ b/drivers/lightnvm/Kconfig
@@ -27,11 +27,13 @@ config NVM_DEBUG
27 It is required to create/remove targets without IOCTLs. 27 It is required to create/remove targets without IOCTLs.
28 28
29config NVM_GENNVM 29config NVM_GENNVM
30 tristate "Generic NVM manager for Open-Channel SSDs" 30 tristate "General Non-Volatile Memory Manager for Open-Channel SSDs"
31 ---help--- 31 ---help---
32 NVM media manager for Open-Channel SSDs that offload management 32 Non-volatile memory media manager for Open-Channel SSDs that implements
33 functionality to device, while keeping data placement and garbage 33 physical media metadata management and block provisioning API.
34 collection decisions on the host. 34
35 This is the standard media manager for using Open-Channel SSDs, and
36 required for targets to be instantiated.
35 37
36config NVM_RRPC 38config NVM_RRPC
37 tristate "Round-robin Hybrid Open-Channel SSD target" 39 tristate "Round-robin Hybrid Open-Channel SSD target"
diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
index 1a3e1649346b..3d2762f2a6fe 100644
--- a/drivers/lightnvm/gennvm.c
+++ b/drivers/lightnvm/gennvm.c
@@ -15,22 +15,22 @@
15 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, 15 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
16 * USA. 16 * USA.
17 * 17 *
18 * Implementation of a generic nvm manager for Open-Channel SSDs. 18 * Implementation of a general nvm manager for Open-Channel SSDs.
19 */ 19 */
20 20
21#include "gennvm.h" 21#include "gennvm.h"
22 22
23static int gennvm_get_area(struct nvm_dev *dev, sector_t *lba, sector_t len) 23static int gen_get_area(struct nvm_dev *dev, sector_t *lba, sector_t len)
24{ 24{
25 struct gen_nvm *gn = dev->mp; 25 struct gen_dev *gn = dev->mp;
26 struct gennvm_area *area, *prev, *next; 26 struct gen_area *area, *prev, *next;
27 sector_t begin = 0; 27 sector_t begin = 0;
28 sector_t max_sectors = (dev->sec_size * dev->total_secs) >> 9; 28 sector_t max_sectors = (dev->sec_size * dev->total_secs) >> 9;
29 29
30 if (len > max_sectors) 30 if (len > max_sectors)
31 return -EINVAL; 31 return -EINVAL;
32 32
33 area = kmalloc(sizeof(struct gennvm_area), GFP_KERNEL); 33 area = kmalloc(sizeof(struct gen_area), GFP_KERNEL);
34 if (!area) 34 if (!area)
35 return -ENOMEM; 35 return -ENOMEM;
36 36
@@ -64,10 +64,10 @@ static int gennvm_get_area(struct nvm_dev *dev, sector_t *lba, sector_t len)
64 return 0; 64 return 0;
65} 65}
66 66
67static void gennvm_put_area(struct nvm_dev *dev, sector_t begin) 67static void gen_put_area(struct nvm_dev *dev, sector_t begin)
68{ 68{
69 struct gen_nvm *gn = dev->mp; 69 struct gen_dev *gn = dev->mp;
70 struct gennvm_area *area; 70 struct gen_area *area;
71 71
72 spin_lock(&dev->lock); 72 spin_lock(&dev->lock);
73 list_for_each_entry(area, &gn->area_list, list) { 73 list_for_each_entry(area, &gn->area_list, list) {
@@ -82,27 +82,27 @@ static void gennvm_put_area(struct nvm_dev *dev, sector_t begin)
82 spin_unlock(&dev->lock); 82 spin_unlock(&dev->lock);
83} 83}
84 84
85static void gennvm_blocks_free(struct nvm_dev *dev) 85static void gen_blocks_free(struct nvm_dev *dev)
86{ 86{
87 struct gen_nvm *gn = dev->mp; 87 struct gen_dev *gn = dev->mp;
88 struct gen_lun *lun; 88 struct gen_lun *lun;
89 int i; 89 int i;
90 90
91 gennvm_for_each_lun(gn, lun, i) { 91 gen_for_each_lun(gn, lun, i) {
92 if (!lun->vlun.blocks) 92 if (!lun->vlun.blocks)
93 break; 93 break;
94 vfree(lun->vlun.blocks); 94 vfree(lun->vlun.blocks);
95 } 95 }
96} 96}
97 97
98static void gennvm_luns_free(struct nvm_dev *dev) 98static void gen_luns_free(struct nvm_dev *dev)
99{ 99{
100 struct gen_nvm *gn = dev->mp; 100 struct gen_dev *gn = dev->mp;
101 101
102 kfree(gn->luns); 102 kfree(gn->luns);
103} 103}
104 104
105static int gennvm_luns_init(struct nvm_dev *dev, struct gen_nvm *gn) 105static int gen_luns_init(struct nvm_dev *dev, struct gen_dev *gn)
106{ 106{
107 struct gen_lun *lun; 107 struct gen_lun *lun;
108 int i; 108 int i;
@@ -111,7 +111,7 @@ static int gennvm_luns_init(struct nvm_dev *dev, struct gen_nvm *gn)
111 if (!gn->luns) 111 if (!gn->luns)
112 return -ENOMEM; 112 return -ENOMEM;
113 113
114 gennvm_for_each_lun(gn, lun, i) { 114 gen_for_each_lun(gn, lun, i) {
115 spin_lock_init(&lun->vlun.lock); 115 spin_lock_init(&lun->vlun.lock);
116 INIT_LIST_HEAD(&lun->free_list); 116 INIT_LIST_HEAD(&lun->free_list);
117 INIT_LIST_HEAD(&lun->used_list); 117 INIT_LIST_HEAD(&lun->used_list);
@@ -126,7 +126,7 @@ static int gennvm_luns_init(struct nvm_dev *dev, struct gen_nvm *gn)
126 return 0; 126 return 0;
127} 127}
128 128
129static int gennvm_block_bb(struct gen_nvm *gn, struct ppa_addr ppa, 129static int gen_block_bb(struct gen_dev *gn, struct ppa_addr ppa,
130 u8 *blks, int nr_blks) 130 u8 *blks, int nr_blks)
131{ 131{
132 struct nvm_dev *dev = gn->dev; 132 struct nvm_dev *dev = gn->dev;
@@ -152,10 +152,10 @@ static int gennvm_block_bb(struct gen_nvm *gn, struct ppa_addr ppa,
152 return 0; 152 return 0;
153} 153}
154 154
155static int gennvm_block_map(u64 slba, u32 nlb, __le64 *entries, void *private) 155static int gen_block_map(u64 slba, u32 nlb, __le64 *entries, void *private)
156{ 156{
157 struct nvm_dev *dev = private; 157 struct nvm_dev *dev = private;
158 struct gen_nvm *gn = dev->mp; 158 struct gen_dev *gn = dev->mp;
159 u64 elba = slba + nlb; 159 u64 elba = slba + nlb;
160 struct gen_lun *lun; 160 struct gen_lun *lun;
161 struct nvm_block *blk; 161 struct nvm_block *blk;
@@ -163,7 +163,7 @@ static int gennvm_block_map(u64 slba, u32 nlb, __le64 *entries, void *private)
163 int lun_id; 163 int lun_id;
164 164
165 if (unlikely(elba > dev->total_secs)) { 165 if (unlikely(elba > dev->total_secs)) {
166 pr_err("gennvm: L2P data from device is out of bounds!\n"); 166 pr_err("gen: L2P data from device is out of bounds!\n");
167 return -EINVAL; 167 return -EINVAL;
168 } 168 }
169 169
@@ -171,7 +171,7 @@ static int gennvm_block_map(u64 slba, u32 nlb, __le64 *entries, void *private)
171 u64 pba = le64_to_cpu(entries[i]); 171 u64 pba = le64_to_cpu(entries[i]);
172 172
173 if (unlikely(pba >= dev->total_secs && pba != U64_MAX)) { 173 if (unlikely(pba >= dev->total_secs && pba != U64_MAX)) {
174 pr_err("gennvm: L2P data entry is out of bounds!\n"); 174 pr_err("gen: L2P data entry is out of bounds!\n");
175 return -EINVAL; 175 return -EINVAL;
176 } 176 }
177 177
@@ -204,7 +204,7 @@ static int gennvm_block_map(u64 slba, u32 nlb, __le64 *entries, void *private)
204 return 0; 204 return 0;
205} 205}
206 206
207static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn) 207static int gen_blocks_init(struct nvm_dev *dev, struct gen_dev *gn)
208{ 208{
209 struct gen_lun *lun; 209 struct gen_lun *lun;
210 struct nvm_block *block; 210 struct nvm_block *block;
@@ -217,7 +217,7 @@ static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn)
217 if (!blks) 217 if (!blks)
218 return -ENOMEM; 218 return -ENOMEM;
219 219
220 gennvm_for_each_lun(gn, lun, lun_iter) { 220 gen_for_each_lun(gn, lun, lun_iter) {
221 lun->vlun.blocks = vzalloc(sizeof(struct nvm_block) * 221 lun->vlun.blocks = vzalloc(sizeof(struct nvm_block) *
222 dev->blks_per_lun); 222 dev->blks_per_lun);
223 if (!lun->vlun.blocks) { 223 if (!lun->vlun.blocks) {
@@ -251,20 +251,20 @@ static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn)
251 251
252 ret = nvm_get_bb_tbl(dev, ppa, blks); 252 ret = nvm_get_bb_tbl(dev, ppa, blks);
253 if (ret) 253 if (ret)
254 pr_err("gennvm: could not get BB table\n"); 254 pr_err("gen: could not get BB table\n");
255 255
256 ret = gennvm_block_bb(gn, ppa, blks, nr_blks); 256 ret = gen_block_bb(gn, ppa, blks, nr_blks);
257 if (ret) 257 if (ret)
258 pr_err("gennvm: BB table map failed\n"); 258 pr_err("gen: BB table map failed\n");
259 } 259 }
260 } 260 }
261 261
262 if ((dev->identity.dom & NVM_RSP_L2P) && dev->ops->get_l2p_tbl) { 262 if ((dev->identity.dom & NVM_RSP_L2P) && dev->ops->get_l2p_tbl) {
263 ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_secs, 263 ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_secs,
264 gennvm_block_map, dev); 264 gen_block_map, dev);
265 if (ret) { 265 if (ret) {
266 pr_err("gennvm: could not read L2P table.\n"); 266 pr_err("gen: could not read L2P table.\n");
267 pr_warn("gennvm: default block initialization"); 267 pr_warn("gen: default block initialization");
268 } 268 }
269 } 269 }
270 270
@@ -272,23 +272,23 @@ static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn)
272 return 0; 272 return 0;
273} 273}
274 274
275static void gennvm_free(struct nvm_dev *dev) 275static void gen_free(struct nvm_dev *dev)
276{ 276{
277 gennvm_blocks_free(dev); 277 gen_blocks_free(dev);
278 gennvm_luns_free(dev); 278 gen_luns_free(dev);
279 kfree(dev->mp); 279 kfree(dev->mp);
280 dev->mp = NULL; 280 dev->mp = NULL;
281} 281}
282 282
283static int gennvm_register(struct nvm_dev *dev) 283static int gen_register(struct nvm_dev *dev)
284{ 284{
285 struct gen_nvm *gn; 285 struct gen_dev *gn;
286 int ret; 286 int ret;
287 287
288 if (!try_module_get(THIS_MODULE)) 288 if (!try_module_get(THIS_MODULE))
289 return -ENODEV; 289 return -ENODEV;
290 290
291 gn = kzalloc(sizeof(struct gen_nvm), GFP_KERNEL); 291 gn = kzalloc(sizeof(struct gen_dev), GFP_KERNEL);
292 if (!gn) 292 if (!gn)
293 return -ENOMEM; 293 return -ENOMEM;
294 294
@@ -297,32 +297,32 @@ static int gennvm_register(struct nvm_dev *dev)
297 INIT_LIST_HEAD(&gn->area_list); 297 INIT_LIST_HEAD(&gn->area_list);
298 dev->mp = gn; 298 dev->mp = gn;
299 299
300 ret = gennvm_luns_init(dev, gn); 300 ret = gen_luns_init(dev, gn);
301 if (ret) { 301 if (ret) {
302 pr_err("gennvm: could not initialize luns\n"); 302 pr_err("gen: could not initialize luns\n");
303 goto err; 303 goto err;
304 } 304 }
305 305
306 ret = gennvm_blocks_init(dev, gn); 306 ret = gen_blocks_init(dev, gn);
307 if (ret) { 307 if (ret) {
308 pr_err("gennvm: could not initialize blocks\n"); 308 pr_err("gen: could not initialize blocks\n");
309 goto err; 309 goto err;
310 } 310 }
311 311
312 return 1; 312 return 1;
313err: 313err:
314 gennvm_free(dev); 314 gen_free(dev);
315 module_put(THIS_MODULE); 315 module_put(THIS_MODULE);
316 return ret; 316 return ret;
317} 317}
318 318
319static void gennvm_unregister(struct nvm_dev *dev) 319static void gen_unregister(struct nvm_dev *dev)
320{ 320{
321 gennvm_free(dev); 321 gen_free(dev);
322 module_put(THIS_MODULE); 322 module_put(THIS_MODULE);
323} 323}
324 324
325static struct nvm_block *gennvm_get_blk_unlocked(struct nvm_dev *dev, 325static struct nvm_block *gen_get_blk_unlocked(struct nvm_dev *dev,
326 struct nvm_lun *vlun, unsigned long flags) 326 struct nvm_lun *vlun, unsigned long flags)
327{ 327{
328 struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun); 328 struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun);
@@ -332,7 +332,7 @@ static struct nvm_block *gennvm_get_blk_unlocked(struct nvm_dev *dev,
332 assert_spin_locked(&vlun->lock); 332 assert_spin_locked(&vlun->lock);
333 333
334 if (list_empty(&lun->free_list)) { 334 if (list_empty(&lun->free_list)) {
335 pr_err_ratelimited("gennvm: lun %u have no free pages available", 335 pr_err_ratelimited("gen: lun %u have no free pages available",
336 lun->vlun.id); 336 lun->vlun.id);
337 goto out; 337 goto out;
338 } 338 }
@@ -350,18 +350,18 @@ out:
350 return blk; 350 return blk;
351} 351}
352 352
353static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev, 353static struct nvm_block *gen_get_blk(struct nvm_dev *dev,
354 struct nvm_lun *vlun, unsigned long flags) 354 struct nvm_lun *vlun, unsigned long flags)
355{ 355{
356 struct nvm_block *blk; 356 struct nvm_block *blk;
357 357
358 spin_lock(&vlun->lock); 358 spin_lock(&vlun->lock);
359 blk = gennvm_get_blk_unlocked(dev, vlun, flags); 359 blk = gen_get_blk_unlocked(dev, vlun, flags);
360 spin_unlock(&vlun->lock); 360 spin_unlock(&vlun->lock);
361 return blk; 361 return blk;
362} 362}
363 363
364static void gennvm_put_blk_unlocked(struct nvm_dev *dev, struct nvm_block *blk) 364static void gen_put_blk_unlocked(struct nvm_dev *dev, struct nvm_block *blk)
365{ 365{
366 struct nvm_lun *vlun = blk->lun; 366 struct nvm_lun *vlun = blk->lun;
367 struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun); 367 struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun);
@@ -377,35 +377,35 @@ static void gennvm_put_blk_unlocked(struct nvm_dev *dev, struct nvm_block *blk)
377 blk->state = NVM_BLK_ST_BAD; 377 blk->state = NVM_BLK_ST_BAD;
378 } else { 378 } else {
379 WARN_ON_ONCE(1); 379 WARN_ON_ONCE(1);
380 pr_err("gennvm: erroneous block type (%lu -> %u)\n", 380 pr_err("gen: erroneous block type (%lu -> %u)\n",
381 blk->id, blk->state); 381 blk->id, blk->state);
382 list_move_tail(&blk->list, &lun->bb_list); 382 list_move_tail(&blk->list, &lun->bb_list);
383 } 383 }
384} 384}
385 385
386static void gennvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk) 386static void gen_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
387{ 387{
388 struct nvm_lun *vlun = blk->lun; 388 struct nvm_lun *vlun = blk->lun;
389 389
390 spin_lock(&vlun->lock); 390 spin_lock(&vlun->lock);
391 gennvm_put_blk_unlocked(dev, blk); 391 gen_put_blk_unlocked(dev, blk);
392 spin_unlock(&vlun->lock); 392 spin_unlock(&vlun->lock);
393} 393}
394 394
395static void gennvm_mark_blk(struct nvm_dev *dev, struct ppa_addr ppa, int type) 395static void gen_mark_blk(struct nvm_dev *dev, struct ppa_addr ppa, int type)
396{ 396{
397 struct gen_nvm *gn = dev->mp; 397 struct gen_dev *gn = dev->mp;
398 struct gen_lun *lun; 398 struct gen_lun *lun;
399 struct nvm_block *blk; 399 struct nvm_block *blk;
400 400
401 pr_debug("gennvm: ppa (ch: %u lun: %u blk: %u pg: %u) -> %u\n", 401 pr_debug("gen: ppa (ch: %u lun: %u blk: %u pg: %u) -> %u\n",
402 ppa.g.ch, ppa.g.lun, ppa.g.blk, ppa.g.pg, type); 402 ppa.g.ch, ppa.g.lun, ppa.g.blk, ppa.g.pg, type);
403 403
404 if (unlikely(ppa.g.ch > dev->nr_chnls || 404 if (unlikely(ppa.g.ch > dev->nr_chnls ||
405 ppa.g.lun > dev->luns_per_chnl || 405 ppa.g.lun > dev->luns_per_chnl ||
406 ppa.g.blk > dev->blks_per_lun)) { 406 ppa.g.blk > dev->blks_per_lun)) {
407 WARN_ON_ONCE(1); 407 WARN_ON_ONCE(1);
408 pr_err("gennvm: ppa broken (ch: %u > %u lun: %u > %u blk: %u > %u", 408 pr_err("gen: ppa broken (ch: %u > %u lun: %u > %u blk: %u > %u",
409 ppa.g.ch, dev->nr_chnls, 409 ppa.g.ch, dev->nr_chnls,
410 ppa.g.lun, dev->luns_per_chnl, 410 ppa.g.lun, dev->luns_per_chnl,
411 ppa.g.blk, dev->blks_per_lun); 411 ppa.g.blk, dev->blks_per_lun);
@@ -420,9 +420,9 @@ static void gennvm_mark_blk(struct nvm_dev *dev, struct ppa_addr ppa, int type)
420} 420}
421 421
422/* 422/*
423 * mark block bad in gennvm. It is expected that the target recovers separately 423 * mark block bad in gen. It is expected that the target recovers separately
424 */ 424 */
425static void gennvm_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd) 425static void gen_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd)
426{ 426{
427 int bit = -1; 427 int bit = -1;
428 int max_secs = dev->ops->max_phys_sect; 428 int max_secs = dev->ops->max_phys_sect;
@@ -432,25 +432,25 @@ static void gennvm_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd)
432 432
433 /* look up blocks and mark them as bad */ 433 /* look up blocks and mark them as bad */
434 if (rqd->nr_ppas == 1) { 434 if (rqd->nr_ppas == 1) {
435 gennvm_mark_blk(dev, rqd->ppa_addr, NVM_BLK_ST_BAD); 435 gen_mark_blk(dev, rqd->ppa_addr, NVM_BLK_ST_BAD);
436 return; 436 return;
437 } 437 }
438 438
439 while ((bit = find_next_bit(comp_bits, max_secs, bit + 1)) < max_secs) 439 while ((bit = find_next_bit(comp_bits, max_secs, bit + 1)) < max_secs)
440 gennvm_mark_blk(dev, rqd->ppa_list[bit], NVM_BLK_ST_BAD); 440 gen_mark_blk(dev, rqd->ppa_list[bit], NVM_BLK_ST_BAD);
441} 441}
442 442
443static void gennvm_end_io(struct nvm_rq *rqd) 443static void gen_end_io(struct nvm_rq *rqd)
444{ 444{
445 struct nvm_tgt_instance *ins = rqd->ins; 445 struct nvm_tgt_instance *ins = rqd->ins;
446 446
447 if (rqd->error == NVM_RSP_ERR_FAILWRITE) 447 if (rqd->error == NVM_RSP_ERR_FAILWRITE)
448 gennvm_mark_blk_bad(rqd->dev, rqd); 448 gen_mark_blk_bad(rqd->dev, rqd);
449 449
450 ins->tt->end_io(rqd); 450 ins->tt->end_io(rqd);
451} 451}
452 452
453static int gennvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd) 453static int gen_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
454{ 454{
455 if (!dev->ops->submit_io) 455 if (!dev->ops->submit_io)
456 return -ENODEV; 456 return -ENODEV;
@@ -459,11 +459,11 @@ static int gennvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
459 nvm_generic_to_addr_mode(dev, rqd); 459 nvm_generic_to_addr_mode(dev, rqd);
460 460
461 rqd->dev = dev; 461 rqd->dev = dev;
462 rqd->end_io = gennvm_end_io; 462 rqd->end_io = gen_end_io;
463 return dev->ops->submit_io(dev, rqd); 463 return dev->ops->submit_io(dev, rqd);
464} 464}
465 465
466static int gennvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk, 466static int gen_erase_blk(struct nvm_dev *dev, struct nvm_block *blk,
467 unsigned long flags) 467 unsigned long flags)
468{ 468{
469 struct ppa_addr addr = block_to_ppa(dev, blk); 469 struct ppa_addr addr = block_to_ppa(dev, blk);
@@ -471,19 +471,19 @@ static int gennvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk,
471 return nvm_erase_ppa(dev, &addr, 1); 471 return nvm_erase_ppa(dev, &addr, 1);
472} 472}
473 473
474static int gennvm_reserve_lun(struct nvm_dev *dev, int lunid) 474static int gen_reserve_lun(struct nvm_dev *dev, int lunid)
475{ 475{
476 return test_and_set_bit(lunid, dev->lun_map); 476 return test_and_set_bit(lunid, dev->lun_map);
477} 477}
478 478
479static void gennvm_release_lun(struct nvm_dev *dev, int lunid) 479static void gen_release_lun(struct nvm_dev *dev, int lunid)
480{ 480{
481 WARN_ON(!test_and_clear_bit(lunid, dev->lun_map)); 481 WARN_ON(!test_and_clear_bit(lunid, dev->lun_map));
482} 482}
483 483
484static struct nvm_lun *gennvm_get_lun(struct nvm_dev *dev, int lunid) 484static struct nvm_lun *gen_get_lun(struct nvm_dev *dev, int lunid)
485{ 485{
486 struct gen_nvm *gn = dev->mp; 486 struct gen_dev *gn = dev->mp;
487 487
488 if (unlikely(lunid >= dev->nr_luns)) 488 if (unlikely(lunid >= dev->nr_luns))
489 return NULL; 489 return NULL;
@@ -491,14 +491,14 @@ static struct nvm_lun *gennvm_get_lun(struct nvm_dev *dev, int lunid)
491 return &gn->luns[lunid].vlun; 491 return &gn->luns[lunid].vlun;
492} 492}
493 493
494static void gennvm_lun_info_print(struct nvm_dev *dev) 494static void gen_lun_info_print(struct nvm_dev *dev)
495{ 495{
496 struct gen_nvm *gn = dev->mp; 496 struct gen_dev *gn = dev->mp;
497 struct gen_lun *lun; 497 struct gen_lun *lun;
498 unsigned int i; 498 unsigned int i;
499 499
500 500
501 gennvm_for_each_lun(gn, lun, i) { 501 gen_for_each_lun(gn, lun, i) {
502 spin_lock(&lun->vlun.lock); 502 spin_lock(&lun->vlun.lock);
503 503
504 pr_info("%s: lun%8u\t%u\n", dev->name, i, 504 pr_info("%s: lun%8u\t%u\n", dev->name, i,
@@ -508,45 +508,45 @@ static void gennvm_lun_info_print(struct nvm_dev *dev)
508 } 508 }
509} 509}
510 510
511static struct nvmm_type gennvm = { 511static struct nvmm_type gen = {
512 .name = "gennvm", 512 .name = "gennvm",
513 .version = {0, 1, 0}, 513 .version = {0, 1, 0},
514 514
515 .register_mgr = gennvm_register, 515 .register_mgr = gen_register,
516 .unregister_mgr = gennvm_unregister, 516 .unregister_mgr = gen_unregister,
517 517
518 .get_blk_unlocked = gennvm_get_blk_unlocked, 518 .get_blk_unlocked = gen_get_blk_unlocked,
519 .put_blk_unlocked = gennvm_put_blk_unlocked, 519 .put_blk_unlocked = gen_put_blk_unlocked,
520 520
521 .get_blk = gennvm_get_blk, 521 .get_blk = gen_get_blk,
522 .put_blk = gennvm_put_blk, 522 .put_blk = gen_put_blk,
523 523
524 .submit_io = gennvm_submit_io, 524 .submit_io = gen_submit_io,
525 .erase_blk = gennvm_erase_blk, 525 .erase_blk = gen_erase_blk,
526 526
527 .mark_blk = gennvm_mark_blk, 527 .mark_blk = gen_mark_blk,
528 528
529 .get_lun = gennvm_get_lun, 529 .get_lun = gen_get_lun,
530 .reserve_lun = gennvm_reserve_lun, 530 .reserve_lun = gen_reserve_lun,
531 .release_lun = gennvm_release_lun, 531 .release_lun = gen_release_lun,
532 .lun_info_print = gennvm_lun_info_print, 532 .lun_info_print = gen_lun_info_print,
533 533
534 .get_area = gennvm_get_area, 534 .get_area = gen_get_area,
535 .put_area = gennvm_put_area, 535 .put_area = gen_put_area,
536 536
537}; 537};
538 538
539static int __init gennvm_module_init(void) 539static int __init gen_module_init(void)
540{ 540{
541 return nvm_register_mgr(&gennvm); 541 return nvm_register_mgr(&gen);
542} 542}
543 543
544static void gennvm_module_exit(void) 544static void gen_module_exit(void)
545{ 545{
546 nvm_unregister_mgr(&gennvm); 546 nvm_unregister_mgr(&gen);
547} 547}
548 548
549module_init(gennvm_module_init); 549module_init(gen_module_init);
550module_exit(gennvm_module_exit); 550module_exit(gen_module_exit);
551MODULE_LICENSE("GPL v2"); 551MODULE_LICENSE("GPL v2");
552MODULE_DESCRIPTION("Generic media manager for Open-Channel SSDs"); 552MODULE_DESCRIPTION("General media manager for Open-Channel SSDs");
diff --git a/drivers/lightnvm/gennvm.h b/drivers/lightnvm/gennvm.h
index 04d7c23cfc61..bf0621963c7c 100644
--- a/drivers/lightnvm/gennvm.h
+++ b/drivers/lightnvm/gennvm.h
@@ -34,7 +34,7 @@ struct gen_lun {
34 */ 34 */
35}; 35};
36 36
37struct gen_nvm { 37struct gen_dev {
38 struct nvm_dev *dev; 38 struct nvm_dev *dev;
39 39
40 int nr_luns; 40 int nr_luns;
@@ -42,12 +42,13 @@ struct gen_nvm {
42 struct list_head area_list; 42 struct list_head area_list;
43}; 43};
44 44
45struct gennvm_area { 45struct gen_area {
46 struct list_head list; 46 struct list_head list;
47 sector_t begin; 47 sector_t begin;
48 sector_t end; /* end is excluded */ 48 sector_t end; /* end is excluded */
49}; 49};
50#define gennvm_for_each_lun(bm, lun, i) \ 50
51#define gen_for_each_lun(bm, lun, i) \
51 for ((i) = 0, lun = &(bm)->luns[0]; \ 52 for ((i) = 0, lun = &(bm)->luns[0]; \
52 (i) < (bm)->nr_luns; (i)++, lun = &(bm)->luns[(i)]) 53 (i) < (bm)->nr_luns; (i)++, lun = &(bm)->luns[(i)])
53 54