author		Javier González <jg@lightnvm.io>	2016-11-28 16:39:10 -0500
committer	Jens Axboe <axboe@fb.com>		2016-11-29 14:12:51 -0500
commit		8e53624d44c1de31b1b0d4f500703669418a4c67 (patch)
tree		c4e2e7600ada505ac12b7bd85ba7607c016c9939
parent		2a02e627c245bfa987b97707123d7747d7b0e486 (diff)
lightnvm: eliminate nvm_lun abstraction in mm
In order to naturally support multi-target instances on an Open-Channel SSD, targets should own the LUNs they get blocks from and manage provisioning internally. This is done in several steps.

Since targets own the LUNs they are instantiated on top of and manage the free block list internally, there is no need for a LUN abstraction in the media manager. LUNs are intrinsically managed as in the physical layout (ch:0,lun:0, ..., ch:0,lun:n, ch:1,lun:0, ch:1,lun:n, ..., ch:m,lun:0, ch:m,lun:n) and given to the targets based on the target creation ioctl. This simplifies LUN management and clears the path for a partition manager to sit directly underneath LightNVM targets.

Signed-off-by: Javier González <javier@cnexlabs.com>
Signed-off-by: Matias Bjørling <m@bjorling.me>
Signed-off-by: Jens Axboe <axboe@fb.com>
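The linear LUN numbering handed to targets by the creation ioctl is channel-major: a flat LUN id splits into channel and LUN indexes by division and remainder against luns_per_chnl, which is also how gen_create_tgt_dev() and gen_remove_tgt_dev() below translate between the target's view and the device's. A minimal user-space sketch of that mapping (struct geo and lunid_to_chlun are illustrative names only, not kernel API):

/* Channel-major LUN layout: ch:0,lun:0 ... ch:0,lun:n, ch:1,lun:0, ... */
#include <stdio.h>

struct geo {
	int nr_chnls;
	int luns_per_chnl;
};

/* Split a flat LUN id into its (channel, lun) coordinates. */
static void lunid_to_chlun(const struct geo *geo, int lunid,
			   int *ch, int *lun)
{
	*ch = lunid / geo->luns_per_chnl;
	*lun = lunid % geo->luns_per_chnl;
}

int main(void)
{
	struct geo geo = { .nr_chnls = 2, .luns_per_chnl = 4 };
	int ch, lun, i;

	/*
	 * A target created over LUNs 2..5 spans the tail of ch:0 and the
	 * head of ch:1, which is why the patch keeps per-channel LUN
	 * offset tables in gen_dev_map.
	 */
	for (i = 2; i <= 5; i++) {
		lunid_to_chlun(&geo, i, &ch, &lun);
		printf("lunid %d -> ch:%d,lun:%d\n", i, ch, lun);
	}
	return 0;
}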
-rw-r--r--	drivers/lightnvm/core.c		14
-rw-r--r--	drivers/lightnvm/gennvm.c	336
-rw-r--r--	drivers/lightnvm/gennvm.h	20
-rw-r--r--	drivers/lightnvm/rrpc.c		137
-rw-r--r--	drivers/lightnvm/rrpc.h		32
-rw-r--r--	drivers/nvme/host/lightnvm.c	3
-rw-r--r--	include/linux/lightnvm.h	41
7 files changed, 419 insertions, 164 deletions
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 691b16ffda88..23d582f82219 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -203,15 +203,19 @@ int nvm_set_bb_tbl(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas,
203} 203}
204EXPORT_SYMBOL(nvm_set_bb_tbl); 204EXPORT_SYMBOL(nvm_set_bb_tbl);
205 205
206int nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd) 206int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
207{ 207{
208 return dev->mt->submit_io(dev, rqd); 208 struct nvm_dev *dev = tgt_dev->parent;
209
210 return dev->mt->submit_io(tgt_dev, rqd);
209} 211}
210EXPORT_SYMBOL(nvm_submit_io); 212EXPORT_SYMBOL(nvm_submit_io);
211 213
212int nvm_erase_blk(struct nvm_dev *dev, struct ppa_addr *p, int flags) 214int nvm_erase_blk(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p, int flags)
213{ 215{
214 return dev->mt->erase_blk(dev, p, flags); 216 struct nvm_dev *dev = tgt_dev->parent;
217
218 return dev->mt->erase_blk(tgt_dev, p, flags);
215} 219}
216EXPORT_SYMBOL(nvm_erase_blk); 220EXPORT_SYMBOL(nvm_erase_blk);
217 221
@@ -350,7 +354,7 @@ static int __nvm_submit_ppa(struct nvm_dev *dev, struct nvm_rq *rqd, int opcode,
350 354
351 nvm_generic_to_addr_mode(dev, rqd); 355 nvm_generic_to_addr_mode(dev, rqd);
352 356
353 rqd->dev = dev; 357 rqd->dev = NULL;
354 rqd->opcode = opcode; 358 rqd->opcode = opcode;
355 rqd->flags = flags; 359 rqd->flags = flags;
356 rqd->bio = bio; 360 rqd->bio = bio;
diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
index 19c3924ffa10..5d7c8c47bef8 100644
--- a/drivers/lightnvm/gennvm.c
+++ b/drivers/lightnvm/gennvm.c
@@ -38,8 +38,6 @@ static const struct block_device_operations gen_fops = {
38static int gen_reserve_luns(struct nvm_dev *dev, struct nvm_target *t, 38static int gen_reserve_luns(struct nvm_dev *dev, struct nvm_target *t,
39 int lun_begin, int lun_end) 39 int lun_begin, int lun_end)
40{ 40{
41 struct gen_dev *gn = dev->mp;
42 struct nvm_lun *lun;
43 int i; 41 int i;
44 42
45 for (i = lun_begin; i <= lun_end; i++) { 43 for (i = lun_begin; i <= lun_end; i++) {
@@ -47,35 +45,50 @@ static int gen_reserve_luns(struct nvm_dev *dev, struct nvm_target *t,
47 pr_err("nvm: lun %d already allocated\n", i); 45 pr_err("nvm: lun %d already allocated\n", i);
48 goto err; 46 goto err;
49 } 47 }
50
51 lun = &gn->luns[i];
52 list_add_tail(&lun->list, &t->lun_list);
53 } 48 }
54 49
55 return 0; 50 return 0;
56 51
57err: 52err:
58 while (--i > lun_begin) { 53 while (--i > lun_begin)
59 lun = &gn->luns[i];
60 clear_bit(i, dev->lun_map); 54 clear_bit(i, dev->lun_map);
61 list_del(&lun->list);
62 }
63 55
64 return -EBUSY; 56 return -EBUSY;
65} 57}
66 58
67static void gen_release_luns(struct nvm_dev *dev, struct nvm_target *t) 59static void gen_release_luns_err(struct nvm_dev *dev, int lun_begin,
60 int lun_end)
68{ 61{
69 struct nvm_lun *lun, *tmp; 62 int i;
70 63
71 list_for_each_entry_safe(lun, tmp, &t->lun_list, list) { 64 for (i = lun_begin; i <= lun_end; i++)
72 WARN_ON(!test_and_clear_bit(lun->id, dev->lun_map)); 65 WARN_ON(!test_and_clear_bit(i, dev->lun_map));
73 list_del(&lun->list);
74 }
75} 66}
76 67
77static void gen_remove_tgt_dev(struct nvm_tgt_dev *tgt_dev) 68static void gen_remove_tgt_dev(struct nvm_tgt_dev *tgt_dev)
78{ 69{
70 struct nvm_dev *dev = tgt_dev->parent;
71 struct gen_dev_map *dev_map = tgt_dev->map;
72 int i, j;
73
74 for (i = 0; i < dev_map->nr_chnls; i++) {
75 struct gen_ch_map *ch_map = &dev_map->chnls[i];
76 int *lun_offs = ch_map->lun_offs;
77 int ch = i + ch_map->ch_off;
78
79 for (j = 0; j < ch_map->nr_luns; j++) {
80 int lun = j + lun_offs[j];
81 int lunid = (ch * dev->geo.luns_per_chnl) + lun;
82
83 WARN_ON(!test_and_clear_bit(lunid, dev->lun_map));
84 }
85
86 kfree(ch_map->lun_offs);
87 }
88
89 kfree(dev_map->chnls);
90 kfree(dev_map);
91 kfree(tgt_dev->luns);
79 kfree(tgt_dev); 92 kfree(tgt_dev);
80} 93}
81 94
@@ -83,24 +96,103 @@ static struct nvm_tgt_dev *gen_create_tgt_dev(struct nvm_dev *dev,
83 int lun_begin, int lun_end) 96 int lun_begin, int lun_end)
84{ 97{
85 struct nvm_tgt_dev *tgt_dev = NULL; 98 struct nvm_tgt_dev *tgt_dev = NULL;
99 struct gen_dev_map *dev_rmap = dev->rmap;
100 struct gen_dev_map *dev_map;
101 struct ppa_addr *luns;
86 int nr_luns = lun_end - lun_begin + 1; 102 int nr_luns = lun_end - lun_begin + 1;
103 int luns_left = nr_luns;
104 int nr_chnls = nr_luns / dev->geo.luns_per_chnl;
105 int nr_chnls_mod = nr_luns % dev->geo.luns_per_chnl;
106 int bch = lun_begin / dev->geo.luns_per_chnl;
107 int blun = lun_begin % dev->geo.luns_per_chnl;
108 int lunid = 0;
109 int lun_balanced = 1;
110 int prev_nr_luns;
111 int i, j;
112
113 nr_chnls = nr_luns / dev->geo.luns_per_chnl;
114 nr_chnls = (nr_chnls_mod == 0) ? nr_chnls : nr_chnls + 1;
115
116 dev_map = kmalloc(sizeof(struct gen_dev_map), GFP_KERNEL);
117 if (!dev_map)
118 goto err_dev;
119
120 dev_map->chnls = kcalloc(nr_chnls, sizeof(struct gen_ch_map),
121 GFP_KERNEL);
122 if (!dev_map->chnls)
123 goto err_chnls;
124
125 luns = kcalloc(nr_luns, sizeof(struct ppa_addr), GFP_KERNEL);
126 if (!luns)
127 goto err_luns;
128
129 prev_nr_luns = (luns_left > dev->geo.luns_per_chnl) ?
130 dev->geo.luns_per_chnl : luns_left;
131 for (i = 0; i < nr_chnls; i++) {
132 struct gen_ch_map *ch_rmap = &dev_rmap->chnls[i + bch];
133 int *lun_roffs = ch_rmap->lun_offs;
134 struct gen_ch_map *ch_map = &dev_map->chnls[i];
135 int *lun_offs;
136 int luns_in_chnl = (luns_left > dev->geo.luns_per_chnl) ?
137 dev->geo.luns_per_chnl : luns_left;
138
139 if (lun_balanced && prev_nr_luns != luns_in_chnl)
140 lun_balanced = 0;
141
142 ch_map->ch_off = ch_rmap->ch_off = bch;
143 ch_map->nr_luns = luns_in_chnl;
144
145 lun_offs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
146 if (!lun_offs)
147 goto err_ch;
148
149 for (j = 0; j < luns_in_chnl; j++) {
150 luns[lunid].ppa = 0;
151 luns[lunid].g.ch = i;
152 luns[lunid++].g.lun = j;
153
154 lun_offs[j] = blun;
155 lun_roffs[j + blun] = blun;
156 }
157
158 ch_map->lun_offs = lun_offs;
159
160 /* when starting a new channel, lun offset is reset */
161 blun = 0;
162 luns_left -= luns_in_chnl;
163 }
164
165 dev_map->nr_chnls = nr_chnls;
87 166
88 tgt_dev = kmalloc(sizeof(struct nvm_tgt_dev), GFP_KERNEL); 167 tgt_dev = kmalloc(sizeof(struct nvm_tgt_dev), GFP_KERNEL);
89 if (!tgt_dev) 168 if (!tgt_dev)
90 goto out; 169 goto err_ch;
91 170
92 memcpy(&tgt_dev->geo, &dev->geo, sizeof(struct nvm_geo)); 171 memcpy(&tgt_dev->geo, &dev->geo, sizeof(struct nvm_geo));
93 tgt_dev->geo.nr_chnls = (nr_luns / (dev->geo.luns_per_chnl + 1)) + 1; 172 /* Target device only owns a portion of the physical device */
173 tgt_dev->geo.nr_chnls = nr_chnls;
94 tgt_dev->geo.nr_luns = nr_luns; 174 tgt_dev->geo.nr_luns = nr_luns;
175 tgt_dev->geo.luns_per_chnl = (lun_balanced) ? prev_nr_luns : -1;
95 tgt_dev->total_secs = nr_luns * tgt_dev->geo.sec_per_lun; 176 tgt_dev->total_secs = nr_luns * tgt_dev->geo.sec_per_lun;
96 tgt_dev->q = dev->q; 177 tgt_dev->q = dev->q;
97 tgt_dev->ops = dev->ops; 178 tgt_dev->ops = dev->ops;
98 tgt_dev->mt = dev->mt; 179 tgt_dev->mt = dev->mt;
180 tgt_dev->map = dev_map;
181 tgt_dev->luns = luns;
99 memcpy(&tgt_dev->identity, &dev->identity, sizeof(struct nvm_id)); 182 memcpy(&tgt_dev->identity, &dev->identity, sizeof(struct nvm_id));
100 183
101 tgt_dev->parent = dev; 184 tgt_dev->parent = dev;
102 185
103out: 186 return tgt_dev;
187err_ch:
188 while (--i > 0)
189 kfree(dev_map->chnls[i].lun_offs);
190 kfree(luns);
191err_luns:
192 kfree(dev_map->chnls);
193err_chnls:
194 kfree(dev_map);
195err_dev:
104 return tgt_dev; 196 return tgt_dev;
105} 197}
106 198
@@ -134,14 +226,14 @@ static int gen_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
134 if (!t) 226 if (!t)
135 return -ENOMEM; 227 return -ENOMEM;
136 228
137 INIT_LIST_HEAD(&t->lun_list);
138
139 if (gen_reserve_luns(dev, t, s->lun_begin, s->lun_end)) 229 if (gen_reserve_luns(dev, t, s->lun_begin, s->lun_end))
140 goto err_t; 230 goto err_t;
141 231
142 tgt_dev = gen_create_tgt_dev(dev, s->lun_begin, s->lun_end); 232 tgt_dev = gen_create_tgt_dev(dev, s->lun_begin, s->lun_end);
143 if (!tgt_dev) 233 if (!tgt_dev) {
234 pr_err("nvm: could not create target device\n");
144 goto err_reserve; 235 goto err_reserve;
236 }
145 237
146 tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node); 238 tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node);
147 if (!tqueue) 239 if (!tqueue)
@@ -159,7 +251,7 @@ static int gen_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
159 tdisk->fops = &gen_fops; 251 tdisk->fops = &gen_fops;
160 tdisk->queue = tqueue; 252 tdisk->queue = tqueue;
161 253
162 targetdata = tt->init(tgt_dev, tdisk, &t->lun_list); 254 targetdata = tt->init(tgt_dev, tdisk);
163 if (IS_ERR(targetdata)) 255 if (IS_ERR(targetdata))
164 goto err_init; 256 goto err_init;
165 257
@@ -187,7 +279,7 @@ err_queue:
187err_dev: 279err_dev:
188 kfree(tgt_dev); 280 kfree(tgt_dev);
189err_reserve: 281err_reserve:
190 gen_release_luns(dev, t); 282 gen_release_luns_err(dev, s->lun_begin, s->lun_end);
191err_t: 283err_t:
192 kfree(t); 284 kfree(t);
193 return -ENOMEM; 285 return -ENOMEM;
@@ -205,7 +297,6 @@ static void __gen_remove_target(struct nvm_target *t)
205 if (tt->exit) 297 if (tt->exit)
206 tt->exit(tdisk->private_data); 298 tt->exit(tdisk->private_data);
207 299
208 gen_release_luns(t->dev->parent, t);
209 gen_remove_tgt_dev(t->dev); 300 gen_remove_tgt_dev(t->dev);
210 put_disk(tdisk); 301 put_disk(tdisk);
211 302
@@ -306,51 +397,54 @@ static void gen_put_area(struct nvm_dev *dev, sector_t begin)
306 spin_unlock(&dev->lock); 397 spin_unlock(&dev->lock);
307} 398}
308 399
309static void gen_luns_free(struct nvm_dev *dev)
310{
311 struct gen_dev *gn = dev->mp;
312
313 kfree(gn->luns);
314}
315
316static int gen_luns_init(struct nvm_dev *dev, struct gen_dev *gn)
317{
318 struct nvm_geo *geo = &dev->geo;
319 struct nvm_lun *lun;
320 int i;
321
322 gn->luns = kcalloc(geo->nr_luns, sizeof(struct nvm_lun), GFP_KERNEL);
323 if (!gn->luns)
324 return -ENOMEM;
325
326 gen_for_each_lun(gn, lun, i) {
327 INIT_LIST_HEAD(&lun->list);
328
329 lun->id = i;
330 lun->lun_id = i % geo->luns_per_chnl;
331 lun->chnl_id = i / geo->luns_per_chnl;
332 }
333 return 0;
334}
335
336static void gen_free(struct nvm_dev *dev) 400static void gen_free(struct nvm_dev *dev)
337{ 401{
338 gen_luns_free(dev);
339 kfree(dev->mp); 402 kfree(dev->mp);
403 kfree(dev->rmap);
340 dev->mp = NULL; 404 dev->mp = NULL;
341} 405}
342 406
343static int gen_register(struct nvm_dev *dev) 407static int gen_register(struct nvm_dev *dev)
344{ 408{
345 struct gen_dev *gn; 409 struct gen_dev *gn;
346 int ret; 410 struct gen_dev_map *dev_rmap;
411 int i, j;
347 412
348 if (!try_module_get(THIS_MODULE)) 413 if (!try_module_get(THIS_MODULE))
349 return -ENODEV; 414 return -ENODEV;
350 415
351 gn = kzalloc(sizeof(struct gen_dev), GFP_KERNEL); 416 gn = kzalloc(sizeof(struct gen_dev), GFP_KERNEL);
352 if (!gn) 417 if (!gn)
353 return -ENOMEM; 418 goto err_gn;
419
420 dev_rmap = kmalloc(sizeof(struct gen_dev_map), GFP_KERNEL);
421 if (!dev_rmap)
422 goto err_rmap;
423
424 dev_rmap->chnls = kcalloc(dev->geo.nr_chnls, sizeof(struct gen_ch_map),
425 GFP_KERNEL);
426 if (!dev_rmap->chnls)
427 goto err_chnls;
428
429 for (i = 0; i < dev->geo.nr_chnls; i++) {
430 struct gen_ch_map *ch_rmap;
431 int *lun_roffs;
432 int luns_in_chnl = dev->geo.luns_per_chnl;
433
434 ch_rmap = &dev_rmap->chnls[i];
435
436 ch_rmap->ch_off = -1;
437 ch_rmap->nr_luns = luns_in_chnl;
438
439 lun_roffs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
440 if (!lun_roffs)
441 goto err_ch;
442
443 for (j = 0; j < luns_in_chnl; j++)
444 lun_roffs[j] = -1;
445
446 ch_rmap->lun_offs = lun_roffs;
447 }
354 448
355 gn->dev = dev; 449 gn->dev = dev;
356 gn->nr_luns = dev->geo.nr_luns; 450 gn->nr_luns = dev->geo.nr_luns;
@@ -358,18 +452,19 @@ static int gen_register(struct nvm_dev *dev)
358 mutex_init(&gn->lock); 452 mutex_init(&gn->lock);
359 INIT_LIST_HEAD(&gn->targets); 453 INIT_LIST_HEAD(&gn->targets);
360 dev->mp = gn; 454 dev->mp = gn;
361 455 dev->rmap = dev_rmap;
362 ret = gen_luns_init(dev, gn);
363 if (ret) {
364 pr_err("gen: could not initialize luns\n");
365 goto err;
366 }
367 456
368 return 1; 457 return 1;
369err: 458err_ch:
459 while (--i >= 0)
460 kfree(dev_rmap->chnls[i].lun_offs);
461err_chnls:
462 kfree(dev_rmap);
463err_rmap:
370 gen_free(dev); 464 gen_free(dev);
465err_gn:
371 module_put(THIS_MODULE); 466 module_put(THIS_MODULE);
372 return ret; 467 return -ENOMEM;
373} 468}
374 469
375static void gen_unregister(struct nvm_dev *dev) 470static void gen_unregister(struct nvm_dev *dev)
@@ -389,29 +484,137 @@ static void gen_unregister(struct nvm_dev *dev)
389 module_put(THIS_MODULE); 484 module_put(THIS_MODULE);
390} 485}
391 486
487enum {
488 TRANS_TGT_TO_DEV = 0x0,
489 TRANS_DEV_TO_TGT = 0x1,
490};
491
492
493static int gen_map_to_dev(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
494{
495 struct gen_dev_map *dev_map = tgt_dev->map;
496 struct gen_ch_map *ch_map = &dev_map->chnls[p->g.ch];
497 int lun_off = ch_map->lun_offs[p->g.lun];
498 struct nvm_dev *dev = tgt_dev->parent;
499 struct gen_dev_map *dev_rmap = dev->rmap;
500 struct gen_ch_map *ch_rmap;
501 int lun_roff;
502
503 p->g.ch += ch_map->ch_off;
504 p->g.lun += lun_off;
505
506 ch_rmap = &dev_rmap->chnls[p->g.ch];
507 lun_roff = ch_rmap->lun_offs[p->g.lun];
508
509 if (unlikely(ch_rmap->ch_off < 0 || lun_roff < 0)) {
510 pr_err("nvm: corrupted device partition table\n");
511 return -EINVAL;
512 }
513
514 return 0;
515}
516
517static int gen_map_to_tgt(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
518{
519 struct nvm_dev *dev = tgt_dev->parent;
520 struct gen_dev_map *dev_rmap = dev->rmap;
521 struct gen_ch_map *ch_rmap = &dev_rmap->chnls[p->g.ch];
522 int lun_roff = ch_rmap->lun_offs[p->g.lun];
523
524 p->g.ch -= ch_rmap->ch_off;
525 p->g.lun -= lun_roff;
526
527 return 0;
528}
529
530static int gen_trans_rq(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
531 int flag)
532{
533 gen_trans_fn *f;
534 int i;
535 int ret = 0;
536
537 f = (flag == TRANS_TGT_TO_DEV) ? gen_map_to_dev : gen_map_to_tgt;
538
539 if (rqd->nr_ppas == 1)
540 return f(tgt_dev, &rqd->ppa_addr);
541
542 for (i = 0; i < rqd->nr_ppas; i++) {
543 ret = f(tgt_dev, &rqd->ppa_list[i]);
544 if (ret)
545 goto out;
546 }
547
548out:
549 return ret;
550}
551
392static void gen_end_io(struct nvm_rq *rqd) 552static void gen_end_io(struct nvm_rq *rqd)
393{ 553{
554 struct nvm_tgt_dev *tgt_dev = rqd->dev;
394 struct nvm_tgt_instance *ins = rqd->ins; 555 struct nvm_tgt_instance *ins = rqd->ins;
395 556
557 /* Convert address space */
558 if (tgt_dev)
559 gen_trans_rq(tgt_dev, rqd, TRANS_DEV_TO_TGT);
560
396 ins->tt->end_io(rqd); 561 ins->tt->end_io(rqd);
397} 562}
398 563
399static int gen_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd) 564static int gen_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
400{ 565{
566 struct nvm_dev *dev = tgt_dev->parent;
567
401 if (!dev->ops->submit_io) 568 if (!dev->ops->submit_io)
402 return -ENODEV; 569 return -ENODEV;
403 570
404 /* Convert address space */ 571 /* Convert address space */
572 gen_trans_rq(tgt_dev, rqd, TRANS_TGT_TO_DEV);
405 nvm_generic_to_addr_mode(dev, rqd); 573 nvm_generic_to_addr_mode(dev, rqd);
406 574
407 rqd->dev = dev; 575 rqd->dev = tgt_dev;
408 rqd->end_io = gen_end_io; 576 rqd->end_io = gen_end_io;
409 return dev->ops->submit_io(dev, rqd); 577 return dev->ops->submit_io(dev, rqd);
410} 578}
411 579
412static int gen_erase_blk(struct nvm_dev *dev, struct ppa_addr *p, int flags) 580static int gen_erase_blk(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p,
581 int flags)
413{ 582{
414 return nvm_erase_ppa(dev, p, 1, flags); 583 /* Convert address space */
584 gen_map_to_dev(tgt_dev, p);
585
586 return nvm_erase_ppa(tgt_dev->parent, p, 1, flags);
587}
588
589static void gen_part_to_tgt(struct nvm_dev *dev, sector_t *entries,
590 int len)
591{
592 struct nvm_geo *geo = &dev->geo;
593 struct gen_dev_map *dev_rmap = dev->rmap;
594 u64 i;
595
596 for (i = 0; i < len; i++) {
597 struct gen_ch_map *ch_rmap;
598 int *lun_roffs;
599 struct ppa_addr gaddr;
600 u64 pba = le64_to_cpu(entries[i]);
601 int off;
602 u64 diff;
603
604 if (!pba)
605 continue;
606
607 gaddr = linear_to_generic_addr(geo, pba);
608 ch_rmap = &dev_rmap->chnls[gaddr.g.ch];
609 lun_roffs = ch_rmap->lun_offs;
610
611 off = gaddr.g.ch * geo->luns_per_chnl + gaddr.g.lun;
612
613 diff = ((ch_rmap->ch_off * geo->luns_per_chnl) +
614 (lun_roffs[gaddr.g.lun])) * geo->sec_per_lun;
615
616 entries[i] -= cpu_to_le64(diff);
617 }
415} 618}
416 619
417static struct nvmm_type gen = { 620static struct nvmm_type gen = {
@@ -430,6 +633,7 @@ static struct nvmm_type gen = {
430 .get_area = gen_get_area, 633 .get_area = gen_get_area,
431 .put_area = gen_put_area, 634 .put_area = gen_put_area,
432 635
636 .part_to_tgt = gen_part_to_tgt,
433}; 637};
434 638
435static int __init gen_module_init(void) 639static int __init gen_module_init(void)
diff --git a/drivers/lightnvm/gennvm.h b/drivers/lightnvm/gennvm.h
index d167f391fbae..6a4b3f368848 100644
--- a/drivers/lightnvm/gennvm.h
+++ b/drivers/lightnvm/gennvm.h
@@ -24,19 +24,37 @@ struct gen_dev {
24 struct nvm_dev *dev; 24 struct nvm_dev *dev;
25 25
26 int nr_luns; 26 int nr_luns;
27 struct nvm_lun *luns;
28 struct list_head area_list; 27 struct list_head area_list;
29 28
30 struct mutex lock; 29 struct mutex lock;
31 struct list_head targets; 30 struct list_head targets;
32}; 31};
33 32
33/* Map between virtual and physical channel and lun */
34struct gen_ch_map {
35 int ch_off;
36 int nr_luns;
37 int *lun_offs;
38};
39
40struct gen_dev_map {
41 struct gen_ch_map *chnls;
42 int nr_chnls;
43};
44
34struct gen_area { 45struct gen_area {
35 struct list_head list; 46 struct list_head list;
36 sector_t begin; 47 sector_t begin;
37 sector_t end; /* end is excluded */ 48 sector_t end; /* end is excluded */
38}; 49};
39 50
51static inline void *ch_map_to_lun_offs(struct gen_ch_map *ch_map)
52{
53 return ch_map + 1;
54}
55
56typedef int (gen_trans_fn)(struct nvm_tgt_dev *, struct ppa_addr *);
57
40#define gen_for_each_lun(bm, lun, i) \ 58#define gen_for_each_lun(bm, lun, i) \
41 for ((i) = 0, lun = &(bm)->luns[0]; \ 59 for ((i) = 0, lun = &(bm)->luns[0]; \
42 (i) < (bm)->nr_luns; (i)++, lun = &(bm)->luns[(i)]) 60 (i) < (bm)->nr_luns; (i)++, lun = &(bm)->luns[(i)])
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index a352285dffc0..75ed12ae0f47 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -45,7 +45,7 @@ static void rrpc_page_invalidate(struct rrpc *rrpc, struct rrpc_addr *a)
45 45
46 spin_unlock(&rblk->lock); 46 spin_unlock(&rblk->lock);
47 47
48 rrpc->rev_trans_map[a->addr - rrpc->poffset].addr = ADDR_EMPTY; 48 rrpc->rev_trans_map[a->addr].addr = ADDR_EMPTY;
49} 49}
50 50
51static void rrpc_invalidate_range(struct rrpc *rrpc, sector_t slba, 51static void rrpc_invalidate_range(struct rrpc *rrpc, sector_t slba,
@@ -127,28 +127,25 @@ static u64 block_to_rel_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
127{ 127{
128 struct nvm_tgt_dev *dev = rrpc->dev; 128 struct nvm_tgt_dev *dev = rrpc->dev;
129 struct rrpc_lun *rlun = rblk->rlun; 129 struct rrpc_lun *rlun = rblk->rlun;
130 struct nvm_lun *lun = rlun->parent;
131 130
132 return lun->id * dev->geo.sec_per_blk; 131 return rlun->id * dev->geo.sec_per_blk;
133} 132}
134 133
135/* Calculate global addr for the given block */ 134static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_tgt_dev *dev,
136static u64 block_to_addr(struct rrpc *rrpc, struct rrpc_block *rblk) 135 struct rrpc_addr *gp)
137{ 136{
138 struct nvm_tgt_dev *dev = rrpc->dev; 137 struct rrpc_block *rblk = gp->rblk;
139 struct nvm_geo *geo = &dev->geo;
140 struct rrpc_lun *rlun = rblk->rlun; 138 struct rrpc_lun *rlun = rblk->rlun;
141 struct nvm_lun *lun = rlun->parent; 139 u64 addr = gp->addr;
142
143 return lun->id * geo->sec_per_lun + rblk->id * geo->sec_per_blk;
144}
145
146static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_tgt_dev *dev, u64 addr)
147{
148 struct ppa_addr paddr; 140 struct ppa_addr paddr;
149 141
150 paddr.ppa = addr; 142 paddr.ppa = addr;
151 return linear_to_generic_addr(&dev->geo, paddr); 143 paddr = rrpc_linear_to_generic_addr(&dev->geo, paddr);
144 paddr.g.ch = rlun->bppa.g.ch;
145 paddr.g.lun = rlun->bppa.g.lun;
146 paddr.g.blk = rblk->id;
147
148 return paddr;
152} 149}
153 150
154/* requires lun->lock taken */ 151/* requires lun->lock taken */
@@ -216,7 +213,6 @@ static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
216static void rrpc_put_blk(struct rrpc *rrpc, struct rrpc_block *rblk) 213static void rrpc_put_blk(struct rrpc *rrpc, struct rrpc_block *rblk)
217{ 214{
218 struct rrpc_lun *rlun = rblk->rlun; 215 struct rrpc_lun *rlun = rblk->rlun;
219 struct nvm_lun *lun = rlun->parent;
220 216
221 spin_lock(&rlun->lock); 217 spin_lock(&rlun->lock);
222 if (rblk->state & NVM_BLK_ST_TGT) { 218 if (rblk->state & NVM_BLK_ST_TGT) {
@@ -229,8 +225,8 @@ static void rrpc_put_blk(struct rrpc *rrpc, struct rrpc_block *rblk)
229 } else { 225 } else {
230 WARN_ON_ONCE(1); 226 WARN_ON_ONCE(1);
231 pr_err("rrpc: erroneous type (ch:%d,lun:%d,blk%d-> %u)\n", 227 pr_err("rrpc: erroneous type (ch:%d,lun:%d,blk%d-> %u)\n",
232 lun->chnl_id, lun->lun_id, 228 rlun->bppa.g.ch, rlun->bppa.g.lun,
233 rblk->id, rblk->state); 229 rblk->id, rblk->state);
234 list_move_tail(&rblk->list, &rlun->bb_list); 230 list_move_tail(&rblk->list, &rlun->bb_list);
235 } 231 }
236 spin_unlock(&rlun->lock); 232 spin_unlock(&rlun->lock);
@@ -336,7 +332,7 @@ static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
336try: 332try:
337 spin_lock(&rrpc->rev_lock); 333 spin_lock(&rrpc->rev_lock);
338 /* Get logical address from physical to logical table */ 334 /* Get logical address from physical to logical table */
339 rev = &rrpc->rev_trans_map[phys_addr - rrpc->poffset]; 335 rev = &rrpc->rev_trans_map[phys_addr];
340 /* already updated by previous regular write */ 336 /* already updated by previous regular write */
341 if (rev->addr == ADDR_EMPTY) { 337 if (rev->addr == ADDR_EMPTY) {
342 spin_unlock(&rrpc->rev_lock); 338 spin_unlock(&rrpc->rev_lock);
@@ -423,18 +419,18 @@ static void rrpc_block_gc(struct work_struct *work)
423 419
424 mempool_free(gcb, rrpc->gcb_pool); 420 mempool_free(gcb, rrpc->gcb_pool);
425 pr_debug("nvm: block 'ch:%d,lun:%d,blk:%d' being reclaimed\n", 421 pr_debug("nvm: block 'ch:%d,lun:%d,blk:%d' being reclaimed\n",
426 rlun->parent->chnl_id, rlun->parent->lun_id, 422 rlun->bppa.g.ch, rlun->bppa.g.lun,
427 rblk->id); 423 rblk->id);
428 424
429 if (rrpc_move_valid_pages(rrpc, rblk)) 425 if (rrpc_move_valid_pages(rrpc, rblk))
430 goto put_back; 426 goto put_back;
431 427
432 ppa.ppa = 0; 428 ppa.ppa = 0;
433 ppa.g.ch = rlun->parent->chnl_id; 429 ppa.g.ch = rlun->bppa.g.ch;
434 ppa.g.lun = rlun->parent->lun_id; 430 ppa.g.lun = rlun->bppa.g.lun;
435 ppa.g.blk = rblk->id; 431 ppa.g.blk = rblk->id;
436 432
437 if (nvm_erase_blk(dev->parent, &ppa, 0)) 433 if (nvm_erase_blk(dev, &ppa, 0))
438 goto put_back; 434 goto put_back;
439 435
440 rrpc_put_blk(rrpc, rblk); 436 rrpc_put_blk(rrpc, rblk);
@@ -506,8 +502,7 @@ static void rrpc_lun_gc(struct work_struct *work)
506 WARN_ON(!block_is_full(rrpc, rblk)); 502 WARN_ON(!block_is_full(rrpc, rblk));
507 503
508 pr_debug("rrpc: selected block 'ch:%d,lun:%d,blk:%d' for GC\n", 504 pr_debug("rrpc: selected block 'ch:%d,lun:%d,blk:%d' for GC\n",
509 rlun->parent->chnl_id, 505 rlun->bppa.g.ch, rlun->bppa.g.lun,
510 rlun->parent->lun_id,
511 rblk->id); 506 rblk->id);
512 507
513 gcb->rrpc = rrpc; 508 gcb->rrpc = rrpc;
@@ -537,8 +532,7 @@ static void rrpc_gc_queue(struct work_struct *work)
537 532
538 mempool_free(gcb, rrpc->gcb_pool); 533 mempool_free(gcb, rrpc->gcb_pool);
539 pr_debug("nvm: block 'ch:%d,lun:%d,blk:%d' full, allow GC (sched)\n", 534 pr_debug("nvm: block 'ch:%d,lun:%d,blk:%d' full, allow GC (sched)\n",
540 rlun->parent->chnl_id, 535 rlun->bppa.g.ch, rlun->bppa.g.lun,
541 rlun->parent->lun_id,
542 rblk->id); 536 rblk->id);
543} 537}
544 538
@@ -586,7 +580,7 @@ static struct rrpc_addr *rrpc_update_map(struct rrpc *rrpc, sector_t laddr,
586 gp->addr = paddr; 580 gp->addr = paddr;
587 gp->rblk = rblk; 581 gp->rblk = rblk;
588 582
589 rev = &rrpc->rev_trans_map[gp->addr - rrpc->poffset]; 583 rev = &rrpc->rev_trans_map[gp->addr];
590 rev->addr = laddr; 584 rev->addr = laddr;
591 spin_unlock(&rrpc->rev_lock); 585 spin_unlock(&rrpc->rev_lock);
592 586
@@ -601,7 +595,7 @@ static u64 rrpc_alloc_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
601 if (block_is_full(rrpc, rblk)) 595 if (block_is_full(rrpc, rblk))
602 goto out; 596 goto out;
603 597
604 addr = block_to_addr(rrpc, rblk) + rblk->next_page; 598 addr = rblk->next_page;
605 599
606 rblk->next_page++; 600 rblk->next_page++;
607out: 601out:
@@ -615,18 +609,22 @@ out:
615 * Returns rrpc_addr with the physical address and block. Returns NULL if no 609 * Returns rrpc_addr with the physical address and block. Returns NULL if no
616 * blocks in the next rlun are available. 610 * blocks in the next rlun are available.
617 */ 611 */
618static struct rrpc_addr *rrpc_map_page(struct rrpc *rrpc, sector_t laddr, 612static struct ppa_addr rrpc_map_page(struct rrpc *rrpc, sector_t laddr,
619 int is_gc) 613 int is_gc)
620{ 614{
615 struct nvm_tgt_dev *tgt_dev = rrpc->dev;
621 struct rrpc_lun *rlun; 616 struct rrpc_lun *rlun;
622 struct rrpc_block *rblk, **cur_rblk; 617 struct rrpc_block *rblk, **cur_rblk;
618 struct rrpc_addr *p;
619 struct ppa_addr ppa;
623 u64 paddr; 620 u64 paddr;
624 int gc_force = 0; 621 int gc_force = 0;
625 622
623 ppa.ppa = ADDR_EMPTY;
626 rlun = rrpc_get_lun_rr(rrpc, is_gc); 624 rlun = rrpc_get_lun_rr(rrpc, is_gc);
627 625
628 if (!is_gc && rlun->nr_free_blocks < rrpc->nr_luns * 4) 626 if (!is_gc && rlun->nr_free_blocks < rrpc->nr_luns * 4)
629 return NULL; 627 return ppa;
630 628
631 /* 629 /*
632 * page allocation steps: 630 * page allocation steps:
@@ -683,10 +681,15 @@ new_blk:
683 } 681 }
684 682
685 pr_err("rrpc: failed to allocate new block\n"); 683 pr_err("rrpc: failed to allocate new block\n");
686 return NULL; 684 return ppa;
687done: 685done:
688 spin_unlock(&rlun->lock); 686 spin_unlock(&rlun->lock);
689 return rrpc_update_map(rrpc, laddr, rblk, paddr); 687 p = rrpc_update_map(rrpc, laddr, rblk, paddr);
688 if (!p)
689 return ppa;
690
691 /* return global address */
692 return rrpc_ppa_to_gaddr(tgt_dev, p);
690} 693}
691 694
692static void rrpc_run_gc(struct rrpc *rrpc, struct rrpc_block *rblk) 695static void rrpc_run_gc(struct rrpc *rrpc, struct rrpc_block *rblk)
@@ -712,8 +715,8 @@ static struct rrpc_lun *rrpc_ppa_to_lun(struct rrpc *rrpc, struct ppa_addr p)
712 int i; 715 int i;
713 716
714 for (i = 0; i < rrpc->nr_luns; i++) { 717 for (i = 0; i < rrpc->nr_luns; i++) {
715 if (rrpc->luns[i].parent->chnl_id == p.g.ch && 718 if (rrpc->luns[i].bppa.g.ch == p.g.ch &&
716 rrpc->luns[i].parent->lun_id == p.g.lun) { 719 rrpc->luns[i].bppa.g.lun == p.g.lun) {
717 rlun = &rrpc->luns[i]; 720 rlun = &rrpc->luns[i];
718 break; 721 break;
719 } 722 }
@@ -823,7 +826,7 @@ static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
823 gp = &rrpc->trans_map[laddr + i]; 826 gp = &rrpc->trans_map[laddr + i];
824 827
825 if (gp->rblk) { 828 if (gp->rblk) {
826 rqd->ppa_list[i] = rrpc_ppa_to_gaddr(dev, gp->addr); 829 rqd->ppa_list[i] = rrpc_ppa_to_gaddr(dev, gp);
827 } else { 830 } else {
828 BUG_ON(is_gc); 831 BUG_ON(is_gc);
829 rrpc_unlock_laddr(rrpc, r); 832 rrpc_unlock_laddr(rrpc, r);
@@ -852,7 +855,7 @@ static int rrpc_read_rq(struct rrpc *rrpc, struct bio *bio, struct nvm_rq *rqd,
852 gp = &rrpc->trans_map[laddr]; 855 gp = &rrpc->trans_map[laddr];
853 856
854 if (gp->rblk) { 857 if (gp->rblk) {
855 rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, gp->addr); 858 rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, gp);
856 } else { 859 } else {
857 BUG_ON(is_gc); 860 BUG_ON(is_gc);
858 rrpc_unlock_rq(rrpc, rqd); 861 rrpc_unlock_rq(rrpc, rqd);
@@ -869,7 +872,7 @@ static int rrpc_write_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
869{ 872{
870 struct nvm_tgt_dev *dev = rrpc->dev; 873 struct nvm_tgt_dev *dev = rrpc->dev;
871 struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd); 874 struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
872 struct rrpc_addr *p; 875 struct ppa_addr p;
873 sector_t laddr = rrpc_get_laddr(bio); 876 sector_t laddr = rrpc_get_laddr(bio);
874 int is_gc = flags & NVM_IOTYPE_GC; 877 int is_gc = flags & NVM_IOTYPE_GC;
875 int i; 878 int i;
@@ -882,7 +885,7 @@ static int rrpc_write_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
882 for (i = 0; i < npages; i++) { 885 for (i = 0; i < npages; i++) {
883 /* We assume that mapping occurs at 4KB granularity */ 886 /* We assume that mapping occurs at 4KB granularity */
884 p = rrpc_map_page(rrpc, laddr + i, is_gc); 887 p = rrpc_map_page(rrpc, laddr + i, is_gc);
885 if (!p) { 888 if (p.ppa == ADDR_EMPTY) {
886 BUG_ON(is_gc); 889 BUG_ON(is_gc);
887 rrpc_unlock_laddr(rrpc, r); 890 rrpc_unlock_laddr(rrpc, r);
888 nvm_dev_dma_free(dev->parent, rqd->ppa_list, 891 nvm_dev_dma_free(dev->parent, rqd->ppa_list,
@@ -891,7 +894,7 @@ static int rrpc_write_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
891 return NVM_IO_REQUEUE; 894 return NVM_IO_REQUEUE;
892 } 895 }
893 896
894 rqd->ppa_list[i] = rrpc_ppa_to_gaddr(dev, p->addr); 897 rqd->ppa_list[i] = p;
895 } 898 }
896 899
897 rqd->opcode = NVM_OP_HBWRITE; 900 rqd->opcode = NVM_OP_HBWRITE;
@@ -902,7 +905,7 @@ static int rrpc_write_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
902static int rrpc_write_rq(struct rrpc *rrpc, struct bio *bio, 905static int rrpc_write_rq(struct rrpc *rrpc, struct bio *bio,
903 struct nvm_rq *rqd, unsigned long flags) 906 struct nvm_rq *rqd, unsigned long flags)
904{ 907{
905 struct rrpc_addr *p; 908 struct ppa_addr p;
906 int is_gc = flags & NVM_IOTYPE_GC; 909 int is_gc = flags & NVM_IOTYPE_GC;
907 sector_t laddr = rrpc_get_laddr(bio); 910 sector_t laddr = rrpc_get_laddr(bio);
908 911
@@ -910,14 +913,14 @@ static int rrpc_write_rq(struct rrpc *rrpc, struct bio *bio,
910 return NVM_IO_REQUEUE; 913 return NVM_IO_REQUEUE;
911 914
912 p = rrpc_map_page(rrpc, laddr, is_gc); 915 p = rrpc_map_page(rrpc, laddr, is_gc);
913 if (!p) { 916 if (p.ppa == ADDR_EMPTY) {
914 BUG_ON(is_gc); 917 BUG_ON(is_gc);
915 rrpc_unlock_rq(rrpc, rqd); 918 rrpc_unlock_rq(rrpc, rqd);
916 rrpc_gc_kick(rrpc); 919 rrpc_gc_kick(rrpc);
917 return NVM_IO_REQUEUE; 920 return NVM_IO_REQUEUE;
918 } 921 }
919 922
920 rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, p->addr); 923 rqd->ppa_addr = p;
921 rqd->opcode = NVM_OP_HBWRITE; 924 rqd->opcode = NVM_OP_HBWRITE;
922 925
923 return NVM_IO_OK; 926 return NVM_IO_OK;
@@ -973,7 +976,7 @@ static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
973 rqd->nr_ppas = nr_pages; 976 rqd->nr_ppas = nr_pages;
974 rrq->flags = flags; 977 rrq->flags = flags;
975 978
976 err = nvm_submit_io(dev->parent, rqd); 979 err = nvm_submit_io(dev, rqd);
977 if (err) { 980 if (err) {
978 pr_err("rrpc: I/O submission failed: %d\n", err); 981 pr_err("rrpc: I/O submission failed: %d\n", err);
979 bio_put(bio); 982 bio_put(bio);
@@ -1113,10 +1116,7 @@ static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
1113 1116
1114 div_u64_rem(pba, rrpc->nr_sects, &mod); 1117 div_u64_rem(pba, rrpc->nr_sects, &mod);
1115 1118
1116 addr[i].addr = pba; 1119 gaddr = rrpc_recov_addr(dev->parent, pba);
1117 raddr[mod].addr = slba + i;
1118
1119 gaddr = rrpc_ppa_to_gaddr(dev, pba);
1120 rlun = rrpc_ppa_to_lun(rrpc, gaddr); 1120 rlun = rrpc_ppa_to_lun(rrpc, gaddr);
1121 if (!rlun) { 1121 if (!rlun) {
1122 pr_err("rrpc: l2p corruption on lba %llu\n", 1122 pr_err("rrpc: l2p corruption on lba %llu\n",
@@ -1134,6 +1134,10 @@ static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
1134 rblk->state = NVM_BLK_ST_TGT; 1134 rblk->state = NVM_BLK_ST_TGT;
1135 rlun->nr_free_blocks--; 1135 rlun->nr_free_blocks--;
1136 } 1136 }
1137
1138 addr[i].addr = pba;
1139 addr[i].rblk = rblk;
1140 raddr[mod].addr = slba + i;
1137 } 1141 }
1138 1142
1139 return 0; 1143 return 0;
@@ -1230,7 +1234,6 @@ static void rrpc_core_free(struct rrpc *rrpc)
1230 1234
1231static void rrpc_luns_free(struct rrpc *rrpc) 1235static void rrpc_luns_free(struct rrpc *rrpc)
1232{ 1236{
1233 struct nvm_lun *lun;
1234 struct rrpc_lun *rlun; 1237 struct rrpc_lun *rlun;
1235 int i; 1238 int i;
1236 1239
@@ -1239,9 +1242,6 @@ static void rrpc_luns_free(struct rrpc *rrpc)
1239 1242
1240 for (i = 0; i < rrpc->nr_luns; i++) { 1243 for (i = 0; i < rrpc->nr_luns; i++) {
1241 rlun = &rrpc->luns[i]; 1244 rlun = &rrpc->luns[i];
1242 lun = rlun->parent;
1243 if (!lun)
1244 break;
1245 vfree(rlun->blocks); 1245 vfree(rlun->blocks);
1246 } 1246 }
1247 1247
@@ -1264,8 +1264,8 @@ static int rrpc_bb_discovery(struct nvm_tgt_dev *dev, struct rrpc_lun *rlun)
1264 return -ENOMEM; 1264 return -ENOMEM;
1265 1265
1266 ppa.ppa = 0; 1266 ppa.ppa = 0;
1267 ppa.g.ch = rlun->parent->chnl_id; 1267 ppa.g.ch = rlun->bppa.g.ch;
1268 ppa.g.lun = rlun->parent->lun_id; 1268 ppa.g.lun = rlun->bppa.g.lun;
1269 1269
1270 ret = nvm_get_bb_tbl(dev->parent, ppa, blks); 1270 ret = nvm_get_bb_tbl(dev->parent, ppa, blks);
1271 if (ret) { 1271 if (ret) {
@@ -1293,11 +1293,17 @@ out:
1293 return ret; 1293 return ret;
1294} 1294}
1295 1295
1296static int rrpc_luns_init(struct rrpc *rrpc, struct list_head *lun_list) 1296static void rrpc_set_lun_ppa(struct rrpc_lun *rlun, struct ppa_addr ppa)
1297{
1298 rlun->bppa.ppa = 0;
1299 rlun->bppa.g.ch = ppa.g.ch;
1300 rlun->bppa.g.lun = ppa.g.lun;
1301}
1302
1303static int rrpc_luns_init(struct rrpc *rrpc, struct ppa_addr *luns)
1297{ 1304{
1298 struct nvm_tgt_dev *dev = rrpc->dev; 1305 struct nvm_tgt_dev *dev = rrpc->dev;
1299 struct nvm_geo *geo = &dev->geo; 1306 struct nvm_geo *geo = &dev->geo;
1300 struct nvm_lun *lun;
1301 struct rrpc_lun *rlun; 1307 struct rrpc_lun *rlun;
1302 int i, j, ret = -EINVAL; 1308 int i, j, ret = -EINVAL;
1303 1309
@@ -1313,12 +1319,11 @@ static int rrpc_luns_init(struct rrpc *rrpc, struct list_head *lun_list)
1313 if (!rrpc->luns) 1319 if (!rrpc->luns)
1314 return -ENOMEM; 1320 return -ENOMEM;
1315 1321
1316 i = 0;
1317
1318 /* 1:1 mapping */ 1322 /* 1:1 mapping */
1319 list_for_each_entry(lun, lun_list, list) { 1323 for (i = 0; i < rrpc->nr_luns; i++) {
1320 rlun = &rrpc->luns[i++]; 1324 rlun = &rrpc->luns[i];
1321 rlun->parent = lun; 1325 rlun->id = i;
1326 rrpc_set_lun_ppa(rlun, luns[i]);
1322 rlun->blocks = vzalloc(sizeof(struct rrpc_block) * 1327 rlun->blocks = vzalloc(sizeof(struct rrpc_block) *
1323 geo->blks_per_lun); 1328 geo->blks_per_lun);
1324 if (!rlun->blocks) { 1329 if (!rlun->blocks) {
@@ -1356,8 +1361,6 @@ static int rrpc_luns_init(struct rrpc *rrpc, struct list_head *lun_list)
1356 spin_lock_init(&rlun->lock); 1361 spin_lock_init(&rlun->lock);
1357 } 1362 }
1358 1363
1359 WARN_ON(i != rrpc->nr_luns);
1360
1361 return 0; 1364 return 0;
1362err: 1365err:
1363 return ret; 1366 return ret;
@@ -1511,14 +1514,12 @@ err:
1511 1514
1512static struct nvm_tgt_type tt_rrpc; 1515static struct nvm_tgt_type tt_rrpc;
1513 1516
1514static void *rrpc_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk, 1517static void *rrpc_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk)
1515 struct list_head *lun_list)
1516{ 1518{
1517 struct request_queue *bqueue = dev->q; 1519 struct request_queue *bqueue = dev->q;
1518 struct request_queue *tqueue = tdisk->queue; 1520 struct request_queue *tqueue = tdisk->queue;
1519 struct nvm_geo *geo = &dev->geo; 1521 struct nvm_geo *geo = &dev->geo;
1520 struct rrpc *rrpc; 1522 struct rrpc *rrpc;
1521 int lun_begin = (list_first_entry(lun_list, struct nvm_lun, list))->id;
1522 sector_t soffset; 1523 sector_t soffset;
1523 int ret; 1524 int ret;
1524 1525
@@ -1553,14 +1554,12 @@ static void *rrpc_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
1553 } 1554 }
1554 rrpc->soffset = soffset; 1555 rrpc->soffset = soffset;
1555 1556
1556 ret = rrpc_luns_init(rrpc, lun_list); 1557 ret = rrpc_luns_init(rrpc, dev->luns);
1557 if (ret) { 1558 if (ret) {
1558 pr_err("nvm: rrpc: could not initialize luns\n"); 1559 pr_err("nvm: rrpc: could not initialize luns\n");
1559 goto err; 1560 goto err;
1560 } 1561 }
1561 1562
1562 rrpc->poffset = geo->sec_per_lun * lun_begin;
1563
1564 ret = rrpc_core_init(rrpc); 1563 ret = rrpc_core_init(rrpc);
1565 if (ret) { 1564 if (ret) {
1566 pr_err("nvm: rrpc: could not initialize core\n"); 1565 pr_err("nvm: rrpc: could not initialize core\n");
diff --git a/drivers/lightnvm/rrpc.h b/drivers/lightnvm/rrpc.h
index c55cec9a1a26..bc8adba4d63f 100644
--- a/drivers/lightnvm/rrpc.h
+++ b/drivers/lightnvm/rrpc.h
@@ -74,7 +74,9 @@ struct rrpc_block {
74 74
75struct rrpc_lun { 75struct rrpc_lun {
76 struct rrpc *rrpc; 76 struct rrpc *rrpc;
77 struct nvm_lun *parent; 77
78 int id;
79 struct ppa_addr bppa;
78 80
79 struct rrpc_block *cur, *gc_cur; 81 struct rrpc_block *cur, *gc_cur;
80 struct rrpc_block *blocks; /* Reference to block allocation */ 82 struct rrpc_block *blocks; /* Reference to block allocation */
@@ -107,7 +109,6 @@ struct rrpc {
107 struct gendisk *disk; 109 struct gendisk *disk;
108 110
109 sector_t soffset; /* logical sector offset */ 111 sector_t soffset; /* logical sector offset */
110 u64 poffset; /* physical page offset */
111 112
112 int nr_luns; 113 int nr_luns;
113 struct rrpc_lun *luns; 114 struct rrpc_lun *luns;
@@ -164,14 +165,37 @@ struct rrpc_rev_addr {
164 u64 addr; 165 u64 addr;
165}; 166};
166 167
168static inline struct ppa_addr rrpc_linear_to_generic_addr(struct nvm_geo *geo,
169 struct ppa_addr r)
170{
171 struct ppa_addr l;
172 int secs, pgs;
173 sector_t ppa = r.ppa;
174
175 l.ppa = 0;
176
177 div_u64_rem(ppa, geo->sec_per_pg, &secs);
178 l.g.sec = secs;
179
180 sector_div(ppa, geo->sec_per_pg);
181 div_u64_rem(ppa, geo->pgs_per_blk, &pgs);
182 l.g.pg = pgs;
183
184 return l;
185}
186
187static inline struct ppa_addr rrpc_recov_addr(struct nvm_dev *dev, u64 pba)
188{
189 return linear_to_generic_addr(&dev->geo, pba);
190}
191
167static inline u64 rrpc_blk_to_ppa(struct rrpc *rrpc, struct rrpc_block *rblk) 192static inline u64 rrpc_blk_to_ppa(struct rrpc *rrpc, struct rrpc_block *rblk)
168{ 193{
169 struct nvm_tgt_dev *dev = rrpc->dev; 194 struct nvm_tgt_dev *dev = rrpc->dev;
170 struct nvm_geo *geo = &dev->geo; 195 struct nvm_geo *geo = &dev->geo;
171 struct rrpc_lun *rlun = rblk->rlun; 196 struct rrpc_lun *rlun = rblk->rlun;
172 struct nvm_lun *lun = rlun->parent;
173 197
174 return (lun->id * geo->sec_per_lun) + (rblk->id * geo->sec_per_blk); 198 return (rlun->id * geo->sec_per_lun) + (rblk->id * geo->sec_per_blk);
175} 199}
176 200
177static inline sector_t rrpc_get_laddr(struct bio *bio) 201static inline sector_t rrpc_get_laddr(struct bio *bio)
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index 1cdc8124c8c0..588d4a34c083 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -371,6 +371,9 @@ static int nvme_nvm_get_l2p_tbl(struct nvm_dev *nvmdev, u64 slba, u32 nlb,
371 return -EINVAL; 371 return -EINVAL;
372 } 372 }
373 373
374 /* Transform physical address to target address space */
375 nvmdev->mt->part_to_tgt(nvmdev, entries, cmd_nlb);
376
374 if (update_l2p(cmd_slba, cmd_nlb, entries, priv)) { 377 if (update_l2p(cmd_slba, cmd_nlb, entries, priv)) {
375 ret = -EINTR; 378 ret = -EINTR;
376 goto out; 379 goto out;
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index cc210cc85c6d..2222853ef969 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -47,6 +47,7 @@ struct ppa_addr {
47struct nvm_rq; 47struct nvm_rq;
48struct nvm_id; 48struct nvm_id;
49struct nvm_dev; 49struct nvm_dev;
50struct nvm_tgt_dev;
50 51
51typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *); 52typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *);
52typedef int (nvm_id_fn)(struct nvm_dev *, struct nvm_id *); 53typedef int (nvm_id_fn)(struct nvm_dev *, struct nvm_id *);
@@ -210,7 +211,6 @@ struct nvm_id {
210 211
211struct nvm_target { 212struct nvm_target {
212 struct list_head list; 213 struct list_head list;
213 struct list_head lun_list;
214 struct nvm_tgt_dev *dev; 214 struct nvm_tgt_dev *dev;
215 struct nvm_tgt_type *type; 215 struct nvm_tgt_type *type;
216 struct gendisk *disk; 216 struct gendisk *disk;
@@ -231,7 +231,7 @@ typedef void (nvm_end_io_fn)(struct nvm_rq *);
231 231
232struct nvm_rq { 232struct nvm_rq {
233 struct nvm_tgt_instance *ins; 233 struct nvm_tgt_instance *ins;
234 struct nvm_dev *dev; 234 struct nvm_tgt_dev *dev;
235 235
236 struct bio *bio; 236 struct bio *bio;
237 237
@@ -266,15 +266,6 @@ static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata)
266 return rqdata + 1; 266 return rqdata + 1;
267} 267}
268 268
269struct nvm_lun {
270 int id;
271
272 int lun_id;
273 int chnl_id;
274
275 struct list_head list;
276};
277
278enum { 269enum {
279 NVM_BLK_ST_FREE = 0x1, /* Free block */ 270 NVM_BLK_ST_FREE = 0x1, /* Free block */
280 NVM_BLK_ST_TGT = 0x2, /* Block in use by target */ 271 NVM_BLK_ST_TGT = 0x2, /* Block in use by target */
@@ -321,6 +312,9 @@ struct nvm_tgt_dev {
321 /* Device information */ 312 /* Device information */
322 struct nvm_geo geo; 313 struct nvm_geo geo;
323 314
315 /* Base ppas for target LUNs */
316 struct ppa_addr *luns;
317
324 sector_t total_secs; 318 sector_t total_secs;
325 319
326 struct nvm_id identity; 320 struct nvm_id identity;
@@ -330,6 +324,7 @@ struct nvm_tgt_dev {
330 struct nvm_dev_ops *ops; 324 struct nvm_dev_ops *ops;
331 325
332 void *parent; 326 void *parent;
327 void *map;
333}; 328};
334 329
335struct nvm_dev { 330struct nvm_dev {
@@ -363,16 +358,18 @@ struct nvm_dev {
363 char name[DISK_NAME_LEN]; 358 char name[DISK_NAME_LEN];
364 void *private_data; 359 void *private_data;
365 360
361 void *rmap;
362
366 struct mutex mlock; 363 struct mutex mlock;
367 spinlock_t lock; 364 spinlock_t lock;
368}; 365};
369 366
370static inline struct ppa_addr linear_to_generic_addr(struct nvm_geo *geo, 367static inline struct ppa_addr linear_to_generic_addr(struct nvm_geo *geo,
371 struct ppa_addr r) 368 u64 pba)
372{ 369{
373 struct ppa_addr l; 370 struct ppa_addr l;
374 int secs, pgs, blks, luns; 371 int secs, pgs, blks, luns;
375 sector_t ppa = r.ppa; 372 sector_t ppa = pba;
376 373
377 l.ppa = 0; 374 l.ppa = 0;
378 375
@@ -465,8 +462,7 @@ static inline int ppa_to_slc(struct nvm_dev *dev, int slc_pg)
465 462
466typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *); 463typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *);
467typedef sector_t (nvm_tgt_capacity_fn)(void *); 464typedef sector_t (nvm_tgt_capacity_fn)(void *);
468typedef void *(nvm_tgt_init_fn)(struct nvm_tgt_dev *, struct gendisk *, 465typedef void *(nvm_tgt_init_fn)(struct nvm_tgt_dev *, struct gendisk *);
469 struct list_head *lun_list);
470typedef void (nvm_tgt_exit_fn)(void *); 466typedef void (nvm_tgt_exit_fn)(void *);
471 467
472struct nvm_tgt_type { 468struct nvm_tgt_type {
@@ -499,10 +495,11 @@ typedef void (nvmm_unregister_fn)(struct nvm_dev *);
499 495
500typedef int (nvmm_create_tgt_fn)(struct nvm_dev *, struct nvm_ioctl_create *); 496typedef int (nvmm_create_tgt_fn)(struct nvm_dev *, struct nvm_ioctl_create *);
501typedef int (nvmm_remove_tgt_fn)(struct nvm_dev *, struct nvm_ioctl_remove *); 497typedef int (nvmm_remove_tgt_fn)(struct nvm_dev *, struct nvm_ioctl_remove *);
502typedef int (nvmm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *); 498typedef int (nvmm_submit_io_fn)(struct nvm_tgt_dev *, struct nvm_rq *);
503typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct ppa_addr *, int); 499typedef int (nvmm_erase_blk_fn)(struct nvm_tgt_dev *, struct ppa_addr *, int);
504typedef int (nvmm_get_area_fn)(struct nvm_dev *, sector_t *, sector_t); 500typedef int (nvmm_get_area_fn)(struct nvm_dev *, sector_t *, sector_t);
505typedef void (nvmm_put_area_fn)(struct nvm_dev *, sector_t); 501typedef void (nvmm_put_area_fn)(struct nvm_dev *, sector_t);
502typedef void (nvmm_part_to_tgt_fn)(struct nvm_dev *, sector_t*, int);
506 503
507struct nvmm_type { 504struct nvmm_type {
508 const char *name; 505 const char *name;
@@ -520,6 +517,8 @@ struct nvmm_type {
520 nvmm_get_area_fn *get_area; 517 nvmm_get_area_fn *get_area;
521 nvmm_put_area_fn *put_area; 518 nvmm_put_area_fn *put_area;
522 519
520 nvmm_part_to_tgt_fn *part_to_tgt;
521
523 struct list_head list; 522 struct list_head list;
524}; 523};
525 524
@@ -533,14 +532,18 @@ extern void nvm_unregister(struct nvm_dev *);
533extern int nvm_set_bb_tbl(struct nvm_dev *dev, struct ppa_addr *ppas, 532extern int nvm_set_bb_tbl(struct nvm_dev *dev, struct ppa_addr *ppas,
534 int nr_ppas, int type); 533 int nr_ppas, int type);
535 534
536extern int nvm_submit_io(struct nvm_dev *, struct nvm_rq *); 535extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *);
537extern void nvm_generic_to_addr_mode(struct nvm_dev *, struct nvm_rq *); 536extern void nvm_generic_to_addr_mode(struct nvm_dev *, struct nvm_rq *);
538extern void nvm_addr_to_generic_mode(struct nvm_dev *, struct nvm_rq *); 537extern void nvm_addr_to_generic_mode(struct nvm_dev *, struct nvm_rq *);
539extern int nvm_set_rqd_ppalist(struct nvm_dev *, struct nvm_rq *, 538extern int nvm_set_rqd_ppalist(struct nvm_dev *, struct nvm_rq *,
540 const struct ppa_addr *, int, int); 539 const struct ppa_addr *, int, int);
541extern void nvm_free_rqd_ppalist(struct nvm_dev *, struct nvm_rq *); 540extern void nvm_free_rqd_ppalist(struct nvm_dev *, struct nvm_rq *);
542extern int nvm_erase_ppa(struct nvm_dev *, struct ppa_addr *, int, int); 541extern int nvm_erase_ppa(struct nvm_dev *, struct ppa_addr *, int, int);
543extern int nvm_erase_blk(struct nvm_dev *, struct ppa_addr *, int); 542extern int nvm_erase_blk(struct nvm_tgt_dev *, struct ppa_addr *, int);
543extern int nvm_get_l2p_tbl(struct nvm_dev *, u64, u32, nvm_l2p_update_fn *,
544 void *);
545extern int nvm_get_area(struct nvm_dev *, sector_t *, sector_t);
546extern void nvm_put_area(struct nvm_dev *, sector_t);
544extern void nvm_end_io(struct nvm_rq *, int); 547extern void nvm_end_io(struct nvm_rq *, int);
545extern int nvm_submit_ppa(struct nvm_dev *, struct ppa_addr *, int, int, int, 548extern int nvm_submit_ppa(struct nvm_dev *, struct ppa_addr *, int, int, int,
546 void *, int); 549 void *, int);