path: root/drivers/lightnvm/core.c
author		Matias Bjørling <matias@cnexlabs.com>	2017-01-31 07:17:09 -0500
committer	Jens Axboe <axboe@fb.com>	2017-01-31 10:32:13 -0500
commit		ade69e2432b795c76653e1dfa09c684549826a50 (patch)
tree		7b1bdff11f23c803c5047c4fd973e3698da8e933 /drivers/lightnvm/core.c
parent		400f73b23f457a82288814e21af57dbc9f3f2afd (diff)
lightnvm: merge gennvm with core
For the first iteration of Open-Channel SSDs, it was anticipated that there
could be various media managers on top of an open-channel SSD, so that
vendors could plug in their own host-side FTLs without a media manager in
between.

Now that an Open-Channel SSD is exposed as a traditional block device, there
is no longer a need for this. Therefore, let's merge the gennvm code with
core and simplify the stack.

Signed-off-by: Matias Bjørling <matias@cnexlabs.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
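With gennvm merged in, the virtual-to-physical translation that used to live
behind dev->mt is done by core itself: a target addresses channels and LUNs
relative to its own slice of the device, and core adds the per-channel and
per-LUN offsets recorded when the target was created. A minimal standalone
sketch of that offset arithmetic (illustrative only, not part of this patch;
the struct and function names below are invented, while luns_per_chnl mirrors
the struct nvm_geo field used in the diff):

#include <stdio.h>

/* Hypothetical, simplified stand-in for the kernel geometry. */
struct sketch_geo {
	int luns_per_chnl;	/* LUNs per channel, as in struct nvm_geo */
};

/*
 * Mirrors the arithmetic in nvm_map_to_dev()/nvm_remove_tgt_dev(): a
 * target-relative (ch, lun) pair plus the offsets stored in the device
 * map yields the flat device LUN id used in dev->lun_map.
 */
static int sketch_tgt_to_dev_lunid(const struct sketch_geo *geo,
				   int ch_off, int lun_off,
				   int tgt_ch, int tgt_lun)
{
	int dev_ch = tgt_ch + ch_off;
	int dev_lun = tgt_lun + lun_off;

	return (dev_ch * geo->luns_per_chnl) + dev_lun;
}

int main(void)
{
	struct sketch_geo geo = { .luns_per_chnl = 4 };

	/* A target owning device LUNs 6..9 starts at channel 1, LUN 2. */
	printf("%d\n", sketch_tgt_to_dev_lunid(&geo, 1, 2, 0, 0)); /* 6 */
	printf("%d\n", sketch_tgt_to_dev_lunid(&geo, 1, 2, 0, 1)); /* 7 */
	return 0;
}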
Diffstat (limited to 'drivers/lightnvm/core.c')
-rw-r--r--	drivers/lightnvm/core.c	786
1 file changed, 595 insertions(+), 191 deletions(-)
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 02240a0b39c9..e9a495650dd0 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -29,10 +29,492 @@
 
 static LIST_HEAD(nvm_tgt_types);
 static DECLARE_RWSEM(nvm_tgtt_lock);
-static LIST_HEAD(nvm_mgrs);
 static LIST_HEAD(nvm_devices);
 static DECLARE_RWSEM(nvm_lock);
 
+/* Map between virtual and physical channel and lun */
+struct nvm_ch_map {
+	int ch_off;
+	int nr_luns;
+	int *lun_offs;
+};
+
+struct nvm_dev_map {
+	struct nvm_ch_map *chnls;
+	int nr_chnls;
+};
+
+struct nvm_area {
+	struct list_head list;
+	sector_t begin;
+	sector_t end;	/* end is excluded */
+};
+
+enum {
+	TRANS_TGT_TO_DEV =	0x0,
+	TRANS_DEV_TO_TGT =	0x1,
+};
+
+static struct nvm_target *nvm_find_target(struct nvm_dev *dev, const char *name)
+{
+	struct nvm_target *tgt;
+
+	list_for_each_entry(tgt, &dev->targets, list)
+		if (!strcmp(name, tgt->disk->disk_name))
+			return tgt;
+
+	return NULL;
+}
+
+static int nvm_reserve_luns(struct nvm_dev *dev, int lun_begin, int lun_end)
+{
+	int i;
+
+	for (i = lun_begin; i <= lun_end; i++) {
+		if (test_and_set_bit(i, dev->lun_map)) {
+			pr_err("nvm: lun %d already allocated\n", i);
+			goto err;
+		}
+	}
+
+	return 0;
+err:
+	while (--i >= lun_begin)
+		clear_bit(i, dev->lun_map);
+
+	return -EBUSY;
+}
+
+static void nvm_release_luns_err(struct nvm_dev *dev, int lun_begin,
+				 int lun_end)
+{
+	int i;
+
+	for (i = lun_begin; i <= lun_end; i++)
+		WARN_ON(!test_and_clear_bit(i, dev->lun_map));
+}
+
+static void nvm_remove_tgt_dev(struct nvm_tgt_dev *tgt_dev)
+{
+	struct nvm_dev *dev = tgt_dev->parent;
+	struct nvm_dev_map *dev_map = tgt_dev->map;
+	int i, j;
+
+	for (i = 0; i < dev_map->nr_chnls; i++) {
+		struct nvm_ch_map *ch_map = &dev_map->chnls[i];
+		int *lun_offs = ch_map->lun_offs;
+		int ch = i + ch_map->ch_off;
+
+		for (j = 0; j < ch_map->nr_luns; j++) {
+			int lun = j + lun_offs[j];
+			int lunid = (ch * dev->geo.luns_per_chnl) + lun;
+
+			WARN_ON(!test_and_clear_bit(lunid, dev->lun_map));
+		}
+
+		kfree(ch_map->lun_offs);
+	}
+
+	kfree(dev_map->chnls);
+	kfree(dev_map);
+
+	kfree(tgt_dev->luns);
+	kfree(tgt_dev);
+}
+
+static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
+					      int lun_begin, int lun_end)
+{
+	struct nvm_tgt_dev *tgt_dev = NULL;
+	struct nvm_dev_map *dev_rmap = dev->rmap;
+	struct nvm_dev_map *dev_map;
+	struct ppa_addr *luns;
+	int nr_luns = lun_end - lun_begin + 1;
+	int luns_left = nr_luns;
+	int nr_chnls = nr_luns / dev->geo.luns_per_chnl;
+	int nr_chnls_mod = nr_luns % dev->geo.luns_per_chnl;
+	int bch = lun_begin / dev->geo.luns_per_chnl;
+	int blun = lun_begin % dev->geo.luns_per_chnl;
+	int lunid = 0;
+	int lun_balanced = 1;
+	int prev_nr_luns;
+	int i, j;
+
+	nr_chnls = nr_luns / dev->geo.luns_per_chnl;
+	nr_chnls = (nr_chnls_mod == 0) ? nr_chnls : nr_chnls + 1;
+
+	dev_map = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
+	if (!dev_map)
+		goto err_dev;
+
+	dev_map->chnls = kcalloc(nr_chnls, sizeof(struct nvm_ch_map),
+							GFP_KERNEL);
+	if (!dev_map->chnls)
+		goto err_chnls;
+
+	luns = kcalloc(nr_luns, sizeof(struct ppa_addr), GFP_KERNEL);
+	if (!luns)
+		goto err_luns;
+
+	prev_nr_luns = (luns_left > dev->geo.luns_per_chnl) ?
+					dev->geo.luns_per_chnl : luns_left;
+	for (i = 0; i < nr_chnls; i++) {
+		struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[i + bch];
+		int *lun_roffs = ch_rmap->lun_offs;
+		struct nvm_ch_map *ch_map = &dev_map->chnls[i];
+		int *lun_offs;
+		int luns_in_chnl = (luns_left > dev->geo.luns_per_chnl) ?
+					dev->geo.luns_per_chnl : luns_left;
+
+		if (lun_balanced && prev_nr_luns != luns_in_chnl)
+			lun_balanced = 0;
+
+		ch_map->ch_off = ch_rmap->ch_off = bch;
+		ch_map->nr_luns = luns_in_chnl;
+
+		lun_offs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
+		if (!lun_offs)
+			goto err_ch;
+
+		for (j = 0; j < luns_in_chnl; j++) {
+			luns[lunid].ppa = 0;
+			luns[lunid].g.ch = i;
+			luns[lunid++].g.lun = j;
+
+			lun_offs[j] = blun;
+			lun_roffs[j + blun] = blun;
+		}
+
+		ch_map->lun_offs = lun_offs;
+
+		/* when starting a new channel, lun offset is reset */
+		blun = 0;
+		luns_left -= luns_in_chnl;
+	}
+
+	dev_map->nr_chnls = nr_chnls;
+
+	tgt_dev = kmalloc(sizeof(struct nvm_tgt_dev), GFP_KERNEL);
+	if (!tgt_dev)
+		goto err_ch;
+
+	memcpy(&tgt_dev->geo, &dev->geo, sizeof(struct nvm_geo));
+	/* Target device only owns a portion of the physical device */
+	tgt_dev->geo.nr_chnls = nr_chnls;
+	tgt_dev->geo.nr_luns = nr_luns;
+	tgt_dev->geo.luns_per_chnl = (lun_balanced) ? prev_nr_luns : -1;
+	tgt_dev->total_secs = nr_luns * tgt_dev->geo.sec_per_lun;
+	tgt_dev->q = dev->q;
+	tgt_dev->map = dev_map;
+	tgt_dev->luns = luns;
+	memcpy(&tgt_dev->identity, &dev->identity, sizeof(struct nvm_id));
+
+	tgt_dev->parent = dev;
+
+	return tgt_dev;
+err_ch:
+	while (--i >= 0)
+		kfree(dev_map->chnls[i].lun_offs);
+	kfree(luns);
+err_luns:
+	kfree(dev_map->chnls);
+err_chnls:
+	kfree(dev_map);
+err_dev:
+	return tgt_dev;
+}
+
+static const struct block_device_operations nvm_fops = {
+	.owner		= THIS_MODULE,
+};
+
+static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
+{
+	struct nvm_ioctl_create_simple *s = &create->conf.s;
+	struct request_queue *tqueue;
+	struct gendisk *tdisk;
+	struct nvm_tgt_type *tt;
+	struct nvm_target *t;
+	struct nvm_tgt_dev *tgt_dev;
+	void *targetdata;
+
+	tt = nvm_find_target_type(create->tgttype, 1);
+	if (!tt) {
+		pr_err("nvm: target type %s not found\n", create->tgttype);
+		return -EINVAL;
+	}
+
+	mutex_lock(&dev->mlock);
+	t = nvm_find_target(dev, create->tgtname);
+	if (t) {
+		pr_err("nvm: target name already exists.\n");
+		mutex_unlock(&dev->mlock);
+		return -EINVAL;
+	}
+	mutex_unlock(&dev->mlock);
+
+	if (nvm_reserve_luns(dev, s->lun_begin, s->lun_end))
+		return -ENOMEM;
+
+	t = kmalloc(sizeof(struct nvm_target), GFP_KERNEL);
+	if (!t)
+		goto err_reserve;
+
+	tgt_dev = nvm_create_tgt_dev(dev, s->lun_begin, s->lun_end);
+	if (!tgt_dev) {
+		pr_err("nvm: could not create target device\n");
+		goto err_t;
+	}
+
+	tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node);
+	if (!tqueue)
+		goto err_dev;
+	blk_queue_make_request(tqueue, tt->make_rq);
+
+	tdisk = alloc_disk(0);
+	if (!tdisk)
+		goto err_queue;
+
+	sprintf(tdisk->disk_name, "%s", create->tgtname);
+	tdisk->flags = GENHD_FL_EXT_DEVT;
+	tdisk->major = 0;
+	tdisk->first_minor = 0;
+	tdisk->fops = &nvm_fops;
+	tdisk->queue = tqueue;
+
+	targetdata = tt->init(tgt_dev, tdisk);
+	if (IS_ERR(targetdata))
+		goto err_init;
+
+	tdisk->private_data = targetdata;
+	tqueue->queuedata = targetdata;
+
+	blk_queue_max_hw_sectors(tqueue, 8 * dev->ops->max_phys_sect);
+
+	set_capacity(tdisk, tt->capacity(targetdata));
+	add_disk(tdisk);
+
+	t->type = tt;
+	t->disk = tdisk;
+	t->dev = tgt_dev;
+
+	mutex_lock(&dev->mlock);
+	list_add_tail(&t->list, &dev->targets);
+	mutex_unlock(&dev->mlock);
+
+	return 0;
+err_init:
+	put_disk(tdisk);
+err_queue:
+	blk_cleanup_queue(tqueue);
+err_dev:
+	kfree(tgt_dev);
+err_t:
+	kfree(t);
+err_reserve:
+	nvm_release_luns_err(dev, s->lun_begin, s->lun_end);
+	return -ENOMEM;
+}
+
+static void __nvm_remove_target(struct nvm_target *t)
+{
+	struct nvm_tgt_type *tt = t->type;
+	struct gendisk *tdisk = t->disk;
+	struct request_queue *q = tdisk->queue;
+
+	del_gendisk(tdisk);
+	blk_cleanup_queue(q);
+
+	if (tt->exit)
+		tt->exit(tdisk->private_data);
+
+	nvm_remove_tgt_dev(t->dev);
+	put_disk(tdisk);
+
+	list_del(&t->list);
+	kfree(t);
+}
+
+/**
+ * nvm_remove_tgt - Removes a target from the device
+ * @dev:	device
+ * @remove:	ioctl structure with target name to remove.
+ *
+ * Returns:
+ * 0: on success
+ * 1: on not found
+ * <0: on error
+ */
+static int nvm_remove_tgt(struct nvm_dev *dev, struct nvm_ioctl_remove *remove)
+{
+	struct nvm_target *t;
+
+	mutex_lock(&dev->mlock);
+	t = nvm_find_target(dev, remove->tgtname);
+	if (!t) {
+		mutex_unlock(&dev->mlock);
+		return 1;
+	}
+	__nvm_remove_target(t);
+	mutex_unlock(&dev->mlock);
+
+	return 0;
+}
+
+static int nvm_register_map(struct nvm_dev *dev)
+{
+	struct nvm_dev_map *rmap;
+	int i, j;
+
+	rmap = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
+	if (!rmap)
+		goto err_rmap;
+
+	rmap->chnls = kcalloc(dev->geo.nr_chnls, sizeof(struct nvm_ch_map),
+							GFP_KERNEL);
+	if (!rmap->chnls)
+		goto err_chnls;
+
+	for (i = 0; i < dev->geo.nr_chnls; i++) {
+		struct nvm_ch_map *ch_rmap;
+		int *lun_roffs;
+		int luns_in_chnl = dev->geo.luns_per_chnl;
+
+		ch_rmap = &rmap->chnls[i];
+
+		ch_rmap->ch_off = -1;
+		ch_rmap->nr_luns = luns_in_chnl;
+
+		lun_roffs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
+		if (!lun_roffs)
+			goto err_ch;
+
+		for (j = 0; j < luns_in_chnl; j++)
+			lun_roffs[j] = -1;
+
+		ch_rmap->lun_offs = lun_roffs;
+	}
+
+	dev->rmap = rmap;
+
+	return 0;
+err_ch:
+	while (--i >= 0)
+		kfree(rmap->chnls[i].lun_offs);
+err_chnls:
+	kfree(rmap);
+err_rmap:
+	return -ENOMEM;
+}
+
+static int nvm_map_to_dev(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
+{
+	struct nvm_dev_map *dev_map = tgt_dev->map;
+	struct nvm_ch_map *ch_map = &dev_map->chnls[p->g.ch];
+	int lun_off = ch_map->lun_offs[p->g.lun];
+	struct nvm_dev *dev = tgt_dev->parent;
+	struct nvm_dev_map *dev_rmap = dev->rmap;
+	struct nvm_ch_map *ch_rmap;
+	int lun_roff;
+
+	p->g.ch += ch_map->ch_off;
+	p->g.lun += lun_off;
+
+	ch_rmap = &dev_rmap->chnls[p->g.ch];
+	lun_roff = ch_rmap->lun_offs[p->g.lun];
+
+	if (unlikely(ch_rmap->ch_off < 0 || lun_roff < 0)) {
+		pr_err("nvm: corrupted device partition table\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int nvm_map_to_tgt(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
+{
+	struct nvm_dev *dev = tgt_dev->parent;
+	struct nvm_dev_map *dev_rmap = dev->rmap;
+	struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[p->g.ch];
+	int lun_roff = ch_rmap->lun_offs[p->g.lun];
+
+	p->g.ch -= ch_rmap->ch_off;
+	p->g.lun -= lun_roff;
+
+	return 0;
+}
+
+static int nvm_trans_rq(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
+			int flag)
+{
+	int i;
+	int ret = 0;
+
+	if (rqd->nr_ppas == 1) {
+		if (flag == TRANS_TGT_TO_DEV)
+			return nvm_map_to_dev(tgt_dev, &rqd->ppa_addr);
+		else
+			return nvm_map_to_tgt(tgt_dev, &rqd->ppa_addr);
+	}
+
+	for (i = 0; i < rqd->nr_ppas; i++) {
+		if (flag == TRANS_TGT_TO_DEV)
+			ret = nvm_map_to_dev(tgt_dev, &rqd->ppa_list[i]);
+		else
+			ret = nvm_map_to_tgt(tgt_dev, &rqd->ppa_list[i]);
+
+		if (ret)
+			break;
+	}
+
+	return ret;
+}
+
+static struct ppa_addr nvm_trans_ppa(struct nvm_tgt_dev *tgt_dev,
+				     struct ppa_addr p, int dir)
+{
+	struct ppa_addr ppa = p;
+
+	if (dir == TRANS_TGT_TO_DEV)
+		nvm_map_to_dev(tgt_dev, &ppa);
+	else
+		nvm_map_to_tgt(tgt_dev, &ppa);
+
+	return ppa;
+}
+
+void nvm_part_to_tgt(struct nvm_dev *dev, sector_t *entries,
+		     int len)
+{
+	struct nvm_geo *geo = &dev->geo;
+	struct nvm_dev_map *dev_rmap = dev->rmap;
+	u64 i;
+
+	for (i = 0; i < len; i++) {
+		struct nvm_ch_map *ch_rmap;
+		int *lun_roffs;
+		struct ppa_addr gaddr;
+		u64 pba = le64_to_cpu(entries[i]);
+		int off;
+		u64 diff;
+
+		if (!pba)
+			continue;
+
+		gaddr = linear_to_generic_addr(geo, pba);
+		ch_rmap = &dev_rmap->chnls[gaddr.g.ch];
+		lun_roffs = ch_rmap->lun_offs;
+
+		off = gaddr.g.ch * geo->luns_per_chnl + gaddr.g.lun;
+
+		diff = ((ch_rmap->ch_off * geo->luns_per_chnl) +
+				(lun_roffs[gaddr.g.lun])) * geo->sec_per_lun;
+
+		entries[i] -= cpu_to_le64(diff);
+	}
+}
+EXPORT_SYMBOL(nvm_part_to_tgt);
+
 struct nvm_tgt_type *nvm_find_target_type(const char *name, int lock)
 {
 	struct nvm_tgt_type *tmp, *tt = NULL;
@@ -92,78 +574,6 @@ void nvm_dev_dma_free(struct nvm_dev *dev, void *addr, dma_addr_t dma_handler)
 }
 EXPORT_SYMBOL(nvm_dev_dma_free);
 
-static struct nvmm_type *nvm_find_mgr_type(const char *name)
-{
-	struct nvmm_type *mt;
-
-	list_for_each_entry(mt, &nvm_mgrs, list)
-		if (!strcmp(name, mt->name))
-			return mt;
-
-	return NULL;
-}
-
-static struct nvmm_type *nvm_init_mgr(struct nvm_dev *dev)
-{
-	struct nvmm_type *mt;
-	int ret;
-
-	lockdep_assert_held(&nvm_lock);
-
-	list_for_each_entry(mt, &nvm_mgrs, list) {
-		if (strncmp(dev->sb.mmtype, mt->name, NVM_MMTYPE_LEN))
-			continue;
-
-		ret = mt->register_mgr(dev);
-		if (ret < 0) {
-			pr_err("nvm: media mgr failed to init (%d) on dev %s\n",
-								ret, dev->name);
-			return NULL; /* initialization failed */
-		} else if (ret > 0)
-			return mt;
-	}
-
-	return NULL;
-}
-
-int nvm_register_mgr(struct nvmm_type *mt)
-{
-	struct nvm_dev *dev;
-	int ret = 0;
-
-	down_write(&nvm_lock);
-	if (nvm_find_mgr_type(mt->name)) {
-		ret = -EEXIST;
-		goto finish;
-	} else {
-		list_add(&mt->list, &nvm_mgrs);
-	}
-
-	/* try to register media mgr if any device have none configured */
-	list_for_each_entry(dev, &nvm_devices, devices) {
-		if (dev->mt)
-			continue;
-
-		dev->mt = nvm_init_mgr(dev);
-	}
-finish:
-	up_write(&nvm_lock);
-
-	return ret;
-}
-EXPORT_SYMBOL(nvm_register_mgr);
-
-void nvm_unregister_mgr(struct nvmm_type *mt)
-{
-	if (!mt)
-		return;
-
-	down_write(&nvm_lock);
-	list_del(&mt->list);
-	up_write(&nvm_lock);
-}
-EXPORT_SYMBOL(nvm_unregister_mgr);
-
 static struct nvm_dev *nvm_find_nvm_dev(const char *name)
 {
 	struct nvm_dev *dev;
@@ -183,13 +593,13 @@ static void nvm_tgt_generic_to_addr_mode(struct nvm_tgt_dev *tgt_dev,
 
 	if (rqd->nr_ppas > 1) {
 		for (i = 0; i < rqd->nr_ppas; i++) {
-			rqd->ppa_list[i] = dev->mt->trans_ppa(tgt_dev,
+			rqd->ppa_list[i] = nvm_trans_ppa(tgt_dev,
 					rqd->ppa_list[i], TRANS_TGT_TO_DEV);
 			rqd->ppa_list[i] = generic_to_dev_addr(dev,
 					rqd->ppa_list[i]);
 		}
 	} else {
-		rqd->ppa_addr = dev->mt->trans_ppa(tgt_dev, rqd->ppa_addr,
+		rqd->ppa_addr = nvm_trans_ppa(tgt_dev, rqd->ppa_addr,
 				TRANS_TGT_TO_DEV);
 		rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr);
 	}
@@ -242,7 +652,7 @@ int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
 	ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
 	nvm_free_rqd_ppalist(dev, &rqd);
 	if (ret) {
-		pr_err("nvm: sysblk failed bb mark\n");
+		pr_err("nvm: failed bb mark\n");
 		return -EINVAL;
 	}
 
@@ -262,15 +672,23 @@ int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
 {
 	struct nvm_dev *dev = tgt_dev->parent;
 
-	return dev->mt->submit_io(tgt_dev, rqd);
+	if (!dev->ops->submit_io)
+		return -ENODEV;
+
+	/* Convert address space */
+	nvm_generic_to_addr_mode(dev, rqd);
+
+	rqd->dev = tgt_dev;
+	return dev->ops->submit_io(dev, rqd);
 }
 EXPORT_SYMBOL(nvm_submit_io);
 
 int nvm_erase_blk(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p, int flags)
 {
-	struct nvm_dev *dev = tgt_dev->parent;
+	/* Convert address space */
+	nvm_map_to_dev(tgt_dev, p);
 
-	return dev->mt->erase_blk(tgt_dev, p, flags);
+	return nvm_erase_ppa(tgt_dev->parent, p, 1, flags);
 }
 EXPORT_SYMBOL(nvm_erase_blk);
 
@@ -289,16 +707,65 @@ EXPORT_SYMBOL(nvm_get_l2p_tbl);
 int nvm_get_area(struct nvm_tgt_dev *tgt_dev, sector_t *lba, sector_t len)
 {
 	struct nvm_dev *dev = tgt_dev->parent;
+	struct nvm_geo *geo = &dev->geo;
+	struct nvm_area *area, *prev, *next;
+	sector_t begin = 0;
+	sector_t max_sectors = (geo->sec_size * dev->total_secs) >> 9;
+
+	if (len > max_sectors)
+		return -EINVAL;
+
+	area = kmalloc(sizeof(struct nvm_area), GFP_KERNEL);
+	if (!area)
+		return -ENOMEM;
+
+	prev = NULL;
+
+	spin_lock(&dev->lock);
+	list_for_each_entry(next, &dev->area_list, list) {
+		if (begin + len > next->begin) {
+			begin = next->end;
+			prev = next;
+			continue;
+		}
+		break;
+	}
+
+	if ((begin + len) > max_sectors) {
+		spin_unlock(&dev->lock);
+		kfree(area);
+		return -EINVAL;
+	}
 
-	return dev->mt->get_area(dev, lba, len);
+	area->begin = *lba = begin;
+	area->end = begin + len;
+
+	if (prev) /* insert into sorted order */
+		list_add(&area->list, &prev->list);
+	else
+		list_add(&area->list, &dev->area_list);
+	spin_unlock(&dev->lock);
+
+	return 0;
 }
 EXPORT_SYMBOL(nvm_get_area);
 
-void nvm_put_area(struct nvm_tgt_dev *tgt_dev, sector_t lba)
+void nvm_put_area(struct nvm_tgt_dev *tgt_dev, sector_t begin)
 {
 	struct nvm_dev *dev = tgt_dev->parent;
+	struct nvm_area *area;
 
-	dev->mt->put_area(dev, lba);
+	spin_lock(&dev->lock);
+	list_for_each_entry(area, &dev->area_list, list) {
+		if (area->begin != begin)
+			continue;
+
+		list_del(&area->list);
+		spin_unlock(&dev->lock);
+		kfree(area);
+		return;
+	}
+	spin_unlock(&dev->lock);
 }
 EXPORT_SYMBOL(nvm_put_area);
 
@@ -409,8 +876,15 @@ EXPORT_SYMBOL(nvm_erase_ppa);
 
 void nvm_end_io(struct nvm_rq *rqd, int error)
 {
+	struct nvm_tgt_dev *tgt_dev = rqd->dev;
+	struct nvm_tgt_instance *ins = rqd->ins;
+
+	/* Convert address space */
+	if (tgt_dev)
+		nvm_trans_rq(tgt_dev, rqd, TRANS_DEV_TO_TGT);
+
 	rqd->error = error;
-	rqd->end_io(rqd);
+	ins->tt->end_io(rqd);
 }
 EXPORT_SYMBOL(nvm_end_io);
 
@@ -570,10 +1044,9 @@ EXPORT_SYMBOL(nvm_get_bb_tbl);
 int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa,
 		       u8 *blks)
 {
-	struct nvm_dev *dev = tgt_dev->parent;
+	ppa = nvm_trans_ppa(tgt_dev, ppa, TRANS_TGT_TO_DEV);
 
-	ppa = dev->mt->trans_ppa(tgt_dev, ppa, TRANS_TGT_TO_DEV);
-	return nvm_get_bb_tbl(dev, ppa, blks);
+	return nvm_get_bb_tbl(tgt_dev->parent, ppa, blks);
 }
 EXPORT_SYMBOL(nvm_get_tgt_bb_tbl);
 
@@ -691,36 +1164,31 @@ static int nvm_core_init(struct nvm_dev *dev)
 		goto err_fmtype;
 	}
 
+	INIT_LIST_HEAD(&dev->area_list);
+	INIT_LIST_HEAD(&dev->targets);
 	mutex_init(&dev->mlock);
 	spin_lock_init(&dev->lock);
 
-	blk_queue_logical_block_size(dev->q, geo->sec_size);
+	ret = nvm_register_map(dev);
+	if (ret)
+		goto err_fmtype;
 
+	blk_queue_logical_block_size(dev->q, geo->sec_size);
 	return 0;
 err_fmtype:
 	kfree(dev->lun_map);
 	return ret;
 }
 
-static void nvm_free_mgr(struct nvm_dev *dev)
-{
-	if (!dev->mt)
-		return;
-
-	dev->mt->unregister_mgr(dev);
-	dev->mt = NULL;
-}
-
 void nvm_free(struct nvm_dev *dev)
 {
 	if (!dev)
 		return;
 
-	nvm_free_mgr(dev);
-
 	if (dev->dma_pool)
 		dev->ops->destroy_dma_pool(dev->dma_pool);
 
+	kfree(dev->rmap);
 	kfree(dev->lptbl);
 	kfree(dev->lun_map);
 	kfree(dev);
@@ -731,9 +1199,6 @@ static int nvm_init(struct nvm_dev *dev)
 	struct nvm_geo *geo = &dev->geo;
 	int ret = -EINVAL;
 
-	if (!dev->q || !dev->ops)
-		return ret;
-
 	if (dev->ops->identity(dev, &dev->identity)) {
 		pr_err("nvm: device could not be identified\n");
 		goto err;
@@ -779,49 +1244,50 @@ int nvm_register(struct nvm_dev *dev)
 {
 	int ret;
 
-	ret = nvm_init(dev);
-	if (ret)
-		goto err_init;
+	if (!dev->q || !dev->ops)
+		return -EINVAL;
 
 	if (dev->ops->max_phys_sect > 256) {
 		pr_info("nvm: max sectors supported is 256.\n");
-		ret = -EINVAL;
-		goto err_init;
+		return -EINVAL;
 	}
 
 	if (dev->ops->max_phys_sect > 1) {
 		dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist");
 		if (!dev->dma_pool) {
 			pr_err("nvm: could not create dma pool\n");
-			ret = -ENOMEM;
-			goto err_init;
+			return -ENOMEM;
 		}
 	}
 
-	if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) {
-		ret = nvm_get_sysblock(dev, &dev->sb);
-		if (!ret)
-			pr_err("nvm: device not initialized.\n");
-		else if (ret < 0)
-			pr_err("nvm: err (%d) on device initialization\n", ret);
-	}
+	ret = nvm_init(dev);
+	if (ret)
+		goto err_init;
 
 	/* register device with a supported media manager */
 	down_write(&nvm_lock);
-	if (ret > 0)
-		dev->mt = nvm_init_mgr(dev);
 	list_add(&dev->devices, &nvm_devices);
 	up_write(&nvm_lock);
 
 	return 0;
 err_init:
-	kfree(dev->lun_map);
+	dev->ops->destroy_dma_pool(dev->dma_pool);
 	return ret;
 }
 EXPORT_SYMBOL(nvm_register);
 
 void nvm_unregister(struct nvm_dev *dev)
 {
+	struct nvm_target *t, *tmp;
+
+	mutex_lock(&dev->mlock);
+	list_for_each_entry_safe(t, tmp, &dev->targets, list) {
+		if (t->dev->parent != dev)
+			continue;
+		__nvm_remove_target(t);
+	}
+	mutex_unlock(&dev->mlock);
+
 	down_write(&nvm_lock);
 	list_del(&dev->devices);
 	up_write(&nvm_lock);
@@ -844,11 +1310,6 @@ static int __nvm_configure_create(struct nvm_ioctl_create *create)
 		return -EINVAL;
 	}
 
-	if (!dev->mt) {
-		pr_info("nvm: device has no media manager registered.\n");
-		return -ENODEV;
-	}
-
 	if (create->conf.type != NVM_CONFIG_TYPE_SIMPLE) {
 		pr_err("nvm: config type not valid\n");
 		return -EINVAL;
@@ -861,7 +1322,7 @@ static int __nvm_configure_create(struct nvm_ioctl_create *create)
 		return -EINVAL;
 	}
 
-	return dev->mt->create_tgt(dev, create);
+	return nvm_create_tgt(dev, create);
 }
 
 static long nvm_ioctl_info(struct file *file, void __user *arg)
@@ -923,16 +1384,14 @@ static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
 		struct nvm_ioctl_device_info *info = &devices->info[i];
 
 		sprintf(info->devname, "%s", dev->name);
-		if (dev->mt) {
-			info->bmversion[0] = dev->mt->version[0];
-			info->bmversion[1] = dev->mt->version[1];
-			info->bmversion[2] = dev->mt->version[2];
-			sprintf(info->bmname, "%s", dev->mt->name);
-		} else {
-			sprintf(info->bmname, "none");
-		}
 
+		/* kept for compatibility */
+		info->bmversion[0] = 1;
+		info->bmversion[1] = 0;
+		info->bmversion[2] = 0;
+		sprintf(info->bmname, "%s", "gennvm");
 		i++;
+
 		if (i > 31) {
 			pr_err("nvm: max 31 devices can be reported.\n");
 			break;
@@ -994,7 +1453,7 @@ static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
 	}
 
 	list_for_each_entry(dev, &nvm_devices, devices) {
-		ret = dev->mt->remove_tgt(dev, &remove);
+		ret = nvm_remove_tgt(dev, &remove);
 		if (!ret)
 			break;
 	}
@@ -1002,47 +1461,7 @@ static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
 	return ret;
 }
 
-static void nvm_setup_nvm_sb_info(struct nvm_sb_info *info)
-{
-	info->seqnr = 1;
-	info->erase_cnt = 0;
-	info->version = 1;
-}
-
-static long __nvm_ioctl_dev_init(struct nvm_ioctl_dev_init *init)
-{
-	struct nvm_dev *dev;
-	struct nvm_sb_info info;
-	int ret;
-
-	down_write(&nvm_lock);
-	dev = nvm_find_nvm_dev(init->dev);
-	up_write(&nvm_lock);
-	if (!dev) {
-		pr_err("nvm: device not found\n");
-		return -EINVAL;
-	}
-
-	nvm_setup_nvm_sb_info(&info);
-
-	strncpy(info.mmtype, init->mmtype, NVM_MMTYPE_LEN);
-	info.fs_ppa.ppa = -1;
-
-	if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) {
-		ret = nvm_init_sysblock(dev, &info);
-		if (ret)
-			return ret;
-	}
-
-	memcpy(&dev->sb, &info, sizeof(struct nvm_sb_info));
-
-	down_write(&nvm_lock);
-	dev->mt = nvm_init_mgr(dev);
-	up_write(&nvm_lock);
-
-	return 0;
-}
-
+/* kept for compatibility reasons */
 static long nvm_ioctl_dev_init(struct file *file, void __user *arg)
 {
 	struct nvm_ioctl_dev_init init;
@@ -1058,15 +1477,13 @@ static long nvm_ioctl_dev_init(struct file *file, void __user *arg)
 		return -EINVAL;
 	}
 
-	init.dev[DISK_NAME_LEN - 1] = '\0';
-
-	return __nvm_ioctl_dev_init(&init);
+	return 0;
 }
 
+/* Kept for compatibility reasons */
 static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
 {
 	struct nvm_ioctl_dev_factory fact;
-	struct nvm_dev *dev;
 
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
@@ -1079,19 +1496,6 @@ static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
 	if (fact.flags & ~(NVM_FACTORY_NR_BITS - 1))
 		return -EINVAL;
 
-	down_write(&nvm_lock);
-	dev = nvm_find_nvm_dev(fact.dev);
-	up_write(&nvm_lock);
-	if (!dev) {
-		pr_err("nvm: device not found\n");
-		return -EINVAL;
-	}
-
-	nvm_free_mgr(dev);
-
-	if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT)
-		return nvm_dev_factory(dev, fact.flags);
-
 	return 0;
 }
 
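The new area allocator in nvm_get_area() keeps dev->area_list sorted by start
sector and places a new area in the first gap large enough to hold it. A small
userspace sketch of that same first-fit scan (illustrative only; the names are
invented, and the kernel's linked list is flattened to an array for brevity):

#include <stdio.h>

/* Hypothetical flattened view of dev->area_list: sorted, non-overlapping
 * [begin, end) intervals, as maintained by nvm_get_area()/nvm_put_area().
 */
struct sketch_area {
	unsigned long begin;
	unsigned long end;	/* end is excluded */
};

/*
 * First-fit scan mirroring the list walk in nvm_get_area(): start at 0,
 * and whenever the candidate window [begin, begin + len) would collide
 * with an existing area, restart just past it. Returns the begin sector,
 * or -1 if the request does not fit below max_sectors.
 */
static long sketch_get_area(const struct sketch_area *areas, unsigned long nr,
			    unsigned long len, unsigned long max_sectors)
{
	unsigned long begin = 0;
	unsigned long i;

	for (i = 0; i < nr; i++) {
		if (begin + len > areas[i].begin)
			begin = areas[i].end;	/* collides: skip past it */
		else
			break;			/* gap found */
	}

	if (begin + len > max_sectors)
		return -1;

	return (long)begin;
}

int main(void)
{
	struct sketch_area areas[] = { { 0, 64 }, { 128, 256 } };

	/* The first gap that fits 32 sectors starts at 64. */
	printf("%ld\n", sketch_get_area(areas, 2, 32, 1024));
	return 0;
}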