author		Matias Bjørling <m@bjorling.me>		2015-10-28 14:54:55 -0400
committer	Jens Axboe <axboe@fb.com>		2015-10-29 03:21:42 -0400
commit		cd9e9808d18fe7107c306f6e71c8be7230ee42b4 (patch)
tree		1e447cc1019e8128753bbf78712484b8eb8144f4
parent		b3975e94f5688691f487ea00126dabe8f5bee3af (diff)
lightnvm: Support for Open-Channel SSDs
Open-channel SSDs are devices that share responsibilities with the host in order to implement and maintain features that typical SSDs keep strictly in firmware. These include (i) the Flash Translation Layer (FTL), (ii) bad block management, and (iii) hardware units such as the flash controller, the interface controller, and large amounts of flash chips. In this way, Open-channel SSDs expose direct access to their physical flash storage, while keeping a subset of the internal features of SSDs.

LightNVM is a specification that gives support to Open-channel SSDs. LightNVM allows the host to manage data placement, garbage collection, and parallelism. Device-specific responsibilities such as bad block management, FTL extensions to support atomic IOs, or metadata persistence are still handled by the device.

The implementation of LightNVM consists of two parts: core and (multiple) targets. The core implements functionality shared across targets: initialization, teardown, and statistics. The targets implement the interface that exposes physical flash to user-space applications. Examples of such targets include key-value stores, object stores, as well as traditional block devices, which can be application-specific.

Contributions in this patch from:

  Javier Gonzalez <jg@lightnvm.io>
  Dongsheng Yang <yangds.fnst@cn.fujitsu.com>
  Jesper Madsen <jmad@itu.dk>

Signed-off-by: Matias Bjørling <m@bjorling.me>
Signed-off-by: Jens Axboe <axboe@fb.com>
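A minimal sketch of the target side of that split, assuming a hypothetical "stub" target (the name and the empty callback bodies are placeholders; only the nvm_tgt_type hooks and nvm_register_target()/nvm_unregister_target() come from this patch):

/*
 * Sketch of a minimal LightNVM target module. "stub" and the empty
 * callback bodies are placeholders.
 */
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/lightnvm.h>

static void stub_make_rq(struct request_queue *q, struct bio *bio)
{
	/* translate the bio into an nvm_rq and pass it to nvm_submit_io() */
}

static sector_t stub_capacity(void *private)
{
	return 0;	/* capacity exposed through the target's gendisk */
}

static void *stub_init(struct nvm_dev *dev, struct gendisk *tdisk,
		       int lun_begin, int lun_end)
{
	/* allocate per-target state covering LUNs lun_begin..lun_end */
	return NULL;
}

static void stub_exit(void *private)
{
	/* free per-target state */
}

static struct nvm_tgt_type tt_stub = {
	.name		= "stub",
	.version	= {1, 0, 0},
	.make_rq	= stub_make_rq,
	.capacity	= stub_capacity,
	.init		= stub_init,
	.exit		= stub_exit,
};

static int __init stub_module_init(void)
{
	return nvm_register_target(&tt_stub);
}

static void __exit stub_module_exit(void)
{
	nvm_unregister_target(&tt_stub);
}

module_init(stub_module_init);
module_exit(stub_module_exit);
MODULE_LICENSE("GPL");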
-rw-r--r--	Documentation/ioctl/ioctl-number.txt	|   1
-rw-r--r--	MAINTAINERS				|   8
-rw-r--r--	drivers/Kconfig				|   2
-rw-r--r--	drivers/Makefile			|   1
-rw-r--r--	drivers/lightnvm/Kconfig		|  28
-rw-r--r--	drivers/lightnvm/Makefile		|   5
-rw-r--r--	drivers/lightnvm/core.c			| 826
-rw-r--r--	include/linux/lightnvm.h		| 522
-rw-r--r--	include/uapi/linux/lightnvm.h		| 130
9 files changed, 1523 insertions(+), 0 deletions(-)
diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt
index df1b25eb8382..8a44d44cf901 100644
--- a/Documentation/ioctl/ioctl-number.txt
+++ b/Documentation/ioctl/ioctl-number.txt
@@ -149,6 +149,7 @@ Code Seq#(hex) Include File Comments
 'K'	all	linux/kd.h
 'L'	00-1F	linux/loop.h				conflict!
 'L'	10-1F	drivers/scsi/mpt2sas/mpt2sas_ctl.h	conflict!
+'L'	20-2F	linux/lightnvm.h
 'L'	E0-FF	linux/ppdd.h				encrypted disk device driver
 					<http://linux01.gwdg.de/~alatham/ppdd.html>
 'M'	all	linux/soundcard.h			conflict!
diff --git a/MAINTAINERS b/MAINTAINERS
index f1d5a59432fc..d8be12c57f84 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6243,6 +6243,14 @@ F: drivers/nvdimm/pmem.c
 F:	include/linux/pmem.h
 F:	arch/*/include/asm/pmem.h
 
+LIGHTNVM PLATFORM SUPPORT
+M:	Matias Bjorling <mb@lightnvm.io>
+W:	http://github/OpenChannelSSD
+S:	Maintained
+F:	drivers/lightnvm/
+F:	include/linux/lightnvm.h
+F:	include/uapi/linux/lightnvm.h
+
 LINUX FOR IBM pSERIES (RS/6000)
 M:	Paul Mackerras <paulus@au.ibm.com>
 W:	http://www.ibm.com/linux/ltc/projects/ppc
diff --git a/drivers/Kconfig b/drivers/Kconfig
index e69ec82ac80a..3a5ab4d5873d 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -44,6 +44,8 @@ source "drivers/net/Kconfig"
 
 source "drivers/isdn/Kconfig"
 
+source "drivers/lightnvm/Kconfig"
+
 # input before char - char/joystick depends on it. As does USB.
 
 source "drivers/input/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 42f9dd5f07c8..7f1b7c5a1cfd 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -70,6 +70,7 @@ obj-$(CONFIG_NUBUS) += nubus/
 obj-y				+= macintosh/
 obj-$(CONFIG_IDE)		+= ide/
 obj-$(CONFIG_SCSI)		+= scsi/
+obj-$(CONFIG_NVM)		+= lightnvm/
 obj-y				+= nvme/
 obj-$(CONFIG_ATA)		+= ata/
 obj-$(CONFIG_TARGET_CORE)	+= target/
diff --git a/drivers/lightnvm/Kconfig b/drivers/lightnvm/Kconfig
new file mode 100644
index 000000000000..d4f309f127cd
--- /dev/null
+++ b/drivers/lightnvm/Kconfig
@@ -0,0 +1,28 @@
1#
2# Open-Channel SSD NVM configuration
3#
4
5menuconfig NVM
6 bool "Open-Channel SSD target support"
7 depends on BLOCK
8 help
9	  Say Y here to enable Open-channel SSDs.
10
11	  Open-Channel SSDs implement a set of extensions to SSDs, which
12	  expose direct access to the underlying non-volatile memory.
13
14	  If you say N, all options in this submenu will be skipped and disabled;
15	  only do this if you know what you are doing.
16
17if NVM
18
19config NVM_DEBUG
20 bool "Open-Channel SSD debugging support"
21 ---help---
22 Exposes a debug management interface to create/remove targets at:
23
24 /sys/module/lnvm/parameters/configure_debug
25
26	  Use it to create/remove targets without going through the IOCTL interface.
27
28endif # NVM
diff --git a/drivers/lightnvm/Makefile b/drivers/lightnvm/Makefile
new file mode 100644
index 000000000000..38185e990d5d
--- /dev/null
+++ b/drivers/lightnvm/Makefile
@@ -0,0 +1,5 @@
1#
2# Makefile for Open-Channel SSDs.
3#
4
5obj-$(CONFIG_NVM) := core.o
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
new file mode 100644
index 000000000000..f659e605a406
--- /dev/null
+++ b/drivers/lightnvm/core.c
@@ -0,0 +1,826 @@
1/*
2 * Copyright (C) 2015 IT University of Copenhagen. All rights reserved.
3 * Initial release: Matias Bjorling <m@bjorling.me>
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version
7 * 2 as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; see the file COPYING. If not, write to
16 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
17 * USA.
18 *
19 */
20
21#include <linux/blkdev.h>
22#include <linux/blk-mq.h>
23#include <linux/list.h>
24#include <linux/types.h>
25#include <linux/sem.h>
26#include <linux/bitmap.h>
27#include <linux/module.h>
28#include <linux/miscdevice.h>
29#include <linux/lightnvm.h>
30#include <uapi/linux/lightnvm.h>
31
32static LIST_HEAD(nvm_targets);
33static LIST_HEAD(nvm_mgrs);
34static LIST_HEAD(nvm_devices);
35static DECLARE_RWSEM(nvm_lock);
36
37static struct nvm_tgt_type *nvm_find_target_type(const char *name)
38{
39 struct nvm_tgt_type *tt;
40
41 list_for_each_entry(tt, &nvm_targets, list)
42 if (!strcmp(name, tt->name))
43 return tt;
44
45 return NULL;
46}
47
48int nvm_register_target(struct nvm_tgt_type *tt)
49{
50 int ret = 0;
51
52 down_write(&nvm_lock);
53 if (nvm_find_target_type(tt->name))
54 ret = -EEXIST;
55 else
56 list_add(&tt->list, &nvm_targets);
57 up_write(&nvm_lock);
58
59 return ret;
60}
61EXPORT_SYMBOL(nvm_register_target);
62
63void nvm_unregister_target(struct nvm_tgt_type *tt)
64{
65 if (!tt)
66 return;
67
68 down_write(&nvm_lock);
69 list_del(&tt->list);
70 up_write(&nvm_lock);
71}
72EXPORT_SYMBOL(nvm_unregister_target);
73
74void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
75 dma_addr_t *dma_handler)
76{
77 return dev->ops->dev_dma_alloc(dev->q, dev->ppalist_pool, mem_flags,
78 dma_handler);
79}
80EXPORT_SYMBOL(nvm_dev_dma_alloc);
81
82void nvm_dev_dma_free(struct nvm_dev *dev, void *ppa_list,
83 dma_addr_t dma_handler)
84{
85 dev->ops->dev_dma_free(dev->ppalist_pool, ppa_list, dma_handler);
86}
87EXPORT_SYMBOL(nvm_dev_dma_free);
88
89static struct nvmm_type *nvm_find_mgr_type(const char *name)
90{
91 struct nvmm_type *mt;
92
93 list_for_each_entry(mt, &nvm_mgrs, list)
94 if (!strcmp(name, mt->name))
95 return mt;
96
97 return NULL;
98}
99
100int nvm_register_mgr(struct nvmm_type *mt)
101{
102 int ret = 0;
103
104 down_write(&nvm_lock);
105 if (nvm_find_mgr_type(mt->name))
106 ret = -EEXIST;
107 else
108 list_add(&mt->list, &nvm_mgrs);
109 up_write(&nvm_lock);
110
111 return ret;
112}
113EXPORT_SYMBOL(nvm_register_mgr);
114
115void nvm_unregister_mgr(struct nvmm_type *mt)
116{
117 if (!mt)
118 return;
119
120 down_write(&nvm_lock);
121 list_del(&mt->list);
122 up_write(&nvm_lock);
123}
124EXPORT_SYMBOL(nvm_unregister_mgr);
125
126static struct nvm_dev *nvm_find_nvm_dev(const char *name)
127{
128 struct nvm_dev *dev;
129
130 list_for_each_entry(dev, &nvm_devices, devices)
131 if (!strcmp(name, dev->name))
132 return dev;
133
134 return NULL;
135}
136
137struct nvm_block *nvm_get_blk(struct nvm_dev *dev, struct nvm_lun *lun,
138 unsigned long flags)
139{
140 return dev->mt->get_blk(dev, lun, flags);
141}
142EXPORT_SYMBOL(nvm_get_blk);
143
144/* Assumes that all valid pages have already been moved on release to bm */
145void nvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
146{
147 return dev->mt->put_blk(dev, blk);
148}
149EXPORT_SYMBOL(nvm_put_blk);
150
151int nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
152{
153 return dev->mt->submit_io(dev, rqd);
154}
155EXPORT_SYMBOL(nvm_submit_io);
156
157int nvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk)
158{
159 return dev->mt->erase_blk(dev, blk, 0);
160}
161EXPORT_SYMBOL(nvm_erase_blk);
162
163static void nvm_core_free(struct nvm_dev *dev)
164{
165 kfree(dev);
166}
167
168static int nvm_core_init(struct nvm_dev *dev)
169{
170 struct nvm_id *id = &dev->identity;
171 struct nvm_id_group *grp = &id->groups[0];
172
173 /* device values */
174 dev->nr_chnls = grp->num_ch;
175 dev->luns_per_chnl = grp->num_lun;
176 dev->pgs_per_blk = grp->num_pg;
177 dev->blks_per_lun = grp->num_blk;
178 dev->nr_planes = grp->num_pln;
179 dev->sec_size = grp->csecs;
180 dev->oob_size = grp->sos;
181 dev->sec_per_pg = grp->fpg_sz / grp->csecs;
182 dev->addr_mode = id->ppat;
183 dev->addr_format = id->ppaf;
184
185 dev->plane_mode = NVM_PLANE_SINGLE;
186 dev->max_rq_size = dev->ops->max_phys_sect * dev->sec_size;
187
188 if (grp->mpos & 0x020202)
189 dev->plane_mode = NVM_PLANE_DOUBLE;
190 if (grp->mpos & 0x040404)
191 dev->plane_mode = NVM_PLANE_QUAD;
192
193 /* calculated values */
194 dev->sec_per_pl = dev->sec_per_pg * dev->nr_planes;
195 dev->sec_per_blk = dev->sec_per_pl * dev->pgs_per_blk;
196 dev->sec_per_lun = dev->sec_per_blk * dev->blks_per_lun;
197 dev->nr_luns = dev->luns_per_chnl * dev->nr_chnls;
198
199 dev->total_blocks = dev->nr_planes *
200 dev->blks_per_lun *
201 dev->luns_per_chnl *
202 dev->nr_chnls;
203 dev->total_pages = dev->total_blocks * dev->pgs_per_blk;
204 INIT_LIST_HEAD(&dev->online_targets);
205
206 return 0;
207}
208
209static void nvm_free(struct nvm_dev *dev)
210{
211 if (!dev)
212 return;
213
214 if (dev->mt)
215 dev->mt->unregister_mgr(dev);
216
217 nvm_core_free(dev);
218}
219
220static int nvm_init(struct nvm_dev *dev)
221{
222 struct nvmm_type *mt;
223 int ret = 0;
224
225 if (!dev->q || !dev->ops)
226 return -EINVAL;
227
228 if (dev->ops->identity(dev->q, &dev->identity)) {
229 pr_err("nvm: device could not be identified\n");
230 ret = -EINVAL;
231 goto err;
232 }
233
234 pr_debug("nvm: ver:%x nvm_vendor:%x groups:%u\n",
235 dev->identity.ver_id, dev->identity.vmnt,
236 dev->identity.cgrps);
237
238 if (dev->identity.ver_id != 1) {
239 pr_err("nvm: device not supported by kernel.");
240 goto err;
241 }
242
243 if (dev->identity.cgrps != 1) {
244 pr_err("nvm: only one group configuration supported.");
245 goto err;
246 }
247
248 ret = nvm_core_init(dev);
249 if (ret) {
250 pr_err("nvm: could not initialize core structures.\n");
251 goto err;
252 }
253
254 /* register with device with a supported manager */
255 list_for_each_entry(mt, &nvm_mgrs, list) {
256 ret = mt->register_mgr(dev);
257 if (ret < 0)
258 goto err; /* initialization failed */
259 if (ret > 0) {
260 dev->mt = mt;
261 break; /* successfully initialized */
262 }
263 }
264
265 if (!ret) {
266 pr_info("nvm: no compatible manager found.\n");
267 return 0;
268 }
269
270 pr_info("nvm: registered %s [%u/%u/%u/%u/%u/%u]\n",
271 dev->name, dev->sec_per_pg, dev->nr_planes,
272 dev->pgs_per_blk, dev->blks_per_lun, dev->nr_luns,
273 dev->nr_chnls);
274 return 0;
275err:
276 nvm_free(dev);
277 pr_err("nvm: failed to initialize nvm\n");
278 return ret;
279}
280
281static void nvm_exit(struct nvm_dev *dev)
282{
283 if (dev->ppalist_pool)
284 dev->ops->destroy_dma_pool(dev->ppalist_pool);
285 nvm_free(dev);
286
287 pr_info("nvm: successfully unloaded\n");
288}
289
290int nvm_register(struct request_queue *q, char *disk_name,
291 struct nvm_dev_ops *ops)
292{
293 struct nvm_dev *dev;
294 int ret;
295
296 if (!ops->identity)
297 return -EINVAL;
298
299 dev = kzalloc(sizeof(struct nvm_dev), GFP_KERNEL);
300 if (!dev)
301 return -ENOMEM;
302
303 dev->q = q;
304 dev->ops = ops;
305 strncpy(dev->name, disk_name, DISK_NAME_LEN);
306
307 ret = nvm_init(dev);
308 if (ret)
309 goto err_init;
310
311 down_write(&nvm_lock);
312 list_add(&dev->devices, &nvm_devices);
313 up_write(&nvm_lock);
314
315 if (dev->ops->max_phys_sect > 1) {
316 dev->ppalist_pool = dev->ops->create_dma_pool(dev->q,
317 "ppalist");
318 if (!dev->ppalist_pool) {
319 pr_err("nvm: could not create ppa pool\n");
320 return -ENOMEM;
321 }
322 } else if (dev->ops->max_phys_sect > 256) {
323 pr_info("nvm: max sectors supported is 256.\n");
324 return -EINVAL;
325 }
326
327 return 0;
328err_init:
329 kfree(dev);
330 return ret;
331}
332EXPORT_SYMBOL(nvm_register);
333
334void nvm_unregister(char *disk_name)
335{
336 struct nvm_dev *dev = nvm_find_nvm_dev(disk_name);
337
338 if (!dev) {
339 pr_err("nvm: could not find device %s to unregister\n",
340 disk_name);
341 return;
342 }
343
344 nvm_exit(dev);
345
346 down_write(&nvm_lock);
347 list_del(&dev->devices);
348 up_write(&nvm_lock);
349}
350EXPORT_SYMBOL(nvm_unregister);
351
352static const struct block_device_operations nvm_fops = {
353 .owner = THIS_MODULE,
354};
355
356static int nvm_create_target(struct nvm_dev *dev,
357 struct nvm_ioctl_create *create)
358{
359 struct nvm_ioctl_create_simple *s = &create->conf.s;
360 struct request_queue *tqueue;
361 struct nvmm_type *mt;
362 struct gendisk *tdisk;
363 struct nvm_tgt_type *tt;
364 struct nvm_target *t;
365 void *targetdata;
366 int ret = 0;
367
368 if (!dev->mt) {
369 /* register with device with a supported NVM manager */
370 list_for_each_entry(mt, &nvm_mgrs, list) {
371 ret = mt->register_mgr(dev);
372 if (ret < 0)
373 return ret; /* initialization failed */
374 if (ret > 0) {
375 dev->mt = mt;
376 break; /* successfully initialized */
377 }
378 }
379
380 if (!ret) {
381 pr_info("nvm: no compatible nvm manager found.\n");
382 return -ENODEV;
383 }
384 }
385
386 tt = nvm_find_target_type(create->tgttype);
387 if (!tt) {
388 pr_err("nvm: target type %s not found\n", create->tgttype);
389 return -EINVAL;
390 }
391
392 down_write(&nvm_lock);
393 list_for_each_entry(t, &dev->online_targets, list) {
394 if (!strcmp(create->tgtname, t->disk->disk_name)) {
395 pr_err("nvm: target name already exists.\n");
396 up_write(&nvm_lock);
397 return -EINVAL;
398 }
399 }
400 up_write(&nvm_lock);
401
402 t = kmalloc(sizeof(struct nvm_target), GFP_KERNEL);
403 if (!t)
404 return -ENOMEM;
405
406 tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node);
407 if (!tqueue)
408 goto err_t;
409 blk_queue_make_request(tqueue, tt->make_rq);
410
411 tdisk = alloc_disk(0);
412 if (!tdisk)
413 goto err_queue;
414
415 sprintf(tdisk->disk_name, "%s", create->tgtname);
416 tdisk->flags = GENHD_FL_EXT_DEVT;
417 tdisk->major = 0;
418 tdisk->first_minor = 0;
419 tdisk->fops = &nvm_fops;
420 tdisk->queue = tqueue;
421
422 targetdata = tt->init(dev, tdisk, s->lun_begin, s->lun_end);
423 if (IS_ERR(targetdata))
424 goto err_init;
425
426 tdisk->private_data = targetdata;
427 tqueue->queuedata = targetdata;
428
429 blk_queue_max_hw_sectors(tqueue, 8 * dev->ops->max_phys_sect);
430
431 set_capacity(tdisk, tt->capacity(targetdata));
432 add_disk(tdisk);
433
434 t->type = tt;
435 t->disk = tdisk;
436
437 down_write(&nvm_lock);
438 list_add_tail(&t->list, &dev->online_targets);
439 up_write(&nvm_lock);
440
441 return 0;
442err_init:
443 put_disk(tdisk);
444err_queue:
445 blk_cleanup_queue(tqueue);
446err_t:
447 kfree(t);
448 return -ENOMEM;
449}
450
451static void nvm_remove_target(struct nvm_target *t)
452{
453 struct nvm_tgt_type *tt = t->type;
454 struct gendisk *tdisk = t->disk;
455 struct request_queue *q = tdisk->queue;
456
457 lockdep_assert_held(&nvm_lock);
458
459 del_gendisk(tdisk);
460 if (tt->exit)
461 tt->exit(tdisk->private_data);
462
463 blk_cleanup_queue(q);
464
465 put_disk(tdisk);
466
467 list_del(&t->list);
468 kfree(t);
469}
470
471static int __nvm_configure_create(struct nvm_ioctl_create *create)
472{
473 struct nvm_dev *dev;
474 struct nvm_ioctl_create_simple *s;
475
476 dev = nvm_find_nvm_dev(create->dev);
477 if (!dev) {
478 pr_err("nvm: device not found\n");
479 return -EINVAL;
480 }
481
482 if (create->conf.type != NVM_CONFIG_TYPE_SIMPLE) {
483 pr_err("nvm: config type not valid\n");
484 return -EINVAL;
485 }
486 s = &create->conf.s;
487
488 if (s->lun_begin > s->lun_end || s->lun_end > dev->nr_luns) {
489 pr_err("nvm: lun out of bound (%u:%u > %u)\n",
490 s->lun_begin, s->lun_end, dev->nr_luns);
491 return -EINVAL;
492 }
493
494 return nvm_create_target(dev, create);
495}
496
497static int __nvm_configure_remove(struct nvm_ioctl_remove *remove)
498{
499 struct nvm_target *t = NULL;
500 struct nvm_dev *dev;
501 int ret = -1;
502
503 down_write(&nvm_lock);
504 list_for_each_entry(dev, &nvm_devices, devices)
505 list_for_each_entry(t, &dev->online_targets, list) {
506 if (!strcmp(remove->tgtname, t->disk->disk_name)) {
507 nvm_remove_target(t);
508 ret = 0;
509 break;
510 }
511 }
512 up_write(&nvm_lock);
513
514 if (ret) {
515 pr_err("nvm: target \"%s\" doesn't exist.\n", remove->tgtname);
516 return -EINVAL;
517 }
518
519 return 0;
520}
521
522#ifdef CONFIG_NVM_DEBUG
523static int nvm_configure_show(const char *val)
524{
525 struct nvm_dev *dev;
526 char opcode, devname[DISK_NAME_LEN];
527 int ret;
528
529 ret = sscanf(val, "%c %32s", &opcode, devname);
530 if (ret != 2) {
531 pr_err("nvm: invalid command. Use \"opcode devicename\".\n");
532 return -EINVAL;
533 }
534
535 dev = nvm_find_nvm_dev(devname);
536 if (!dev) {
537 pr_err("nvm: device not found\n");
538 return -EINVAL;
539 }
540
541 if (!dev->mt)
542 return 0;
543
544 dev->mt->free_blocks_print(dev);
545
546 return 0;
547}
548
549static int nvm_configure_remove(const char *val)
550{
551 struct nvm_ioctl_remove remove;
552 char opcode;
553 int ret;
554
555 ret = sscanf(val, "%c %256s", &opcode, remove.tgtname);
556 if (ret != 2) {
557 pr_err("nvm: invalid command. Use \"d targetname\".\n");
558 return -EINVAL;
559 }
560
561 remove.flags = 0;
562
563 return __nvm_configure_remove(&remove);
564}
565
566static int nvm_configure_create(const char *val)
567{
568 struct nvm_ioctl_create create;
569 char opcode;
570 int lun_begin, lun_end, ret;
571
572 ret = sscanf(val, "%c %256s %256s %48s %u:%u", &opcode, create.dev,
573 create.tgtname, create.tgttype,
574 &lun_begin, &lun_end);
575 if (ret != 6) {
576 pr_err("nvm: invalid command. Use \"opcode device name tgttype lun_begin:lun_end\".\n");
577 return -EINVAL;
578 }
579
580 create.flags = 0;
581 create.conf.type = NVM_CONFIG_TYPE_SIMPLE;
582 create.conf.s.lun_begin = lun_begin;
583 create.conf.s.lun_end = lun_end;
584
585 return __nvm_configure_create(&create);
586}
587
588
589/* Exposes administrative interface through /sys/module/lnvm/configure_by_str */
590static int nvm_configure_by_str_event(const char *val,
591 const struct kernel_param *kp)
592{
593 char opcode;
594 int ret;
595
596 ret = sscanf(val, "%c", &opcode);
597 if (ret != 1) {
598 pr_err("nvm: string must have the format of \"cmd ...\"\n");
599 return -EINVAL;
600 }
601
602 switch (opcode) {
603 case 'a':
604 return nvm_configure_create(val);
605 case 'd':
606 return nvm_configure_remove(val);
607 case 's':
608 return nvm_configure_show(val);
609 default:
610 pr_err("nvm: invalid command\n");
611 return -EINVAL;
612 }
613
614 return 0;
615}
616
617static int nvm_configure_get(char *buf, const struct kernel_param *kp)
618{
619 int sz = 0;
620 char *buf_start = buf;
621 struct nvm_dev *dev;
622
623 buf += sprintf(buf, "available devices:\n");
624 down_write(&nvm_lock);
625 list_for_each_entry(dev, &nvm_devices, devices) {
626 if (sz > 4095 - DISK_NAME_LEN)
627 break;
628 buf += sprintf(buf, " %32s\n", dev->name);
629 }
630 up_write(&nvm_lock);
631
632 return buf - buf_start - 1;
633}
634
635static const struct kernel_param_ops nvm_configure_by_str_event_param_ops = {
636 .set = nvm_configure_by_str_event,
637 .get = nvm_configure_get,
638};
639
640#undef MODULE_PARAM_PREFIX
641#define MODULE_PARAM_PREFIX "lnvm."
642
643module_param_cb(configure_debug, &nvm_configure_by_str_event_param_ops, NULL,
644 0644);
645
646#endif /* CONFIG_NVM_DEBUG */
647
648static long nvm_ioctl_info(struct file *file, void __user *arg)
649{
650 struct nvm_ioctl_info *info;
651 struct nvm_tgt_type *tt;
652 int tgt_iter = 0;
653
654 if (!capable(CAP_SYS_ADMIN))
655 return -EPERM;
656
657 info = memdup_user(arg, sizeof(struct nvm_ioctl_info));
658 if (IS_ERR(info))
659 return -EFAULT;
660
661 info->version[0] = NVM_VERSION_MAJOR;
662 info->version[1] = NVM_VERSION_MINOR;
663 info->version[2] = NVM_VERSION_PATCH;
664
665 down_write(&nvm_lock);
666 list_for_each_entry(tt, &nvm_targets, list) {
667 struct nvm_ioctl_info_tgt *tgt = &info->tgts[tgt_iter];
668
669 tgt->version[0] = tt->version[0];
670 tgt->version[1] = tt->version[1];
671 tgt->version[2] = tt->version[2];
672 strncpy(tgt->tgtname, tt->name, NVM_TTYPE_NAME_MAX);
673
674 tgt_iter++;
675 }
676
677 info->tgtsize = tgt_iter;
678 up_write(&nvm_lock);
679
680 if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info)))
681 return -EFAULT;
682
683 kfree(info);
684 return 0;
685}
686
687static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
688{
689 struct nvm_ioctl_get_devices *devices;
690 struct nvm_dev *dev;
691 int i = 0;
692
693 if (!capable(CAP_SYS_ADMIN))
694 return -EPERM;
695
696 devices = kzalloc(sizeof(struct nvm_ioctl_get_devices), GFP_KERNEL);
697 if (!devices)
698 return -ENOMEM;
699
700 down_write(&nvm_lock);
701 list_for_each_entry(dev, &nvm_devices, devices) {
702 struct nvm_ioctl_device_info *info = &devices->info[i];
703
704 sprintf(info->devname, "%s", dev->name);
705 if (dev->mt) {
706 info->bmversion[0] = dev->mt->version[0];
707 info->bmversion[1] = dev->mt->version[1];
708 info->bmversion[2] = dev->mt->version[2];
709 sprintf(info->bmname, "%s", dev->mt->name);
710 } else {
711 sprintf(info->bmname, "none");
712 }
713
714 i++;
715 if (i > 31) {
716 pr_err("nvm: max 31 devices can be reported.\n");
717 break;
718 }
719 }
720 up_write(&nvm_lock);
721
722 devices->nr_devices = i;
723
724 if (copy_to_user(arg, devices, sizeof(struct nvm_ioctl_get_devices)))
725 return -EFAULT;
726
727 kfree(devices);
728 return 0;
729}
730
731static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
732{
733 struct nvm_ioctl_create create;
734
735 if (!capable(CAP_SYS_ADMIN))
736 return -EPERM;
737
738 if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create)))
739 return -EFAULT;
740
741 create.dev[DISK_NAME_LEN - 1] = '\0';
742 create.tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
743 create.tgtname[DISK_NAME_LEN - 1] = '\0';
744
745 if (create.flags != 0) {
746 pr_err("nvm: no flags supported\n");
747 return -EINVAL;
748 }
749
750 return __nvm_configure_create(&create);
751}
752
753static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
754{
755 struct nvm_ioctl_remove remove;
756
757 if (!capable(CAP_SYS_ADMIN))
758 return -EPERM;
759
760 if (copy_from_user(&remove, arg, sizeof(struct nvm_ioctl_remove)))
761 return -EFAULT;
762
763 remove.tgtname[DISK_NAME_LEN - 1] = '\0';
764
765 if (remove.flags != 0) {
766 pr_err("nvm: no flags supported\n");
767 return -EINVAL;
768 }
769
770 return __nvm_configure_remove(&remove);
771}
772
773static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
774{
775 void __user *argp = (void __user *)arg;
776
777 switch (cmd) {
778 case NVM_INFO:
779 return nvm_ioctl_info(file, argp);
780 case NVM_GET_DEVICES:
781 return nvm_ioctl_get_devices(file, argp);
782 case NVM_DEV_CREATE:
783 return nvm_ioctl_dev_create(file, argp);
784 case NVM_DEV_REMOVE:
785 return nvm_ioctl_dev_remove(file, argp);
786 }
787 return 0;
788}
789
790static const struct file_operations _ctl_fops = {
791 .open = nonseekable_open,
792 .unlocked_ioctl = nvm_ctl_ioctl,
793 .owner = THIS_MODULE,
794 .llseek = noop_llseek,
795};
796
797static struct miscdevice _nvm_misc = {
798 .minor = MISC_DYNAMIC_MINOR,
799 .name = "lightnvm",
800 .nodename = "lightnvm/control",
801 .fops = &_ctl_fops,
802};
803
804MODULE_ALIAS_MISCDEV(MISC_DYNAMIC_MINOR);
805
806static int __init nvm_mod_init(void)
807{
808 int ret;
809
810 ret = misc_register(&_nvm_misc);
811 if (ret)
812 pr_err("nvm: misc_register failed for control device");
813
814 return ret;
815}
816
817static void __exit nvm_mod_exit(void)
818{
819 misc_deregister(&_nvm_misc);
820}
821
822MODULE_AUTHOR("Matias Bjorling <m@bjorling.me>");
823MODULE_LICENSE("GPL v2");
824MODULE_VERSION("0.1");
825module_init(nvm_mod_init);
826module_exit(nvm_mod_exit);
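A minimal sketch of how a low-level driver would attach its request queue to the core above, assuming a hypothetical "mydrv" driver (only the ->identity callback is mandatory — nvm_register() rejects ops without it — and all names below are placeholders):

/*
 * Sketch: attaching an existing block driver's queue to LightNVM.
 * Driver and callback names are placeholders.
 */
#include <linux/blkdev.h>
#include <linux/lightnvm.h>

static int mydrv_nvm_identity(struct request_queue *q, struct nvm_id *id)
{
	/* fill *id; groups[0] describes channels, LUNs, blocks and pages */
	return 0;
}

static int mydrv_nvm_submit_io(struct request_queue *q, struct nvm_rq *rqd)
{
	/* turn rqd (opcode, ppa_addr/ppa_list, bio) into a device command */
	return 0;
}

static struct nvm_dev_ops mydrv_nvm_ops = {
	.identity	= mydrv_nvm_identity,
	.submit_io	= mydrv_nvm_submit_io,
	.max_phys_sect	= 1,	/* >1 also requires the dev_dma_* callbacks */
};

/* called by the driver once its request queue and disk name exist */
static int mydrv_attach_lightnvm(struct request_queue *q, char *disk_name)
{
	return nvm_register(q, disk_name, &mydrv_nvm_ops);
}

static void mydrv_detach_lightnvm(char *disk_name)
{
	nvm_unregister(disk_name);
}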
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
new file mode 100644
index 000000000000..122b176600fa
--- /dev/null
+++ b/include/linux/lightnvm.h
@@ -0,0 +1,522 @@
1#ifndef NVM_H
2#define NVM_H
3
4enum {
5 NVM_IO_OK = 0,
6 NVM_IO_REQUEUE = 1,
7 NVM_IO_DONE = 2,
8 NVM_IO_ERR = 3,
9
10 NVM_IOTYPE_NONE = 0,
11 NVM_IOTYPE_GC = 1,
12};
13
14#ifdef CONFIG_NVM
15
16#include <linux/blkdev.h>
17#include <linux/types.h>
18#include <linux/file.h>
19#include <linux/dmapool.h>
20
21enum {
22 /* HW Responsibilities */
23 NVM_RSP_L2P = 1 << 0,
24 NVM_RSP_ECC = 1 << 1,
25
26 /* Physical Adressing Mode */
27 NVM_ADDRMODE_LINEAR = 0,
28 NVM_ADDRMODE_CHANNEL = 1,
29
30 /* Plane programming mode for LUN */
31 NVM_PLANE_SINGLE = 0,
32 NVM_PLANE_DOUBLE = 1,
33 NVM_PLANE_QUAD = 2,
34
35 /* Status codes */
36 NVM_RSP_SUCCESS = 0x0,
37 NVM_RSP_NOT_CHANGEABLE = 0x1,
38 NVM_RSP_ERR_FAILWRITE = 0x40ff,
39 NVM_RSP_ERR_EMPTYPAGE = 0x42ff,
40
41 /* Device opcodes */
42 NVM_OP_HBREAD = 0x02,
43 NVM_OP_HBWRITE = 0x81,
44 NVM_OP_PWRITE = 0x91,
45 NVM_OP_PREAD = 0x92,
46 NVM_OP_ERASE = 0x90,
47
48 /* PPA Command Flags */
49 NVM_IO_SNGL_ACCESS = 0x0,
50 NVM_IO_DUAL_ACCESS = 0x1,
51 NVM_IO_QUAD_ACCESS = 0x2,
52
53 NVM_IO_SUSPEND = 0x80,
54 NVM_IO_SLC_MODE = 0x100,
55 NVM_IO_SCRAMBLE_DISABLE = 0x200,
56};
57
58struct nvm_id_group {
59 u8 mtype;
60 u8 fmtype;
61 u16 res16;
62 u8 num_ch;
63 u8 num_lun;
64 u8 num_pln;
65 u16 num_blk;
66 u16 num_pg;
67 u16 fpg_sz;
68 u16 csecs;
69 u16 sos;
70 u32 trdt;
71 u32 trdm;
72 u32 tprt;
73 u32 tprm;
74 u32 tbet;
75 u32 tbem;
76 u32 mpos;
77 u16 cpar;
78 u8 res[913];
79} __packed;
80
81struct nvm_addr_format {
82 u8 ch_offset;
83 u8 ch_len;
84 u8 lun_offset;
85 u8 lun_len;
86 u8 pln_offset;
87 u8 pln_len;
88 u8 blk_offset;
89 u8 blk_len;
90 u8 pg_offset;
91 u8 pg_len;
92 u8 sect_offset;
93 u8 sect_len;
94 u8 res[4];
95};
96
97struct nvm_id {
98 u8 ver_id;
99 u8 vmnt;
100 u8 cgrps;
101 u8 res[5];
102 u32 cap;
103 u32 dom;
104 struct nvm_addr_format ppaf;
105 u8 ppat;
106 u8 resv[224];
107 struct nvm_id_group groups[4];
108} __packed;
109
110struct nvm_target {
111 struct list_head list;
112 struct nvm_tgt_type *type;
113 struct gendisk *disk;
114};
115
116struct nvm_tgt_instance {
117 struct nvm_tgt_type *tt;
118};
119
120#define ADDR_EMPTY (~0ULL)
121
122#define NVM_VERSION_MAJOR 1
123#define NVM_VERSION_MINOR 0
124#define NVM_VERSION_PATCH 0
125
126#define NVM_SEC_BITS (8)
127#define NVM_PL_BITS (6)
128#define NVM_PG_BITS (16)
129#define NVM_BLK_BITS (16)
130#define NVM_LUN_BITS (10)
131#define NVM_CH_BITS (8)
132
133struct ppa_addr {
134 union {
135 /* Channel-based PPA format in nand 4x2x2x2x8x10 */
136 struct {
137 sector_t ch : 4;
138 sector_t sec : 2; /* 4 sectors per page */
139 sector_t pl : 2; /* 4 planes per LUN */
140 sector_t lun : 2; /* 4 LUNs per channel */
141 sector_t pg : 8; /* 256 pages per block */
142 sector_t blk : 10;/* 1024 blocks per plane */
143 sector_t resved : 36;
144 } chnl;
145
146 /* Generic structure for all addresses */
147 struct {
148 sector_t sec : NVM_SEC_BITS;
149 sector_t pl : NVM_PL_BITS;
150 sector_t pg : NVM_PG_BITS;
151 sector_t blk : NVM_BLK_BITS;
152 sector_t lun : NVM_LUN_BITS;
153 sector_t ch : NVM_CH_BITS;
154 } g;
155
156 sector_t ppa;
157 };
158} __packed;
159
160struct nvm_rq {
161 struct nvm_tgt_instance *ins;
162 struct nvm_dev *dev;
163
164 struct bio *bio;
165
166 union {
167 struct ppa_addr ppa_addr;
168 dma_addr_t dma_ppa_list;
169 };
170
171 struct ppa_addr *ppa_list;
172
173 void *metadata;
174 dma_addr_t dma_metadata;
175
176 uint8_t opcode;
177 uint16_t nr_pages;
178 uint16_t flags;
179};
180
181static inline struct nvm_rq *nvm_rq_from_pdu(void *pdu)
182{
183 return pdu - sizeof(struct nvm_rq);
184}
185
186static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata)
187{
188 return rqdata + 1;
189}
190
191struct nvm_block;
192
193typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *);
194typedef int (nvm_bb_update_fn)(u32, void *, unsigned int, void *);
195typedef int (nvm_id_fn)(struct request_queue *, struct nvm_id *);
196typedef int (nvm_get_l2p_tbl_fn)(struct request_queue *, u64, u32,
197 nvm_l2p_update_fn *, void *);
198typedef int (nvm_op_bb_tbl_fn)(struct request_queue *, int, unsigned int,
199 nvm_bb_update_fn *, void *);
200typedef int (nvm_op_set_bb_fn)(struct request_queue *, struct nvm_rq *, int);
201typedef int (nvm_submit_io_fn)(struct request_queue *, struct nvm_rq *);
202typedef int (nvm_erase_blk_fn)(struct request_queue *, struct nvm_rq *);
203typedef void *(nvm_create_dma_pool_fn)(struct request_queue *, char *);
204typedef void (nvm_destroy_dma_pool_fn)(void *);
205typedef void *(nvm_dev_dma_alloc_fn)(struct request_queue *, void *, gfp_t,
206 dma_addr_t *);
207typedef void (nvm_dev_dma_free_fn)(void *, void*, dma_addr_t);
208
209struct nvm_dev_ops {
210 nvm_id_fn *identity;
211 nvm_get_l2p_tbl_fn *get_l2p_tbl;
212 nvm_op_bb_tbl_fn *get_bb_tbl;
213 nvm_op_set_bb_fn *set_bb;
214
215 nvm_submit_io_fn *submit_io;
216 nvm_erase_blk_fn *erase_block;
217
218 nvm_create_dma_pool_fn *create_dma_pool;
219 nvm_destroy_dma_pool_fn *destroy_dma_pool;
220 nvm_dev_dma_alloc_fn *dev_dma_alloc;
221 nvm_dev_dma_free_fn *dev_dma_free;
222
223 uint8_t max_phys_sect;
224};
225
226struct nvm_lun {
227 int id;
228
229 int lun_id;
230 int chnl_id;
231
232 unsigned int nr_free_blocks; /* Number of unused blocks */
233 struct nvm_block *blocks;
234
235 spinlock_t lock;
236};
237
238struct nvm_block {
239 struct list_head list;
240 struct nvm_lun *lun;
241 unsigned long id;
242
243 void *priv;
244 int type;
245};
246
247struct nvm_dev {
248 struct nvm_dev_ops *ops;
249
250 struct list_head devices;
251 struct list_head online_targets;
252
253 /* Media manager */
254 struct nvmm_type *mt;
255 void *mp;
256
257 /* Device information */
258 int nr_chnls;
259 int nr_planes;
260 int luns_per_chnl;
261 int sec_per_pg; /* only sectors for a single page */
262 int pgs_per_blk;
263 int blks_per_lun;
264 int sec_size;
265 int oob_size;
266 int addr_mode;
267 struct nvm_addr_format addr_format;
268
269 /* Calculated/Cached values. These do not reflect the actual usable
270 * blocks at run-time.
271 */
272 int max_rq_size;
273 int plane_mode; /* drive device in single, double or quad mode */
274
275 int sec_per_pl; /* all sectors across planes */
276 int sec_per_blk;
277 int sec_per_lun;
278
279 unsigned long total_pages;
280 unsigned long total_blocks;
281 int nr_luns;
282 unsigned max_pages_per_blk;
283
284 void *ppalist_pool;
285
286 struct nvm_id identity;
287
288 /* Backend device */
289 struct request_queue *q;
290 char name[DISK_NAME_LEN];
291};
292
293/* fallback conversion */
294static struct ppa_addr __generic_to_linear_addr(struct nvm_dev *dev,
295 struct ppa_addr r)
296{
297 struct ppa_addr l;
298
299 l.ppa = r.g.sec +
300 r.g.pg * dev->sec_per_pg +
301 r.g.blk * (dev->pgs_per_blk *
302 dev->sec_per_pg) +
303 r.g.lun * (dev->blks_per_lun *
304 dev->pgs_per_blk *
305 dev->sec_per_pg) +
306 r.g.ch * (dev->blks_per_lun *
307 dev->pgs_per_blk *
308 dev->luns_per_chnl *
309 dev->sec_per_pg);
310
311 return l;
312}
313
314/* fallback conversion */
315static struct ppa_addr __linear_to_generic_addr(struct nvm_dev *dev,
316 struct ppa_addr r)
317{
318 struct ppa_addr l;
319 int secs, pgs, blks, luns;
320 sector_t ppa = r.ppa;
321
322 l.ppa = 0;
323
324 div_u64_rem(ppa, dev->sec_per_pg, &secs);
325 l.g.sec = secs;
326
327 sector_div(ppa, dev->sec_per_pg);
328 div_u64_rem(ppa, dev->sec_per_blk, &pgs);
329 l.g.pg = pgs;
330
331 sector_div(ppa, dev->pgs_per_blk);
332 div_u64_rem(ppa, dev->blks_per_lun, &blks);
333 l.g.blk = blks;
334
335 sector_div(ppa, dev->blks_per_lun);
336 div_u64_rem(ppa, dev->luns_per_chnl, &luns);
337 l.g.lun = luns;
338
339 sector_div(ppa, dev->luns_per_chnl);
340 l.g.ch = ppa;
341
342 return l;
343}
344
345static struct ppa_addr __generic_to_chnl_addr(struct ppa_addr r)
346{
347 struct ppa_addr l;
348
349 l.ppa = 0;
350
351 l.chnl.sec = r.g.sec;
352 l.chnl.pl = r.g.pl;
353 l.chnl.pg = r.g.pg;
354 l.chnl.blk = r.g.blk;
355 l.chnl.lun = r.g.lun;
356 l.chnl.ch = r.g.ch;
357
358 return l;
359}
360
361static struct ppa_addr __chnl_to_generic_addr(struct ppa_addr r)
362{
363 struct ppa_addr l;
364
365 l.ppa = 0;
366
367 l.g.sec = r.chnl.sec;
368 l.g.pl = r.chnl.pl;
369 l.g.pg = r.chnl.pg;
370 l.g.blk = r.chnl.blk;
371 l.g.lun = r.chnl.lun;
372 l.g.ch = r.chnl.ch;
373
374 return l;
375}
376
377static inline struct ppa_addr addr_to_generic_mode(struct nvm_dev *dev,
378 struct ppa_addr gppa)
379{
380 switch (dev->addr_mode) {
381 case NVM_ADDRMODE_LINEAR:
382 return __linear_to_generic_addr(dev, gppa);
383 case NVM_ADDRMODE_CHANNEL:
384 return __chnl_to_generic_addr(gppa);
385 default:
386 BUG();
387 }
388 return gppa;
389}
390
391static inline struct ppa_addr generic_to_addr_mode(struct nvm_dev *dev,
392 struct ppa_addr gppa)
393{
394 switch (dev->addr_mode) {
395 case NVM_ADDRMODE_LINEAR:
396 return __generic_to_linear_addr(dev, gppa);
397 case NVM_ADDRMODE_CHANNEL:
398 return __generic_to_chnl_addr(gppa);
399 default:
400 BUG();
401 }
402 return gppa;
403}
404
405static inline int ppa_empty(struct ppa_addr ppa_addr)
406{
407 return (ppa_addr.ppa == ADDR_EMPTY);
408}
409
410static inline void ppa_set_empty(struct ppa_addr *ppa_addr)
411{
412 ppa_addr->ppa = ADDR_EMPTY;
413}
414
415static inline struct ppa_addr block_to_ppa(struct nvm_dev *dev,
416 struct nvm_block *blk)
417{
418 struct ppa_addr ppa;
419 struct nvm_lun *lun = blk->lun;
420
421 ppa.ppa = 0;
422 ppa.g.blk = blk->id % dev->blks_per_lun;
423 ppa.g.lun = lun->lun_id;
424 ppa.g.ch = lun->chnl_id;
425
426 return ppa;
427}
428
429typedef void (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *);
430typedef sector_t (nvm_tgt_capacity_fn)(void *);
431typedef int (nvm_tgt_end_io_fn)(struct nvm_rq *, int);
432typedef void *(nvm_tgt_init_fn)(struct nvm_dev *, struct gendisk *, int, int);
433typedef void (nvm_tgt_exit_fn)(void *);
434
435struct nvm_tgt_type {
436 const char *name;
437 unsigned int version[3];
438
439 /* target entry points */
440 nvm_tgt_make_rq_fn *make_rq;
441 nvm_tgt_capacity_fn *capacity;
442 nvm_tgt_end_io_fn *end_io;
443
444 /* module-specific init/teardown */
445 nvm_tgt_init_fn *init;
446 nvm_tgt_exit_fn *exit;
447
448 /* For internal use */
449 struct list_head list;
450};
451
452extern int nvm_register_target(struct nvm_tgt_type *);
453extern void nvm_unregister_target(struct nvm_tgt_type *);
454
455extern void *nvm_dev_dma_alloc(struct nvm_dev *, gfp_t, dma_addr_t *);
456extern void nvm_dev_dma_free(struct nvm_dev *, void *, dma_addr_t);
457
458typedef int (nvmm_register_fn)(struct nvm_dev *);
459typedef void (nvmm_unregister_fn)(struct nvm_dev *);
460typedef struct nvm_block *(nvmm_get_blk_fn)(struct nvm_dev *,
461 struct nvm_lun *, unsigned long);
462typedef void (nvmm_put_blk_fn)(struct nvm_dev *, struct nvm_block *);
463typedef int (nvmm_open_blk_fn)(struct nvm_dev *, struct nvm_block *);
464typedef int (nvmm_close_blk_fn)(struct nvm_dev *, struct nvm_block *);
465typedef void (nvmm_flush_blk_fn)(struct nvm_dev *, struct nvm_block *);
466typedef int (nvmm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
467typedef int (nvmm_end_io_fn)(struct nvm_rq *, int);
468typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *,
469 unsigned long);
470typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int);
471typedef void (nvmm_free_blocks_print_fn)(struct nvm_dev *);
472
473struct nvmm_type {
474 const char *name;
475 unsigned int version[3];
476
477 nvmm_register_fn *register_mgr;
478 nvmm_unregister_fn *unregister_mgr;
479
480 /* Block administration callbacks */
481 nvmm_get_blk_fn *get_blk;
482 nvmm_put_blk_fn *put_blk;
483 nvmm_open_blk_fn *open_blk;
484 nvmm_close_blk_fn *close_blk;
485 nvmm_flush_blk_fn *flush_blk;
486
487 nvmm_submit_io_fn *submit_io;
488 nvmm_end_io_fn *end_io;
489 nvmm_erase_blk_fn *erase_blk;
490
491 /* Configuration management */
492 nvmm_get_lun_fn *get_lun;
493
494 /* Statistics */
495 nvmm_free_blocks_print_fn *free_blocks_print;
496 struct list_head list;
497};
498
499extern int nvm_register_mgr(struct nvmm_type *);
500extern void nvm_unregister_mgr(struct nvmm_type *);
501
502extern struct nvm_block *nvm_get_blk(struct nvm_dev *, struct nvm_lun *,
503 unsigned long);
504extern void nvm_put_blk(struct nvm_dev *, struct nvm_block *);
505
506extern int nvm_register(struct request_queue *, char *,
507 struct nvm_dev_ops *);
508extern void nvm_unregister(char *);
509
510extern int nvm_submit_io(struct nvm_dev *, struct nvm_rq *);
511extern int nvm_erase_blk(struct nvm_dev *, struct nvm_block *);
512#else /* CONFIG_NVM */
513struct nvm_dev_ops;
514
515static inline int nvm_register(struct request_queue *q, char *disk_name,
516 struct nvm_dev_ops *ops)
517{
518 return -EINVAL;
519}
520static inline void nvm_unregister(char *disk_name) {}
521#endif /* CONFIG_NVM */
522#endif /* LIGHTNVM.H */
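A minimal sketch of the address helpers above in use, assuming a hypothetical helper that builds a device PPA for one page and sector of a block handed out by the media manager:

/*
 * Sketch: derive a device address from an nvm_block using the
 * helpers declared above. The function name is a placeholder.
 */
static struct ppa_addr example_page_addr(struct nvm_dev *dev,
					 struct nvm_block *blk,
					 int pg, int sec)
{
	struct ppa_addr ppa = block_to_ppa(dev, blk);	/* sets blk/lun/ch */

	ppa.g.pg = pg;		/* page within the block */
	ppa.g.sec = sec;	/* sector within the page */

	/* translate the generic format into the device's addressing mode */
	return generic_to_addr_mode(dev, ppa);
}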
diff --git a/include/uapi/linux/lightnvm.h b/include/uapi/linux/lightnvm.h
new file mode 100644
index 000000000000..928f98997d8a
--- /dev/null
+++ b/include/uapi/linux/lightnvm.h
@@ -0,0 +1,130 @@
1/*
2 * Copyright (C) 2015 CNEX Labs. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License version
6 * 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; see the file COPYING. If not, write to
15 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
16 * USA.
17 */
18
19#ifndef _UAPI_LINUX_LIGHTNVM_H
20#define _UAPI_LINUX_LIGHTNVM_H
21
22#ifdef __KERNEL__
23#include <linux/kernel.h>
24#include <linux/ioctl.h>
25#else /* __KERNEL__ */
26#include <stdio.h>
27#include <sys/ioctl.h>
28#define DISK_NAME_LEN 32
29#endif /* __KERNEL__ */
30
31#include <linux/types.h>
32#include <linux/ioctl.h>
33
34#define NVM_TTYPE_NAME_MAX 48
35#define NVM_TTYPE_MAX 63
36
37#define NVM_CTRL_FILE "/dev/lightnvm/control"
38
39struct nvm_ioctl_info_tgt {
40 __u32 version[3];
41 __u32 reserved;
42 char tgtname[NVM_TTYPE_NAME_MAX];
43};
44
45struct nvm_ioctl_info {
46 __u32 version[3]; /* in/out - major, minor, patch */
47 __u16 tgtsize; /* number of targets */
48 __u16 reserved16; /* pad to 4K page */
49 __u32 reserved[12];
50 struct nvm_ioctl_info_tgt tgts[NVM_TTYPE_MAX];
51};
52
53enum {
54 NVM_DEVICE_ACTIVE = 1 << 0,
55};
56
57struct nvm_ioctl_device_info {
58 char devname[DISK_NAME_LEN];
59 char bmname[NVM_TTYPE_NAME_MAX];
60 __u32 bmversion[3];
61 __u32 flags;
62 __u32 reserved[8];
63};
64
65struct nvm_ioctl_get_devices {
66 __u32 nr_devices;
67 __u32 reserved[31];
68 struct nvm_ioctl_device_info info[31];
69};
70
71struct nvm_ioctl_create_simple {
72 __u32 lun_begin;
73 __u32 lun_end;
74};
75
76enum {
77 NVM_CONFIG_TYPE_SIMPLE = 0,
78};
79
80struct nvm_ioctl_create_conf {
81 __u32 type;
82 union {
83 struct nvm_ioctl_create_simple s;
84 };
85};
86
87struct nvm_ioctl_create {
88 char dev[DISK_NAME_LEN]; /* open-channel SSD device */
89 char tgttype[NVM_TTYPE_NAME_MAX]; /* target type name */
90 char tgtname[DISK_NAME_LEN]; /* dev to expose target as */
91
92 __u32 flags;
93
94 struct nvm_ioctl_create_conf conf;
95};
96
97struct nvm_ioctl_remove {
98 char tgtname[DISK_NAME_LEN];
99
100 __u32 flags;
101};
102
103
104/* The ioctl type, 'L', 0x20 - 0x2F documented in ioctl-number.txt */
105enum {
106 /* top level cmds */
107 NVM_INFO_CMD = 0x20,
108 NVM_GET_DEVICES_CMD,
109
110 /* device level cmds */
111 NVM_DEV_CREATE_CMD,
112 NVM_DEV_REMOVE_CMD,
113};
114
115#define NVM_IOCTL 'L' /* 0x4c */
116
117#define NVM_INFO _IOWR(NVM_IOCTL, NVM_INFO_CMD, \
118 struct nvm_ioctl_info)
119#define NVM_GET_DEVICES _IOR(NVM_IOCTL, NVM_GET_DEVICES_CMD, \
120 struct nvm_ioctl_get_devices)
121#define NVM_DEV_CREATE _IOW(NVM_IOCTL, NVM_DEV_CREATE_CMD, \
122 struct nvm_ioctl_create)
123#define NVM_DEV_REMOVE _IOW(NVM_IOCTL, NVM_DEV_REMOVE_CMD, \
124 struct nvm_ioctl_remove)
125
126#define NVM_VERSION_MAJOR 1
127#define NVM_VERSION_MINOR 0
128#define NVM_VERSION_PATCHLEVEL 0
129
130#endif
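A minimal userspace sketch of the ioctl interface above, assuming placeholder device ("nvme0n1"), target type ("rrpc") and target ("mydev") names; the target type itself would be provided by a separate kernel module:

/*
 * Sketch: userspace side of NVM_DEV_CREATE via /dev/lightnvm/control.
 * Device, target and target-type names are placeholders.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/lightnvm.h>

static int create_target(void)
{
	struct nvm_ioctl_create c;
	int fd, ret;

	memset(&c, 0, sizeof(c));
	strncpy(c.dev, "nvme0n1", DISK_NAME_LEN - 1);	     /* open-channel SSD */
	strncpy(c.tgttype, "rrpc", NVM_TTYPE_NAME_MAX - 1);  /* target type */
	strncpy(c.tgtname, "mydev", DISK_NAME_LEN - 1);	     /* exposed blkdev */
	c.flags = 0;
	c.conf.type = NVM_CONFIG_TYPE_SIMPLE;
	c.conf.s.lun_begin = 0;
	c.conf.s.lun_end = 3;

	fd = open(NVM_CTRL_FILE, O_RDWR);
	if (fd < 0) {
		perror("open " NVM_CTRL_FILE);
		return -1;
	}

	ret = ioctl(fd, NVM_DEV_CREATE, &c);
	close(fd);
	return ret;
}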