aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2016-01-21 22:01:55 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2016-01-21 22:01:55 -0500
commit0a13daedf7ffc71b0c374a036355da7fddb20d6d (patch)
treeaf51c711e3e998613a8a4af686cdac0d8b754d98
parent641203549a21ba6a701aecd05c3dfc969ec670cc (diff)
parenta7fd9a4f3e8179bab31e4637236ebb0e0b7867c6 (diff)
Merge branch 'for-4.5/lightnvm' of git://git.kernel.dk/linux-block
Pull lightnvm fixes and updates from Jens Axboe: "This should have been part of the drivers branch, but it arrived a bit late and wasn't based on the official core block driver branch. So they got a small scolding, but got a pass since it's still new. Hence it's in a separate branch. This is mostly pure fixes, contained to lightnvm/, and minor feature additions" * 'for-4.5/lightnvm' of git://git.kernel.dk/linux-block: (26 commits) lightnvm: ensure that nvm_dev_ops can be used without CONFIG_NVM lightnvm: introduce factory reset lightnvm: use system block for mm initialization lightnvm: introduce ioctl to initialize device lightnvm: core on-disk initialization lightnvm: introduce mlc lower page table mappings lightnvm: add mccap support lightnvm: manage open and closed blocks separately lightnvm: fix missing grown bad block type lightnvm: reference rrpc lun in rrpc block lightnvm: introduce nvm_submit_ppa lightnvm: move rq->error to nvm_rq->error lightnvm: support multiple ppas in nvm_erase_ppa lightnvm: move the pages per block check out of the loop lightnvm: sectors first in ppa list lightnvm: fix locking and mempool in rrpc_lun_gc lightnvm: put block back to gc list on its reclaim fail lightnvm: check bi_error in gc lightnvm: return the get_bb_tbl return value lightnvm: refactor end_io functions for sync ...
-rw-r--r--drivers/block/null_blk.c3
-rw-r--r--drivers/lightnvm/Makefile2
-rw-r--r--drivers/lightnvm/core.c340
-rw-r--r--drivers/lightnvm/gennvm.c198
-rw-r--r--drivers/lightnvm/rrpc.c104
-rw-r--r--drivers/lightnvm/rrpc.h13
-rw-r--r--drivers/lightnvm/sysblk.c741
-rw-r--r--drivers/nvme/host/lightnvm.c32
-rw-r--r--include/linux/lightnvm.h238
-rw-r--r--include/uapi/linux/lightnvm.h31
10 files changed, 1476 insertions, 226 deletions
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 6f9587156569..8ba1e97d573c 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -436,9 +436,8 @@ static void null_del_dev(struct nullb *nullb)
436static void null_lnvm_end_io(struct request *rq, int error) 436static void null_lnvm_end_io(struct request *rq, int error)
437{ 437{
438 struct nvm_rq *rqd = rq->end_io_data; 438 struct nvm_rq *rqd = rq->end_io_data;
439 struct nvm_dev *dev = rqd->dev;
440 439
441 dev->mt->end_io(rqd, error); 440 nvm_end_io(rqd, error);
442 441
443 blk_put_request(rq); 442 blk_put_request(rq);
444} 443}
diff --git a/drivers/lightnvm/Makefile b/drivers/lightnvm/Makefile
index 7e0f42acb737..a7a0a22cf1a5 100644
--- a/drivers/lightnvm/Makefile
+++ b/drivers/lightnvm/Makefile
@@ -2,6 +2,6 @@
2# Makefile for Open-Channel SSDs. 2# Makefile for Open-Channel SSDs.
3# 3#
4 4
5obj-$(CONFIG_NVM) := core.o 5obj-$(CONFIG_NVM) := core.o sysblk.o
6obj-$(CONFIG_NVM_GENNVM) += gennvm.o 6obj-$(CONFIG_NVM_GENNVM) += gennvm.o
7obj-$(CONFIG_NVM_RRPC) += rrpc.o 7obj-$(CONFIG_NVM_RRPC) += rrpc.o
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 8f41b245cd55..33224cb91c5b 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -27,6 +27,7 @@
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/miscdevice.h> 28#include <linux/miscdevice.h>
29#include <linux/lightnvm.h> 29#include <linux/lightnvm.h>
30#include <linux/sched/sysctl.h>
30#include <uapi/linux/lightnvm.h> 31#include <uapi/linux/lightnvm.h>
31 32
32static LIST_HEAD(nvm_targets); 33static LIST_HEAD(nvm_targets);
@@ -105,6 +106,9 @@ struct nvmm_type *nvm_init_mgr(struct nvm_dev *dev)
105 lockdep_assert_held(&nvm_lock); 106 lockdep_assert_held(&nvm_lock);
106 107
107 list_for_each_entry(mt, &nvm_mgrs, list) { 108 list_for_each_entry(mt, &nvm_mgrs, list) {
109 if (strncmp(dev->sb.mmtype, mt->name, NVM_MMTYPE_LEN))
110 continue;
111
108 ret = mt->register_mgr(dev); 112 ret = mt->register_mgr(dev);
109 if (ret < 0) { 113 if (ret < 0) {
110 pr_err("nvm: media mgr failed to init (%d) on dev %s\n", 114 pr_err("nvm: media mgr failed to init (%d) on dev %s\n",
@@ -166,6 +170,20 @@ static struct nvm_dev *nvm_find_nvm_dev(const char *name)
166 return NULL; 170 return NULL;
167} 171}
168 172
173struct nvm_block *nvm_get_blk_unlocked(struct nvm_dev *dev, struct nvm_lun *lun,
174 unsigned long flags)
175{
176 return dev->mt->get_blk_unlocked(dev, lun, flags);
177}
178EXPORT_SYMBOL(nvm_get_blk_unlocked);
179
180/* Assumes that all valid pages have already been moved on release to bm */
181void nvm_put_blk_unlocked(struct nvm_dev *dev, struct nvm_block *blk)
182{
183 return dev->mt->put_blk_unlocked(dev, blk);
184}
185EXPORT_SYMBOL(nvm_put_blk_unlocked);
186
169struct nvm_block *nvm_get_blk(struct nvm_dev *dev, struct nvm_lun *lun, 187struct nvm_block *nvm_get_blk(struct nvm_dev *dev, struct nvm_lun *lun,
170 unsigned long flags) 188 unsigned long flags)
171{ 189{
@@ -192,6 +210,206 @@ int nvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk)
192} 210}
193EXPORT_SYMBOL(nvm_erase_blk); 211EXPORT_SYMBOL(nvm_erase_blk);
194 212
213void nvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
214{
215 int i;
216
217 if (rqd->nr_pages > 1) {
218 for (i = 0; i < rqd->nr_pages; i++)
219 rqd->ppa_list[i] = dev_to_generic_addr(dev,
220 rqd->ppa_list[i]);
221 } else {
222 rqd->ppa_addr = dev_to_generic_addr(dev, rqd->ppa_addr);
223 }
224}
225EXPORT_SYMBOL(nvm_addr_to_generic_mode);
226
227void nvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
228{
229 int i;
230
231 if (rqd->nr_pages > 1) {
232 for (i = 0; i < rqd->nr_pages; i++)
233 rqd->ppa_list[i] = generic_to_dev_addr(dev,
234 rqd->ppa_list[i]);
235 } else {
236 rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr);
237 }
238}
239EXPORT_SYMBOL(nvm_generic_to_addr_mode);
240
241int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd,
242 struct ppa_addr *ppas, int nr_ppas)
243{
244 int i, plane_cnt, pl_idx;
245
246 if (dev->plane_mode == NVM_PLANE_SINGLE && nr_ppas == 1) {
247 rqd->nr_pages = 1;
248 rqd->ppa_addr = ppas[0];
249
250 return 0;
251 }
252
253 plane_cnt = (1 << dev->plane_mode);
254 rqd->nr_pages = plane_cnt * nr_ppas;
255
256 if (dev->ops->max_phys_sect < rqd->nr_pages)
257 return -EINVAL;
258
259 rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
260 if (!rqd->ppa_list) {
261 pr_err("nvm: failed to allocate dma memory\n");
262 return -ENOMEM;
263 }
264
265 for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
266 for (i = 0; i < nr_ppas; i++) {
267 ppas[i].g.pl = pl_idx;
268 rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppas[i];
269 }
270 }
271
272 return 0;
273}
274EXPORT_SYMBOL(nvm_set_rqd_ppalist);
275
276void nvm_free_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd)
277{
278 if (!rqd->ppa_list)
279 return;
280
281 nvm_dev_dma_free(dev, rqd->ppa_list, rqd->dma_ppa_list);
282}
283EXPORT_SYMBOL(nvm_free_rqd_ppalist);
284
285int nvm_erase_ppa(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas)
286{
287 struct nvm_rq rqd;
288 int ret;
289
290 if (!dev->ops->erase_block)
291 return 0;
292
293 memset(&rqd, 0, sizeof(struct nvm_rq));
294
295 ret = nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas);
296 if (ret)
297 return ret;
298
299 nvm_generic_to_addr_mode(dev, &rqd);
300
301 ret = dev->ops->erase_block(dev, &rqd);
302
303 nvm_free_rqd_ppalist(dev, &rqd);
304
305 return ret;
306}
307EXPORT_SYMBOL(nvm_erase_ppa);
308
309void nvm_end_io(struct nvm_rq *rqd, int error)
310{
311 rqd->error = error;
312 rqd->end_io(rqd);
313}
314EXPORT_SYMBOL(nvm_end_io);
315
316static void nvm_end_io_sync(struct nvm_rq *rqd)
317{
318 struct completion *waiting = rqd->wait;
319
320 rqd->wait = NULL;
321
322 complete(waiting);
323}
324
325int nvm_submit_ppa(struct nvm_dev *dev, struct ppa_addr *ppa, int nr_ppas,
326 int opcode, int flags, void *buf, int len)
327{
328 DECLARE_COMPLETION_ONSTACK(wait);
329 struct nvm_rq rqd;
330 struct bio *bio;
331 int ret;
332 unsigned long hang_check;
333
334 bio = bio_map_kern(dev->q, buf, len, GFP_KERNEL);
335 if (IS_ERR_OR_NULL(bio))
336 return -ENOMEM;
337
338 memset(&rqd, 0, sizeof(struct nvm_rq));
339 ret = nvm_set_rqd_ppalist(dev, &rqd, ppa, nr_ppas);
340 if (ret) {
341 bio_put(bio);
342 return ret;
343 }
344
345 rqd.opcode = opcode;
346 rqd.bio = bio;
347 rqd.wait = &wait;
348 rqd.dev = dev;
349 rqd.end_io = nvm_end_io_sync;
350 rqd.flags = flags;
351 nvm_generic_to_addr_mode(dev, &rqd);
352
353 ret = dev->ops->submit_io(dev, &rqd);
354
355 /* Prevent hang_check timer from firing at us during very long I/O */
356 hang_check = sysctl_hung_task_timeout_secs;
357 if (hang_check)
358 while (!wait_for_completion_io_timeout(&wait, hang_check * (HZ/2)));
359 else
360 wait_for_completion_io(&wait);
361
362 nvm_free_rqd_ppalist(dev, &rqd);
363
364 return rqd.error;
365}
366EXPORT_SYMBOL(nvm_submit_ppa);
367
368static int nvm_init_slc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
369{
370 int i;
371
372 dev->lps_per_blk = dev->pgs_per_blk;
373 dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
374 if (!dev->lptbl)
375 return -ENOMEM;
376
377 /* Just a linear array */
378 for (i = 0; i < dev->lps_per_blk; i++)
379 dev->lptbl[i] = i;
380
381 return 0;
382}
383
384static int nvm_init_mlc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
385{
386 int i, p;
387 struct nvm_id_lp_mlc *mlc = &grp->lptbl.mlc;
388
389 if (!mlc->num_pairs)
390 return 0;
391
392 dev->lps_per_blk = mlc->num_pairs;
393 dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
394 if (!dev->lptbl)
395 return -ENOMEM;
396
397 /* The lower page table encoding consists of a list of bytes, where each
398 * has a lower and an upper half. The first half byte maintains the
399 * increment value and every value after is an offset added to the
400 * previous incrementation value */
401 dev->lptbl[0] = mlc->pairs[0] & 0xF;
402 for (i = 1; i < dev->lps_per_blk; i++) {
403 p = mlc->pairs[i >> 1];
404 if (i & 0x1) /* upper */
405 dev->lptbl[i] = dev->lptbl[i - 1] + ((p & 0xF0) >> 4);
406 else /* lower */
407 dev->lptbl[i] = dev->lptbl[i - 1] + (p & 0xF);
408 }
409
410 return 0;
411}
412
195static int nvm_core_init(struct nvm_dev *dev) 413static int nvm_core_init(struct nvm_dev *dev)
196{ 414{
197 struct nvm_id *id = &dev->identity; 415 struct nvm_id *id = &dev->identity;
@@ -206,6 +424,7 @@ static int nvm_core_init(struct nvm_dev *dev)
206 dev->sec_size = grp->csecs; 424 dev->sec_size = grp->csecs;
207 dev->oob_size = grp->sos; 425 dev->oob_size = grp->sos;
208 dev->sec_per_pg = grp->fpg_sz / grp->csecs; 426 dev->sec_per_pg = grp->fpg_sz / grp->csecs;
427 dev->mccap = grp->mccap;
209 memcpy(&dev->ppaf, &id->ppaf, sizeof(struct nvm_addr_format)); 428 memcpy(&dev->ppaf, &id->ppaf, sizeof(struct nvm_addr_format));
210 429
211 dev->plane_mode = NVM_PLANE_SINGLE; 430 dev->plane_mode = NVM_PLANE_SINGLE;
@@ -216,11 +435,23 @@ static int nvm_core_init(struct nvm_dev *dev)
216 return -EINVAL; 435 return -EINVAL;
217 } 436 }
218 437
219 if (grp->fmtype != 0 && grp->fmtype != 1) { 438 switch (grp->fmtype) {
439 case NVM_ID_FMTYPE_SLC:
440 if (nvm_init_slc_tbl(dev, grp))
441 return -ENOMEM;
442 break;
443 case NVM_ID_FMTYPE_MLC:
444 if (nvm_init_mlc_tbl(dev, grp))
445 return -ENOMEM;
446 break;
447 default:
220 pr_err("nvm: flash type not supported\n"); 448 pr_err("nvm: flash type not supported\n");
221 return -EINVAL; 449 return -EINVAL;
222 } 450 }
223 451
452 if (!dev->lps_per_blk)
453 pr_info("nvm: lower page programming table missing\n");
454
224 if (grp->mpos & 0x020202) 455 if (grp->mpos & 0x020202)
225 dev->plane_mode = NVM_PLANE_DOUBLE; 456 dev->plane_mode = NVM_PLANE_DOUBLE;
226 if (grp->mpos & 0x040404) 457 if (grp->mpos & 0x040404)
@@ -238,6 +469,7 @@ static int nvm_core_init(struct nvm_dev *dev)
238 dev->nr_chnls; 469 dev->nr_chnls;
239 dev->total_pages = dev->total_blocks * dev->pgs_per_blk; 470 dev->total_pages = dev->total_blocks * dev->pgs_per_blk;
240 INIT_LIST_HEAD(&dev->online_targets); 471 INIT_LIST_HEAD(&dev->online_targets);
472 mutex_init(&dev->mlock);
241 473
242 return 0; 474 return 0;
243} 475}
@@ -249,6 +481,8 @@ static void nvm_free(struct nvm_dev *dev)
249 481
250 if (dev->mt) 482 if (dev->mt)
251 dev->mt->unregister_mgr(dev); 483 dev->mt->unregister_mgr(dev);
484
485 kfree(dev->lptbl);
252} 486}
253 487
254static int nvm_init(struct nvm_dev *dev) 488static int nvm_init(struct nvm_dev *dev)
@@ -338,9 +572,16 @@ int nvm_register(struct request_queue *q, char *disk_name,
338 } 572 }
339 } 573 }
340 574
575 ret = nvm_get_sysblock(dev, &dev->sb);
576 if (!ret)
577 pr_err("nvm: device not initialized.\n");
578 else if (ret < 0)
579 pr_err("nvm: err (%d) on device initialization\n", ret);
580
341 /* register device with a supported media manager */ 581 /* register device with a supported media manager */
342 down_write(&nvm_lock); 582 down_write(&nvm_lock);
343 dev->mt = nvm_init_mgr(dev); 583 if (ret > 0)
584 dev->mt = nvm_init_mgr(dev);
344 list_add(&dev->devices, &nvm_devices); 585 list_add(&dev->devices, &nvm_devices);
345 up_write(&nvm_lock); 586 up_write(&nvm_lock);
346 587
@@ -788,6 +1029,97 @@ static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
788 return __nvm_configure_remove(&remove); 1029 return __nvm_configure_remove(&remove);
789} 1030}
790 1031
1032static void nvm_setup_nvm_sb_info(struct nvm_sb_info *info)
1033{
1034 info->seqnr = 1;
1035 info->erase_cnt = 0;
1036 info->version = 1;
1037}
1038
1039static long __nvm_ioctl_dev_init(struct nvm_ioctl_dev_init *init)
1040{
1041 struct nvm_dev *dev;
1042 struct nvm_sb_info info;
1043 int ret;
1044
1045 down_write(&nvm_lock);
1046 dev = nvm_find_nvm_dev(init->dev);
1047 up_write(&nvm_lock);
1048 if (!dev) {
1049 pr_err("nvm: device not found\n");
1050 return -EINVAL;
1051 }
1052
1053 nvm_setup_nvm_sb_info(&info);
1054
1055 strncpy(info.mmtype, init->mmtype, NVM_MMTYPE_LEN);
1056 info.fs_ppa.ppa = -1;
1057
1058 ret = nvm_init_sysblock(dev, &info);
1059 if (ret)
1060 return ret;
1061
1062 memcpy(&dev->sb, &info, sizeof(struct nvm_sb_info));
1063
1064 down_write(&nvm_lock);
1065 dev->mt = nvm_init_mgr(dev);
1066 up_write(&nvm_lock);
1067
1068 return 0;
1069}
1070
1071static long nvm_ioctl_dev_init(struct file *file, void __user *arg)
1072{
1073 struct nvm_ioctl_dev_init init;
1074
1075 if (!capable(CAP_SYS_ADMIN))
1076 return -EPERM;
1077
1078 if (copy_from_user(&init, arg, sizeof(struct nvm_ioctl_dev_init)))
1079 return -EFAULT;
1080
1081 if (init.flags != 0) {
1082 pr_err("nvm: no flags supported\n");
1083 return -EINVAL;
1084 }
1085
1086 init.dev[DISK_NAME_LEN - 1] = '\0';
1087
1088 return __nvm_ioctl_dev_init(&init);
1089}
1090
1091static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
1092{
1093 struct nvm_ioctl_dev_factory fact;
1094 struct nvm_dev *dev;
1095
1096 if (!capable(CAP_SYS_ADMIN))
1097 return -EPERM;
1098
1099 if (copy_from_user(&fact, arg, sizeof(struct nvm_ioctl_dev_factory)))
1100 return -EFAULT;
1101
1102 fact.dev[DISK_NAME_LEN - 1] = '\0';
1103
1104 if (fact.flags & ~(NVM_FACTORY_NR_BITS - 1))
1105 return -EINVAL;
1106
1107 down_write(&nvm_lock);
1108 dev = nvm_find_nvm_dev(fact.dev);
1109 up_write(&nvm_lock);
1110 if (!dev) {
1111 pr_err("nvm: device not found\n");
1112 return -EINVAL;
1113 }
1114
1115 if (dev->mt) {
1116 dev->mt->unregister_mgr(dev);
1117 dev->mt = NULL;
1118 }
1119
1120 return nvm_dev_factory(dev, fact.flags);
1121}
1122
791static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg) 1123static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
792{ 1124{
793 void __user *argp = (void __user *)arg; 1125 void __user *argp = (void __user *)arg;
@@ -801,6 +1133,10 @@ static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
801 return nvm_ioctl_dev_create(file, argp); 1133 return nvm_ioctl_dev_create(file, argp);
802 case NVM_DEV_REMOVE: 1134 case NVM_DEV_REMOVE:
803 return nvm_ioctl_dev_remove(file, argp); 1135 return nvm_ioctl_dev_remove(file, argp);
1136 case NVM_DEV_INIT:
1137 return nvm_ioctl_dev_init(file, argp);
1138 case NVM_DEV_FACTORY:
1139 return nvm_ioctl_dev_factory(file, argp);
804 } 1140 }
805 return 0; 1141 return 0;
806} 1142}
diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
index a54b339951a3..7fb725b16148 100644
--- a/drivers/lightnvm/gennvm.c
+++ b/drivers/lightnvm/gennvm.c
@@ -60,7 +60,8 @@ static int gennvm_luns_init(struct nvm_dev *dev, struct gen_nvm *gn)
60 lun->vlun.lun_id = i % dev->luns_per_chnl; 60 lun->vlun.lun_id = i % dev->luns_per_chnl;
61 lun->vlun.chnl_id = i / dev->luns_per_chnl; 61 lun->vlun.chnl_id = i / dev->luns_per_chnl;
62 lun->vlun.nr_free_blocks = dev->blks_per_lun; 62 lun->vlun.nr_free_blocks = dev->blks_per_lun;
63 lun->vlun.nr_inuse_blocks = 0; 63 lun->vlun.nr_open_blocks = 0;
64 lun->vlun.nr_closed_blocks = 0;
64 lun->vlun.nr_bad_blocks = 0; 65 lun->vlun.nr_bad_blocks = 0;
65 } 66 }
66 return 0; 67 return 0;
@@ -89,6 +90,7 @@ static int gennvm_block_bb(struct ppa_addr ppa, int nr_blocks, u8 *blks,
89 90
90 list_move_tail(&blk->list, &lun->bb_list); 91 list_move_tail(&blk->list, &lun->bb_list);
91 lun->vlun.nr_bad_blocks++; 92 lun->vlun.nr_bad_blocks++;
93 lun->vlun.nr_free_blocks--;
92 } 94 }
93 95
94 return 0; 96 return 0;
@@ -133,15 +135,15 @@ static int gennvm_block_map(u64 slba, u32 nlb, __le64 *entries, void *private)
133 pba = pba - (dev->sec_per_lun * lun_id); 135 pba = pba - (dev->sec_per_lun * lun_id);
134 blk = &lun->vlun.blocks[div_u64(pba, dev->sec_per_blk)]; 136 blk = &lun->vlun.blocks[div_u64(pba, dev->sec_per_blk)];
135 137
136 if (!blk->type) { 138 if (!blk->state) {
137 /* at this point, we don't know anything about the 139 /* at this point, we don't know anything about the
138 * block. It's up to the FTL on top to re-etablish the 140 * block. It's up to the FTL on top to re-etablish the
139 * block state 141 * block state. The block is assumed to be open.
140 */ 142 */
141 list_move_tail(&blk->list, &lun->used_list); 143 list_move_tail(&blk->list, &lun->used_list);
142 blk->type = 1; 144 blk->state = NVM_BLK_ST_OPEN;
143 lun->vlun.nr_free_blocks--; 145 lun->vlun.nr_free_blocks--;
144 lun->vlun.nr_inuse_blocks++; 146 lun->vlun.nr_open_blocks++;
145 } 147 }
146 } 148 }
147 149
@@ -255,14 +257,14 @@ static void gennvm_unregister(struct nvm_dev *dev)
255 module_put(THIS_MODULE); 257 module_put(THIS_MODULE);
256} 258}
257 259
258static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev, 260static struct nvm_block *gennvm_get_blk_unlocked(struct nvm_dev *dev,
259 struct nvm_lun *vlun, unsigned long flags) 261 struct nvm_lun *vlun, unsigned long flags)
260{ 262{
261 struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun); 263 struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun);
262 struct nvm_block *blk = NULL; 264 struct nvm_block *blk = NULL;
263 int is_gc = flags & NVM_IOTYPE_GC; 265 int is_gc = flags & NVM_IOTYPE_GC;
264 266
265 spin_lock(&vlun->lock); 267 assert_spin_locked(&vlun->lock);
266 268
267 if (list_empty(&lun->free_list)) { 269 if (list_empty(&lun->free_list)) {
268 pr_err_ratelimited("gennvm: lun %u have no free pages available", 270 pr_err_ratelimited("gennvm: lun %u have no free pages available",
@@ -275,83 +277,64 @@ static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev,
275 277
276 blk = list_first_entry(&lun->free_list, struct nvm_block, list); 278 blk = list_first_entry(&lun->free_list, struct nvm_block, list);
277 list_move_tail(&blk->list, &lun->used_list); 279 list_move_tail(&blk->list, &lun->used_list);
278 blk->type = 1; 280 blk->state = NVM_BLK_ST_OPEN;
279 281
280 lun->vlun.nr_free_blocks--; 282 lun->vlun.nr_free_blocks--;
281 lun->vlun.nr_inuse_blocks++; 283 lun->vlun.nr_open_blocks++;
282 284
283out: 285out:
286 return blk;
287}
288
289static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev,
290 struct nvm_lun *vlun, unsigned long flags)
291{
292 struct nvm_block *blk;
293
294 spin_lock(&vlun->lock);
295 blk = gennvm_get_blk_unlocked(dev, vlun, flags);
284 spin_unlock(&vlun->lock); 296 spin_unlock(&vlun->lock);
285 return blk; 297 return blk;
286} 298}
287 299
288static void gennvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk) 300static void gennvm_put_blk_unlocked(struct nvm_dev *dev, struct nvm_block *blk)
289{ 301{
290 struct nvm_lun *vlun = blk->lun; 302 struct nvm_lun *vlun = blk->lun;
291 struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun); 303 struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun);
292 304
293 spin_lock(&vlun->lock); 305 assert_spin_locked(&vlun->lock);
294 306
295 switch (blk->type) { 307 if (blk->state & NVM_BLK_ST_OPEN) {
296 case 1:
297 list_move_tail(&blk->list, &lun->free_list); 308 list_move_tail(&blk->list, &lun->free_list);
309 lun->vlun.nr_open_blocks--;
298 lun->vlun.nr_free_blocks++; 310 lun->vlun.nr_free_blocks++;
299 lun->vlun.nr_inuse_blocks--; 311 blk->state = NVM_BLK_ST_FREE;
300 blk->type = 0; 312 } else if (blk->state & NVM_BLK_ST_CLOSED) {
301 break; 313 list_move_tail(&blk->list, &lun->free_list);
302 case 2: 314 lun->vlun.nr_closed_blocks--;
315 lun->vlun.nr_free_blocks++;
316 blk->state = NVM_BLK_ST_FREE;
317 } else if (blk->state & NVM_BLK_ST_BAD) {
303 list_move_tail(&blk->list, &lun->bb_list); 318 list_move_tail(&blk->list, &lun->bb_list);
304 lun->vlun.nr_bad_blocks++; 319 lun->vlun.nr_bad_blocks++;
305 lun->vlun.nr_inuse_blocks--; 320 blk->state = NVM_BLK_ST_BAD;
306 break; 321 } else {
307 default:
308 WARN_ON_ONCE(1); 322 WARN_ON_ONCE(1);
309 pr_err("gennvm: erroneous block type (%lu -> %u)\n", 323 pr_err("gennvm: erroneous block type (%lu -> %u)\n",
310 blk->id, blk->type); 324 blk->id, blk->state);
311 list_move_tail(&blk->list, &lun->bb_list); 325 list_move_tail(&blk->list, &lun->bb_list);
312 lun->vlun.nr_bad_blocks++; 326 lun->vlun.nr_bad_blocks++;
313 lun->vlun.nr_inuse_blocks--; 327 blk->state = NVM_BLK_ST_BAD;
314 }
315
316 spin_unlock(&vlun->lock);
317}
318
319static void gennvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
320{
321 int i;
322
323 if (rqd->nr_pages > 1) {
324 for (i = 0; i < rqd->nr_pages; i++)
325 rqd->ppa_list[i] = dev_to_generic_addr(dev,
326 rqd->ppa_list[i]);
327 } else {
328 rqd->ppa_addr = dev_to_generic_addr(dev, rqd->ppa_addr);
329 } 328 }
330} 329}
331 330
332static void gennvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd) 331static void gennvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
333{
334 int i;
335
336 if (rqd->nr_pages > 1) {
337 for (i = 0; i < rqd->nr_pages; i++)
338 rqd->ppa_list[i] = generic_to_dev_addr(dev,
339 rqd->ppa_list[i]);
340 } else {
341 rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr);
342 }
343}
344
345static int gennvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
346{ 332{
347 if (!dev->ops->submit_io) 333 struct nvm_lun *vlun = blk->lun;
348 return 0;
349
350 /* Convert address space */
351 gennvm_generic_to_addr_mode(dev, rqd);
352 334
353 rqd->dev = dev; 335 spin_lock(&vlun->lock);
354 return dev->ops->submit_io(dev, rqd); 336 gennvm_put_blk_unlocked(dev, blk);
337 spin_unlock(&vlun->lock);
355} 338}
356 339
357static void gennvm_blk_set_type(struct nvm_dev *dev, struct ppa_addr *ppa, 340static void gennvm_blk_set_type(struct nvm_dev *dev, struct ppa_addr *ppa,
@@ -376,7 +359,7 @@ static void gennvm_blk_set_type(struct nvm_dev *dev, struct ppa_addr *ppa,
376 blk = &lun->vlun.blocks[ppa->g.blk]; 359 blk = &lun->vlun.blocks[ppa->g.blk];
377 360
378 /* will be moved to bb list on put_blk from target */ 361 /* will be moved to bb list on put_blk from target */
379 blk->type = type; 362 blk->state = type;
380} 363}
381 364
382/* mark block bad. It is expected the target recover from the error. */ 365/* mark block bad. It is expected the target recover from the error. */
@@ -390,77 +373,51 @@ static void gennvm_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd)
390 if (dev->ops->set_bb_tbl(dev, rqd, 1)) 373 if (dev->ops->set_bb_tbl(dev, rqd, 1))
391 return; 374 return;
392 375
393 gennvm_addr_to_generic_mode(dev, rqd); 376 nvm_addr_to_generic_mode(dev, rqd);
394 377
395 /* look up blocks and mark them as bad */ 378 /* look up blocks and mark them as bad */
396 if (rqd->nr_pages > 1) 379 if (rqd->nr_pages > 1)
397 for (i = 0; i < rqd->nr_pages; i++) 380 for (i = 0; i < rqd->nr_pages; i++)
398 gennvm_blk_set_type(dev, &rqd->ppa_list[i], 2); 381 gennvm_blk_set_type(dev, &rqd->ppa_list[i],
382 NVM_BLK_ST_BAD);
399 else 383 else
400 gennvm_blk_set_type(dev, &rqd->ppa_addr, 2); 384 gennvm_blk_set_type(dev, &rqd->ppa_addr, NVM_BLK_ST_BAD);
401} 385}
402 386
403static int gennvm_end_io(struct nvm_rq *rqd, int error) 387static void gennvm_end_io(struct nvm_rq *rqd)
404{ 388{
405 struct nvm_tgt_instance *ins = rqd->ins; 389 struct nvm_tgt_instance *ins = rqd->ins;
406 int ret = 0;
407 390
408 switch (error) { 391 switch (rqd->error) {
409 case NVM_RSP_SUCCESS: 392 case NVM_RSP_SUCCESS:
410 break;
411 case NVM_RSP_ERR_EMPTYPAGE: 393 case NVM_RSP_ERR_EMPTYPAGE:
412 break; 394 break;
413 case NVM_RSP_ERR_FAILWRITE: 395 case NVM_RSP_ERR_FAILWRITE:
414 gennvm_mark_blk_bad(rqd->dev, rqd); 396 gennvm_mark_blk_bad(rqd->dev, rqd);
415 default:
416 ret++;
417 } 397 }
418 398
419 ret += ins->tt->end_io(rqd, error); 399 ins->tt->end_io(rqd);
420
421 return ret;
422} 400}
423 401
424static int gennvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk, 402static int gennvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
425 unsigned long flags)
426{ 403{
427 int plane_cnt = 0, pl_idx, ret; 404 if (!dev->ops->submit_io)
428 struct ppa_addr addr; 405 return -ENODEV;
429 struct nvm_rq rqd;
430
431 if (!dev->ops->erase_block)
432 return 0;
433
434 addr = block_to_ppa(dev, blk);
435
436 if (dev->plane_mode == NVM_PLANE_SINGLE) {
437 rqd.nr_pages = 1;
438 rqd.ppa_addr = addr;
439 } else {
440 plane_cnt = (1 << dev->plane_mode);
441 rqd.nr_pages = plane_cnt;
442
443 rqd.ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL,
444 &rqd.dma_ppa_list);
445 if (!rqd.ppa_list) {
446 pr_err("gennvm: failed to allocate dma memory\n");
447 return -ENOMEM;
448 }
449
450 for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
451 addr.g.pl = pl_idx;
452 rqd.ppa_list[pl_idx] = addr;
453 }
454 }
455 406
456 gennvm_generic_to_addr_mode(dev, &rqd); 407 /* Convert address space */
408 nvm_generic_to_addr_mode(dev, rqd);
457 409
458 ret = dev->ops->erase_block(dev, &rqd); 410 rqd->dev = dev;
411 rqd->end_io = gennvm_end_io;
412 return dev->ops->submit_io(dev, rqd);
413}
459 414
460 if (plane_cnt) 415static int gennvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk,
461 nvm_dev_dma_free(dev, rqd.ppa_list, rqd.dma_ppa_list); 416 unsigned long flags)
417{
418 struct ppa_addr addr = block_to_ppa(dev, blk);
462 419
463 return ret; 420 return nvm_erase_ppa(dev, &addr, 1);
464} 421}
465 422
466static struct nvm_lun *gennvm_get_lun(struct nvm_dev *dev, int lunid) 423static struct nvm_lun *gennvm_get_lun(struct nvm_dev *dev, int lunid)
@@ -480,10 +437,11 @@ static void gennvm_lun_info_print(struct nvm_dev *dev)
480 gennvm_for_each_lun(gn, lun, i) { 437 gennvm_for_each_lun(gn, lun, i) {
481 spin_lock(&lun->vlun.lock); 438 spin_lock(&lun->vlun.lock);
482 439
483 pr_info("%s: lun%8u\t%u\t%u\t%u\n", 440 pr_info("%s: lun%8u\t%u\t%u\t%u\t%u\n",
484 dev->name, i, 441 dev->name, i,
485 lun->vlun.nr_free_blocks, 442 lun->vlun.nr_free_blocks,
486 lun->vlun.nr_inuse_blocks, 443 lun->vlun.nr_open_blocks,
444 lun->vlun.nr_closed_blocks,
487 lun->vlun.nr_bad_blocks); 445 lun->vlun.nr_bad_blocks);
488 446
489 spin_unlock(&lun->vlun.lock); 447 spin_unlock(&lun->vlun.lock);
@@ -491,21 +449,23 @@ static void gennvm_lun_info_print(struct nvm_dev *dev)
491} 449}
492 450
493static struct nvmm_type gennvm = { 451static struct nvmm_type gennvm = {
494 .name = "gennvm", 452 .name = "gennvm",
495 .version = {0, 1, 0}, 453 .version = {0, 1, 0},
454
455 .register_mgr = gennvm_register,
456 .unregister_mgr = gennvm_unregister,
496 457
497 .register_mgr = gennvm_register, 458 .get_blk_unlocked = gennvm_get_blk_unlocked,
498 .unregister_mgr = gennvm_unregister, 459 .put_blk_unlocked = gennvm_put_blk_unlocked,
499 460
500 .get_blk = gennvm_get_blk, 461 .get_blk = gennvm_get_blk,
501 .put_blk = gennvm_put_blk, 462 .put_blk = gennvm_put_blk,
502 463
503 .submit_io = gennvm_submit_io, 464 .submit_io = gennvm_submit_io,
504 .end_io = gennvm_end_io, 465 .erase_blk = gennvm_erase_blk,
505 .erase_blk = gennvm_erase_blk,
506 466
507 .get_lun = gennvm_get_lun, 467 .get_lun = gennvm_get_lun,
508 .lun_info_print = gennvm_lun_info_print, 468 .lun_info_print = gennvm_lun_info_print,
509}; 469};
510 470
511static int __init gennvm_module_init(void) 471static int __init gennvm_module_init(void)
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index 134e4faba482..d8c75958ced3 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -179,16 +179,23 @@ static void rrpc_set_lun_cur(struct rrpc_lun *rlun, struct rrpc_block *rblk)
179static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun, 179static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
180 unsigned long flags) 180 unsigned long flags)
181{ 181{
182 struct nvm_lun *lun = rlun->parent;
182 struct nvm_block *blk; 183 struct nvm_block *blk;
183 struct rrpc_block *rblk; 184 struct rrpc_block *rblk;
184 185
185 blk = nvm_get_blk(rrpc->dev, rlun->parent, flags); 186 spin_lock(&lun->lock);
186 if (!blk) 187 blk = nvm_get_blk_unlocked(rrpc->dev, rlun->parent, flags);
188 if (!blk) {
189 pr_err("nvm: rrpc: cannot get new block from media manager\n");
190 spin_unlock(&lun->lock);
187 return NULL; 191 return NULL;
192 }
188 193
189 rblk = &rlun->blocks[blk->id]; 194 rblk = &rlun->blocks[blk->id];
190 blk->priv = rblk; 195 list_add_tail(&rblk->list, &rlun->open_list);
196 spin_unlock(&lun->lock);
191 197
198 blk->priv = rblk;
192 bitmap_zero(rblk->invalid_pages, rrpc->dev->pgs_per_blk); 199 bitmap_zero(rblk->invalid_pages, rrpc->dev->pgs_per_blk);
193 rblk->next_page = 0; 200 rblk->next_page = 0;
194 rblk->nr_invalid_pages = 0; 201 rblk->nr_invalid_pages = 0;
@@ -199,7 +206,13 @@ static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
199 206
200static void rrpc_put_blk(struct rrpc *rrpc, struct rrpc_block *rblk) 207static void rrpc_put_blk(struct rrpc *rrpc, struct rrpc_block *rblk)
201{ 208{
202 nvm_put_blk(rrpc->dev, rblk->parent); 209 struct rrpc_lun *rlun = rblk->rlun;
210 struct nvm_lun *lun = rlun->parent;
211
212 spin_lock(&lun->lock);
213 nvm_put_blk_unlocked(rrpc->dev, rblk->parent);
214 list_del(&rblk->list);
215 spin_unlock(&lun->lock);
203} 216}
204 217
205static void rrpc_put_blks(struct rrpc *rrpc) 218static void rrpc_put_blks(struct rrpc *rrpc)
@@ -287,6 +300,8 @@ static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
287 } 300 }
288 301
289 page = mempool_alloc(rrpc->page_pool, GFP_NOIO); 302 page = mempool_alloc(rrpc->page_pool, GFP_NOIO);
303 if (!page)
304 return -ENOMEM;
290 305
291 while ((slot = find_first_zero_bit(rblk->invalid_pages, 306 while ((slot = find_first_zero_bit(rblk->invalid_pages,
292 nr_pgs_per_blk)) < nr_pgs_per_blk) { 307 nr_pgs_per_blk)) < nr_pgs_per_blk) {
@@ -328,6 +343,10 @@ try:
328 goto finished; 343 goto finished;
329 } 344 }
330 wait_for_completion_io(&wait); 345 wait_for_completion_io(&wait);
346 if (bio->bi_error) {
347 rrpc_inflight_laddr_release(rrpc, rqd);
348 goto finished;
349 }
331 350
332 bio_reset(bio); 351 bio_reset(bio);
333 reinit_completion(&wait); 352 reinit_completion(&wait);
@@ -350,6 +369,8 @@ try:
350 wait_for_completion_io(&wait); 369 wait_for_completion_io(&wait);
351 370
352 rrpc_inflight_laddr_release(rrpc, rqd); 371 rrpc_inflight_laddr_release(rrpc, rqd);
372 if (bio->bi_error)
373 goto finished;
353 374
354 bio_reset(bio); 375 bio_reset(bio);
355 } 376 }
@@ -373,16 +394,26 @@ static void rrpc_block_gc(struct work_struct *work)
373 struct rrpc *rrpc = gcb->rrpc; 394 struct rrpc *rrpc = gcb->rrpc;
374 struct rrpc_block *rblk = gcb->rblk; 395 struct rrpc_block *rblk = gcb->rblk;
375 struct nvm_dev *dev = rrpc->dev; 396 struct nvm_dev *dev = rrpc->dev;
397 struct nvm_lun *lun = rblk->parent->lun;
398 struct rrpc_lun *rlun = &rrpc->luns[lun->id - rrpc->lun_offset];
376 399
400 mempool_free(gcb, rrpc->gcb_pool);
377 pr_debug("nvm: block '%lu' being reclaimed\n", rblk->parent->id); 401 pr_debug("nvm: block '%lu' being reclaimed\n", rblk->parent->id);
378 402
379 if (rrpc_move_valid_pages(rrpc, rblk)) 403 if (rrpc_move_valid_pages(rrpc, rblk))
380 goto done; 404 goto put_back;
405
406 if (nvm_erase_blk(dev, rblk->parent))
407 goto put_back;
381 408
382 nvm_erase_blk(dev, rblk->parent);
383 rrpc_put_blk(rrpc, rblk); 409 rrpc_put_blk(rrpc, rblk);
384done: 410
385 mempool_free(gcb, rrpc->gcb_pool); 411 return;
412
413put_back:
414 spin_lock(&rlun->lock);
415 list_add_tail(&rblk->prio, &rlun->prio_list);
416 spin_unlock(&rlun->lock);
386} 417}
387 418
388/* the block with highest number of invalid pages, will be in the beginning 419/* the block with highest number of invalid pages, will be in the beginning
@@ -427,7 +458,7 @@ static void rrpc_lun_gc(struct work_struct *work)
427 if (nr_blocks_need < rrpc->nr_luns) 458 if (nr_blocks_need < rrpc->nr_luns)
428 nr_blocks_need = rrpc->nr_luns; 459 nr_blocks_need = rrpc->nr_luns;
429 460
430 spin_lock(&lun->lock); 461 spin_lock(&rlun->lock);
431 while (nr_blocks_need > lun->nr_free_blocks && 462 while (nr_blocks_need > lun->nr_free_blocks &&
432 !list_empty(&rlun->prio_list)) { 463 !list_empty(&rlun->prio_list)) {
433 struct rrpc_block *rblock = block_prio_find_max(rlun); 464 struct rrpc_block *rblock = block_prio_find_max(rlun);
@@ -436,16 +467,16 @@ static void rrpc_lun_gc(struct work_struct *work)
436 if (!rblock->nr_invalid_pages) 467 if (!rblock->nr_invalid_pages)
437 break; 468 break;
438 469
470 gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
471 if (!gcb)
472 break;
473
439 list_del_init(&rblock->prio); 474 list_del_init(&rblock->prio);
440 475
441 BUG_ON(!block_is_full(rrpc, rblock)); 476 BUG_ON(!block_is_full(rrpc, rblock));
442 477
443 pr_debug("rrpc: selected block '%lu' for GC\n", block->id); 478 pr_debug("rrpc: selected block '%lu' for GC\n", block->id);
444 479
445 gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
446 if (!gcb)
447 break;
448
449 gcb->rrpc = rrpc; 480 gcb->rrpc = rrpc;
450 gcb->rblk = rblock; 481 gcb->rblk = rblock;
451 INIT_WORK(&gcb->ws_gc, rrpc_block_gc); 482 INIT_WORK(&gcb->ws_gc, rrpc_block_gc);
@@ -454,7 +485,7 @@ static void rrpc_lun_gc(struct work_struct *work)
454 485
455 nr_blocks_need--; 486 nr_blocks_need--;
456 } 487 }
457 spin_unlock(&lun->lock); 488 spin_unlock(&rlun->lock);
458 489
459 /* TODO: Hint that request queue can be started again */ 490 /* TODO: Hint that request queue can be started again */
460} 491}
@@ -635,12 +666,24 @@ static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
635 lun = rblk->parent->lun; 666 lun = rblk->parent->lun;
636 667
637 cmnt_size = atomic_inc_return(&rblk->data_cmnt_size); 668 cmnt_size = atomic_inc_return(&rblk->data_cmnt_size);
638 if (unlikely(cmnt_size == rrpc->dev->pgs_per_blk)) 669 if (unlikely(cmnt_size == rrpc->dev->pgs_per_blk)) {
670 struct nvm_block *blk = rblk->parent;
671 struct rrpc_lun *rlun = rblk->rlun;
672
673 spin_lock(&lun->lock);
674 lun->nr_open_blocks--;
675 lun->nr_closed_blocks++;
676 blk->state &= ~NVM_BLK_ST_OPEN;
677 blk->state |= NVM_BLK_ST_CLOSED;
678 list_move_tail(&rblk->list, &rlun->closed_list);
679 spin_unlock(&lun->lock);
680
639 rrpc_run_gc(rrpc, rblk); 681 rrpc_run_gc(rrpc, rblk);
682 }
640 } 683 }
641} 684}
642 685
643static int rrpc_end_io(struct nvm_rq *rqd, int error) 686static void rrpc_end_io(struct nvm_rq *rqd)
644{ 687{
645 struct rrpc *rrpc = container_of(rqd->ins, struct rrpc, instance); 688 struct rrpc *rrpc = container_of(rqd->ins, struct rrpc, instance);
646 struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd); 689 struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
@@ -650,11 +693,12 @@ static int rrpc_end_io(struct nvm_rq *rqd, int error)
650 if (bio_data_dir(rqd->bio) == WRITE) 693 if (bio_data_dir(rqd->bio) == WRITE)
651 rrpc_end_io_write(rrpc, rrqd, laddr, npages); 694 rrpc_end_io_write(rrpc, rrqd, laddr, npages);
652 695
696 bio_put(rqd->bio);
697
653 if (rrqd->flags & NVM_IOTYPE_GC) 698 if (rrqd->flags & NVM_IOTYPE_GC)
654 return 0; 699 return;
655 700
656 rrpc_unlock_rq(rrpc, rqd); 701 rrpc_unlock_rq(rrpc, rqd);
657 bio_put(rqd->bio);
658 702
659 if (npages > 1) 703 if (npages > 1)
660 nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list); 704 nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
@@ -662,8 +706,6 @@ static int rrpc_end_io(struct nvm_rq *rqd, int error)
662 nvm_dev_dma_free(rrpc->dev, rqd->metadata, rqd->dma_metadata); 706 nvm_dev_dma_free(rrpc->dev, rqd->metadata, rqd->dma_metadata);
663 707
664 mempool_free(rqd, rrpc->rq_pool); 708 mempool_free(rqd, rrpc->rq_pool);
665
666 return 0;
667} 709}
668 710
669static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio, 711static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
@@ -841,6 +883,13 @@ static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
841 err = nvm_submit_io(rrpc->dev, rqd); 883 err = nvm_submit_io(rrpc->dev, rqd);
842 if (err) { 884 if (err) {
843 pr_err("rrpc: I/O submission failed: %d\n", err); 885 pr_err("rrpc: I/O submission failed: %d\n", err);
886 bio_put(bio);
887 if (!(flags & NVM_IOTYPE_GC)) {
888 rrpc_unlock_rq(rrpc, rqd);
889 if (rqd->nr_pages > 1)
890 nvm_dev_dma_free(rrpc->dev,
891 rqd->ppa_list, rqd->dma_ppa_list);
892 }
844 return NVM_IO_ERR; 893 return NVM_IO_ERR;
845 } 894 }
846 895
@@ -1090,6 +1139,11 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
1090 struct rrpc_lun *rlun; 1139 struct rrpc_lun *rlun;
1091 int i, j; 1140 int i, j;
1092 1141
1142 if (dev->pgs_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
1143 pr_err("rrpc: number of pages per block too high.");
1144 return -EINVAL;
1145 }
1146
1093 spin_lock_init(&rrpc->rev_lock); 1147 spin_lock_init(&rrpc->rev_lock);
1094 1148
1095 rrpc->luns = kcalloc(rrpc->nr_luns, sizeof(struct rrpc_lun), 1149 rrpc->luns = kcalloc(rrpc->nr_luns, sizeof(struct rrpc_lun),
@@ -1101,16 +1155,13 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
1101 for (i = 0; i < rrpc->nr_luns; i++) { 1155 for (i = 0; i < rrpc->nr_luns; i++) {
1102 struct nvm_lun *lun = dev->mt->get_lun(dev, lun_begin + i); 1156 struct nvm_lun *lun = dev->mt->get_lun(dev, lun_begin + i);
1103 1157
1104 if (dev->pgs_per_blk >
1105 MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
1106 pr_err("rrpc: number of pages per block too high.");
1107 goto err;
1108 }
1109
1110 rlun = &rrpc->luns[i]; 1158 rlun = &rrpc->luns[i];
1111 rlun->rrpc = rrpc; 1159 rlun->rrpc = rrpc;
1112 rlun->parent = lun; 1160 rlun->parent = lun;
1113 INIT_LIST_HEAD(&rlun->prio_list); 1161 INIT_LIST_HEAD(&rlun->prio_list);
1162 INIT_LIST_HEAD(&rlun->open_list);
1163 INIT_LIST_HEAD(&rlun->closed_list);
1164
1114 INIT_WORK(&rlun->ws_gc, rrpc_lun_gc); 1165 INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
1115 spin_lock_init(&rlun->lock); 1166 spin_lock_init(&rlun->lock);
1116 1167
@@ -1127,6 +1178,7 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
1127 struct nvm_block *blk = &lun->blocks[j]; 1178 struct nvm_block *blk = &lun->blocks[j];
1128 1179
1129 rblk->parent = blk; 1180 rblk->parent = blk;
1181 rblk->rlun = rlun;
1130 INIT_LIST_HEAD(&rblk->prio); 1182 INIT_LIST_HEAD(&rblk->prio);
1131 spin_lock_init(&rblk->lock); 1183 spin_lock_init(&rblk->lock);
1132 } 1184 }
diff --git a/drivers/lightnvm/rrpc.h b/drivers/lightnvm/rrpc.h
index a9696a06c38c..ef13ac7700c8 100644
--- a/drivers/lightnvm/rrpc.h
+++ b/drivers/lightnvm/rrpc.h
@@ -54,7 +54,9 @@ struct rrpc_rq {
54 54
55struct rrpc_block { 55struct rrpc_block {
56 struct nvm_block *parent; 56 struct nvm_block *parent;
57 struct rrpc_lun *rlun;
57 struct list_head prio; 58 struct list_head prio;
59 struct list_head list;
58 60
59#define MAX_INVALID_PAGES_STORAGE 8 61#define MAX_INVALID_PAGES_STORAGE 8
60 /* Bitmap for invalid page intries */ 62 /* Bitmap for invalid page intries */
@@ -73,7 +75,16 @@ struct rrpc_lun {
73 struct nvm_lun *parent; 75 struct nvm_lun *parent;
74 struct rrpc_block *cur, *gc_cur; 76 struct rrpc_block *cur, *gc_cur;
75 struct rrpc_block *blocks; /* Reference to block allocation */ 77 struct rrpc_block *blocks; /* Reference to block allocation */
76 struct list_head prio_list; /* Blocks that may be GC'ed */ 78
79 struct list_head prio_list; /* Blocks that may be GC'ed */
80 struct list_head open_list; /* In-use open blocks. These are blocks
81 * that can be both written to and read
82 * from
83 */
84 struct list_head closed_list; /* In-use closed blocks. These are
85 * blocks that can _only_ be read from
86 */
87
77 struct work_struct ws_gc; 88 struct work_struct ws_gc;
78 89
79 spinlock_t lock; 90 spinlock_t lock;
diff --git a/drivers/lightnvm/sysblk.c b/drivers/lightnvm/sysblk.c
new file mode 100644
index 000000000000..321de1f154c5
--- /dev/null
+++ b/drivers/lightnvm/sysblk.c
@@ -0,0 +1,741 @@
1/*
2 * Copyright (C) 2015 Matias Bjorling. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License version
6 * 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; see the file COPYING. If not, write to
15 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
16 * USA.
17 *
18 */
19
20#include <linux/lightnvm.h>
21
22#define MAX_SYSBLKS 3 /* remember to update mapping scheme on change */
23#define MAX_BLKS_PR_SYSBLK 2 /* 2 blks with 256 pages and 3000 erases
24 * enables ~1.5M updates per sysblk unit
25 */
26
/*
 * Scratch state used while scanning the device for system blocks.
 * A row is a collection of flash blocks for a system block.
 */
struct sysblk_scan {
	int nr_rows;		/* number of sysblk rows on the device */
	int row;		/* row currently being scanned */
	int act_blk[MAX_SYSBLKS];	/* active block index within each row */

	int nr_ppas;		/* number of valid entries in ppas[] */
	struct ppa_addr ppas[MAX_SYSBLKS * MAX_BLKS_PR_SYSBLK];/* all sysblks */
};
36
37static inline int scan_ppa_idx(int row, int blkid)
38{
39 return (row * MAX_BLKS_PR_SYSBLK) + blkid;
40}
41
42void nvm_sysblk_to_cpu(struct nvm_sb_info *info, struct nvm_system_block *sb)
43{
44 info->seqnr = be32_to_cpu(sb->seqnr);
45 info->erase_cnt = be32_to_cpu(sb->erase_cnt);
46 info->version = be16_to_cpu(sb->version);
47 strncpy(info->mmtype, sb->mmtype, NVM_MMTYPE_LEN);
48 info->fs_ppa.ppa = be64_to_cpu(sb->fs_ppa);
49}
50
51void nvm_cpu_to_sysblk(struct nvm_system_block *sb, struct nvm_sb_info *info)
52{
53 sb->magic = cpu_to_be32(NVM_SYSBLK_MAGIC);
54 sb->seqnr = cpu_to_be32(info->seqnr);
55 sb->erase_cnt = cpu_to_be32(info->erase_cnt);
56 sb->version = cpu_to_be16(info->version);
57 strncpy(sb->mmtype, info->mmtype, NVM_MMTYPE_LEN);
58 sb->fs_ppa = cpu_to_be64(info->fs_ppa.ppa);
59}
60
61static int nvm_setup_sysblks(struct nvm_dev *dev, struct ppa_addr *sysblk_ppas)
62{
63 int nr_rows = min_t(int, MAX_SYSBLKS, dev->nr_chnls);
64 int i;
65
66 for (i = 0; i < nr_rows; i++)
67 sysblk_ppas[i].ppa = 0;
68
69 /* if possible, place sysblk at first channel, middle channel and last
70 * channel of the device. If not, create only one or two sys blocks
71 */
72 switch (dev->nr_chnls) {
73 case 2:
74 sysblk_ppas[1].g.ch = 1;
75 /* fall-through */
76 case 1:
77 sysblk_ppas[0].g.ch = 0;
78 break;
79 default:
80 sysblk_ppas[0].g.ch = 0;
81 sysblk_ppas[1].g.ch = dev->nr_chnls / 2;
82 sysblk_ppas[2].g.ch = dev->nr_chnls - 1;
83 break;
84 }
85
86 return nr_rows;
87}
88
89void nvm_setup_sysblk_scan(struct nvm_dev *dev, struct sysblk_scan *s,
90 struct ppa_addr *sysblk_ppas)
91{
92 memset(s, 0, sizeof(struct sysblk_scan));
93 s->nr_rows = nvm_setup_sysblks(dev, sysblk_ppas);
94}
95
96static int sysblk_get_host_blks(struct ppa_addr ppa, int nr_blks, u8 *blks,
97 void *private)
98{
99 struct sysblk_scan *s = private;
100 int i, nr_sysblk = 0;
101
102 for (i = 0; i < nr_blks; i++) {
103 if (blks[i] != NVM_BLK_T_HOST)
104 continue;
105
106 if (s->nr_ppas == MAX_BLKS_PR_SYSBLK * MAX_SYSBLKS) {
107 pr_err("nvm: too many host blks\n");
108 return -EINVAL;
109 }
110
111 ppa.g.blk = i;
112
113 s->ppas[scan_ppa_idx(s->row, nr_sysblk)] = ppa;
114 s->nr_ppas++;
115 nr_sysblk++;
116 }
117
118 return 0;
119}
120
121static int nvm_get_all_sysblks(struct nvm_dev *dev, struct sysblk_scan *s,
122 struct ppa_addr *ppas, nvm_bb_update_fn *fn)
123{
124 struct ppa_addr dppa;
125 int i, ret;
126
127 s->nr_ppas = 0;
128
129 for (i = 0; i < s->nr_rows; i++) {
130 dppa = generic_to_dev_addr(dev, ppas[i]);
131 s->row = i;
132
133 ret = dev->ops->get_bb_tbl(dev, dppa, dev->blks_per_lun, fn, s);
134 if (ret) {
135 pr_err("nvm: failed bb tbl for ppa (%u %u)\n",
136 ppas[i].g.ch,
137 ppas[i].g.blk);
138 return ret;
139 }
140 }
141
142 return ret;
143}
144
/*
 * scans a block for latest sysblk.
 * Returns:
 *	0 - newer sysblk not found. PPA is updated to latest page.
 *	1 - newer sysblk found and stored in *sblk. PPA is updated to
 *	    next valid page.
 *	<0- error.
 */
static int nvm_scan_block(struct nvm_dev *dev, struct ppa_addr *ppa,
						struct nvm_system_block *sblk)
{
	struct nvm_system_block *cur;
	int pg, cursz, ret, found = 0;

	/* the full buffer for a flash page is allocated. Only the first of it
	 * contains the system block information
	 */
	cursz = dev->sec_size * dev->sec_per_pg * dev->nr_planes;
	cur = kmalloc(cursz, GFP_KERNEL);
	if (!cur)
		return -ENOMEM;

	/* perform linear scan through the block; pages are read in SLC mode */
	for (pg = 0; pg < dev->lps_per_blk; pg++) {
		ppa->g.pg = ppa_to_slc(dev, pg);

		ret = nvm_submit_ppa(dev, ppa, 1, NVM_OP_PREAD, NVM_IO_SLC_MODE,
								cur, cursz);
		if (ret) {
			if (ret == NVM_RSP_ERR_EMPTYPAGE) {
				/* first never-written page: scan is complete */
				pr_debug("nvm: sysblk scan empty ppa (%u %u %u %u)\n",
							ppa->g.ch,
							ppa->g.lun,
							ppa->g.blk,
							ppa->g.pg);
				break;
			}
			pr_err("nvm: read failed (%x) for ppa (%u %u %u %u)",
							ret,
							ppa->g.ch,
							ppa->g.lun,
							ppa->g.blk,
							ppa->g.pg);
			break; /* if we can't read a page, continue to the
				* next blk
				*/
		}

		if (be32_to_cpu(cur->magic) != NVM_SYSBLK_MAGIC) {
			pr_debug("nvm: scan break for ppa (%u %u %u %u)\n",
							ppa->g.ch,
							ppa->g.lun,
							ppa->g.blk,
							ppa->g.pg);
			break; /* last valid page already found */
		}

		/* keep only entries at least as new as what we already have */
		if (be32_to_cpu(cur->seqnr) < be32_to_cpu(sblk->seqnr))
			continue;

		memcpy(sblk, cur, sizeof(struct nvm_system_block));
		found = 1;
	}

	kfree(cur);

	return found;
}
213
214static int nvm_set_bb_tbl(struct nvm_dev *dev, struct sysblk_scan *s, int type)
215{
216 struct nvm_rq rqd;
217 int ret;
218
219 if (s->nr_ppas > dev->ops->max_phys_sect) {
220 pr_err("nvm: unable to update all sysblocks atomically\n");
221 return -EINVAL;
222 }
223
224 memset(&rqd, 0, sizeof(struct nvm_rq));
225
226 nvm_set_rqd_ppalist(dev, &rqd, s->ppas, s->nr_ppas);
227 nvm_generic_to_addr_mode(dev, &rqd);
228
229 ret = dev->ops->set_bb_tbl(dev, &rqd, type);
230 nvm_free_rqd_ppalist(dev, &rqd);
231 if (ret) {
232 pr_err("nvm: sysblk failed bb mark\n");
233 return -EINVAL;
234 }
235
236 return 0;
237}
238
/*
 * Bad-block-table callback: pick the first MAX_BLKS_PR_SYSBLK free blocks
 * of this lun as new sysblk candidates for the current row.
 *
 * Returns 0 when enough free blocks were found, -EEXIST if the lun already
 * contains a host-reserved block, and -EINVAL if the lun runs out of free
 * blocks first.
 */
static int sysblk_get_free_blks(struct ppa_addr ppa, int nr_blks, u8 *blks,
								void *private)
{
	struct sysblk_scan *s = private;
	struct ppa_addr *sppa;
	int i, blkid = 0;

	for (i = 0; i < nr_blks; i++) {
		/* a host block means sysblks were already initialized here */
		if (blks[i] == NVM_BLK_T_HOST)
			return -EEXIST;

		if (blks[i] != NVM_BLK_T_FREE)
			continue;

		sppa = &s->ppas[scan_ppa_idx(s->row, blkid)];
		sppa->g.ch = ppa.g.ch;
		sppa->g.lun = ppa.g.lun;
		sppa->g.blk = i;
		s->nr_ppas++;
		blkid++;

		pr_debug("nvm: use (%u %u %u) as sysblk\n",
					sppa->g.ch, sppa->g.lun, sppa->g.blk);
		/* stop once the row has its full complement of blocks */
		if (blkid > MAX_BLKS_PR_SYSBLK - 1)
			return 0;
	}

	pr_err("nvm: sysblk failed get sysblk\n");
	return -EINVAL;
}
269
270static int nvm_write_and_verify(struct nvm_dev *dev, struct nvm_sb_info *info,
271 struct sysblk_scan *s)
272{
273 struct nvm_system_block nvmsb;
274 void *buf;
275 int i, sect, ret, bufsz;
276 struct ppa_addr *ppas;
277
278 nvm_cpu_to_sysblk(&nvmsb, info);
279
280 /* buffer for flash page */
281 bufsz = dev->sec_size * dev->sec_per_pg * dev->nr_planes;
282 buf = kzalloc(bufsz, GFP_KERNEL);
283 if (!buf)
284 return -ENOMEM;
285 memcpy(buf, &nvmsb, sizeof(struct nvm_system_block));
286
287 ppas = kcalloc(dev->sec_per_pg, sizeof(struct ppa_addr), GFP_KERNEL);
288 if (!ppas) {
289 ret = -ENOMEM;
290 goto err;
291 }
292
293 /* Write and verify */
294 for (i = 0; i < s->nr_rows; i++) {
295 ppas[0] = s->ppas[scan_ppa_idx(i, s->act_blk[i])];
296
297 pr_debug("nvm: writing sysblk to ppa (%u %u %u %u)\n",
298 ppas[0].g.ch,
299 ppas[0].g.lun,
300 ppas[0].g.blk,
301 ppas[0].g.pg);
302
303 /* Expand to all sectors within a flash page */
304 if (dev->sec_per_pg > 1) {
305 for (sect = 1; sect < dev->sec_per_pg; sect++) {
306 ppas[sect].ppa = ppas[0].ppa;
307 ppas[sect].g.sec = sect;
308 }
309 }
310
311 ret = nvm_submit_ppa(dev, ppas, dev->sec_per_pg, NVM_OP_PWRITE,
312 NVM_IO_SLC_MODE, buf, bufsz);
313 if (ret) {
314 pr_err("nvm: sysblk failed program (%u %u %u)\n",
315 ppas[0].g.ch,
316 ppas[0].g.lun,
317 ppas[0].g.blk);
318 break;
319 }
320
321 ret = nvm_submit_ppa(dev, ppas, dev->sec_per_pg, NVM_OP_PREAD,
322 NVM_IO_SLC_MODE, buf, bufsz);
323 if (ret) {
324 pr_err("nvm: sysblk failed read (%u %u %u)\n",
325 ppas[0].g.ch,
326 ppas[0].g.lun,
327 ppas[0].g.blk);
328 break;
329 }
330
331 if (memcmp(buf, &nvmsb, sizeof(struct nvm_system_block))) {
332 pr_err("nvm: sysblk failed verify (%u %u %u)\n",
333 ppas[0].g.ch,
334 ppas[0].g.lun,
335 ppas[0].g.blk);
336 ret = -EINVAL;
337 break;
338 }
339 }
340
341 kfree(ppas);
342err:
343 kfree(buf);
344
345 return ret;
346}
347
348static int nvm_prepare_new_sysblks(struct nvm_dev *dev, struct sysblk_scan *s)
349{
350 int i, ret;
351 unsigned long nxt_blk;
352 struct ppa_addr *ppa;
353
354 for (i = 0; i < s->nr_rows; i++) {
355 nxt_blk = (s->act_blk[i] + 1) % MAX_BLKS_PR_SYSBLK;
356 ppa = &s->ppas[scan_ppa_idx(i, nxt_blk)];
357 ppa->g.pg = ppa_to_slc(dev, 0);
358
359 ret = nvm_erase_ppa(dev, ppa, 1);
360 if (ret)
361 return ret;
362
363 s->act_blk[i] = nxt_blk;
364 }
365
366 return 0;
367}
368
369int nvm_get_sysblock(struct nvm_dev *dev, struct nvm_sb_info *info)
370{
371 struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
372 struct sysblk_scan s;
373 struct nvm_system_block *cur;
374 int i, j, found = 0;
375 int ret = -ENOMEM;
376
377 /*
378 * 1. setup sysblk locations
379 * 2. get bad block list
380 * 3. filter on host-specific (type 3)
381 * 4. iterate through all and find the highest seq nr.
382 * 5. return superblock information
383 */
384
385 if (!dev->ops->get_bb_tbl)
386 return -EINVAL;
387
388 nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);
389
390 mutex_lock(&dev->mlock);
391 ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, sysblk_get_host_blks);
392 if (ret)
393 goto err_sysblk;
394
395 /* no sysblocks initialized */
396 if (!s.nr_ppas)
397 goto err_sysblk;
398
399 cur = kzalloc(sizeof(struct nvm_system_block), GFP_KERNEL);
400 if (!cur)
401 goto err_sysblk;
402
403 /* find the latest block across all sysblocks */
404 for (i = 0; i < s.nr_rows; i++) {
405 for (j = 0; j < MAX_BLKS_PR_SYSBLK; j++) {
406 struct ppa_addr ppa = s.ppas[scan_ppa_idx(i, j)];
407
408 ret = nvm_scan_block(dev, &ppa, cur);
409 if (ret > 0)
410 found = 1;
411 else if (ret < 0)
412 break;
413 }
414 }
415
416 nvm_sysblk_to_cpu(info, cur);
417
418 kfree(cur);
419err_sysblk:
420 mutex_unlock(&dev->mlock);
421
422 if (found)
423 return 1;
424 return ret;
425}
426
427int nvm_update_sysblock(struct nvm_dev *dev, struct nvm_sb_info *new)
428{
429 /* 1. for each latest superblock
430 * 2. if room
431 * a. write new flash page entry with the updated information
432 * 3. if no room
433 * a. find next available block on lun (linear search)
434 * if none, continue to next lun
435 * if none at all, report error. also report that it wasn't
436 * possible to write to all superblocks.
437 * c. write data to block.
438 */
439 struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
440 struct sysblk_scan s;
441 struct nvm_system_block *cur;
442 int i, j, ppaidx, found = 0;
443 int ret = -ENOMEM;
444
445 if (!dev->ops->get_bb_tbl)
446 return -EINVAL;
447
448 nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);
449
450 mutex_lock(&dev->mlock);
451 ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, sysblk_get_host_blks);
452 if (ret)
453 goto err_sysblk;
454
455 cur = kzalloc(sizeof(struct nvm_system_block), GFP_KERNEL);
456 if (!cur)
457 goto err_sysblk;
458
459 /* Get the latest sysblk for each sysblk row */
460 for (i = 0; i < s.nr_rows; i++) {
461 found = 0;
462 for (j = 0; j < MAX_BLKS_PR_SYSBLK; j++) {
463 ppaidx = scan_ppa_idx(i, j);
464 ret = nvm_scan_block(dev, &s.ppas[ppaidx], cur);
465 if (ret > 0) {
466 s.act_blk[i] = j;
467 found = 1;
468 } else if (ret < 0)
469 break;
470 }
471 }
472
473 if (!found) {
474 pr_err("nvm: no valid sysblks found to update\n");
475 ret = -EINVAL;
476 goto err_cur;
477 }
478
479 /*
480 * All sysblocks found. Check that they have same page id in their flash
481 * blocks
482 */
483 for (i = 1; i < s.nr_rows; i++) {
484 struct ppa_addr l = s.ppas[scan_ppa_idx(0, s.act_blk[0])];
485 struct ppa_addr r = s.ppas[scan_ppa_idx(i, s.act_blk[i])];
486
487 if (l.g.pg != r.g.pg) {
488 pr_err("nvm: sysblks not on same page. Previous update failed.\n");
489 ret = -EINVAL;
490 goto err_cur;
491 }
492 }
493
494 /*
495 * Check that there haven't been another update to the seqnr since we
496 * began
497 */
498 if ((new->seqnr - 1) != be32_to_cpu(cur->seqnr)) {
499 pr_err("nvm: seq is not sequential\n");
500 ret = -EINVAL;
501 goto err_cur;
502 }
503
504 /*
505 * When all pages in a block has been written, a new block is selected
506 * and writing is performed on the new block.
507 */
508 if (s.ppas[scan_ppa_idx(0, s.act_blk[0])].g.pg ==
509 dev->lps_per_blk - 1) {
510 ret = nvm_prepare_new_sysblks(dev, &s);
511 if (ret)
512 goto err_cur;
513 }
514
515 ret = nvm_write_and_verify(dev, new, &s);
516err_cur:
517 kfree(cur);
518err_sysblk:
519 mutex_unlock(&dev->mlock);
520
521 return ret;
522}
523
/*
 * One-time initialization of the device's system blocks: pick free blocks,
 * reserve them as host-owned in the bad block table, and write @info.
 *
 * Returns 0 on success or a negative error code.
 */
int nvm_init_sysblock(struct nvm_dev *dev, struct nvm_sb_info *info)
{
	struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
	struct sysblk_scan s;
	int ret;

	/*
	 * 1. select master blocks and select first available blks
	 * 2. get bad block list
	 * 3. mark MAX_SYSBLKS block as host-based device allocated.
	 * 4. write and verify data to block
	 */

	if (!dev->ops->get_bb_tbl || !dev->ops->set_bb_tbl)
		return -EINVAL;

	/* sysblk pages are written in SLC mode; the media must support it */
	if (!(dev->mccap & NVM_ID_CAP_SLC) || !dev->lps_per_blk) {
		pr_err("nvm: memory does not support SLC access\n");
		return -EINVAL;
	}

	/* Index all sysblocks and mark them as host-driven */
	nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);

	mutex_lock(&dev->mlock);
	ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, sysblk_get_free_blks);
	if (ret)
		goto err_mark;

	ret = nvm_set_bb_tbl(dev, &s, NVM_BLK_T_HOST);
	if (ret)
		goto err_mark;

	/* Write to the first block of each row */
	ret = nvm_write_and_verify(dev, info, &s);
err_mark:
	mutex_unlock(&dev->mlock);
	return ret;
}
563
/* State for a factory reset: per-lun bitmaps of blocks to leave alone. */
struct factory_blks {
	struct nvm_dev *dev;
	int flags;		/* NVM_FACTORY_* reset flags */
	unsigned long *blks;	/* set bit = keep block; clear bit = erase */
};
569
570static int factory_nblks(int nblks)
571{
572 /* Round up to nearest BITS_PER_LONG */
573 return (nblks + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1);
574}
575
576static unsigned int factory_blk_offset(struct nvm_dev *dev, int ch, int lun)
577{
578 int nblks = factory_nblks(dev->blks_per_lun);
579
580 return ((ch * dev->luns_per_chnl * nblks) + (lun * nblks)) /
581 BITS_PER_LONG;
582}
583
/*
 * Bad-block-table callback for factory reset: set, in the lun's bitmap,
 * the blocks that must NOT be erased according to f->flags.
 */
static int nvm_factory_blks(struct ppa_addr ppa, int nr_blks, u8 *blks,
								void *private)
{
	struct factory_blks *f = private;
	struct nvm_dev *dev = f->dev;
	int i, lunoff;

	lunoff = factory_blk_offset(dev, ppa.g.ch, ppa.g.lun);

	/* non-set bits correspond to the block must be erased */
	for (i = 0; i < nr_blks; i++) {
		switch (blks[i]) {
		case NVM_BLK_T_FREE:
			/* free blocks are kept when only erasing user data */
			if (f->flags & NVM_FACTORY_ERASE_ONLY_USER)
				set_bit(i, &f->blks[lunoff]);
			break;
		case NVM_BLK_T_HOST:
			if (!(f->flags & NVM_FACTORY_RESET_HOST_BLKS))
				set_bit(i, &f->blks[lunoff]);
			break;
		case NVM_BLK_T_GRWN_BAD:
			if (!(f->flags & NVM_FACTORY_RESET_GRWN_BBLKS))
				set_bit(i, &f->blks[lunoff]);
			break;
		default:
			/* any other block type is never erased */
			set_bit(i, &f->blks[lunoff]);
			break;
		}
	}

	return 0;
}
616
/*
 * Collect up to @max_ppas to-be-erased blocks (clear bits in the factory
 * bitmaps) into @erase_list, marking each bit as it is claimed. Iterates
 * round-robin over all channel/lun pairs.
 *
 * Returns the number of ppas placed in @erase_list (0 when none remain).
 */
static int nvm_fact_get_blks(struct nvm_dev *dev, struct ppa_addr *erase_list,
					int max_ppas, struct factory_blks *f)
{
	struct ppa_addr ppa;
	int ch, lun, blkid, idx, done = 0, ppa_cnt = 0;
	unsigned long *offset;

	while (!done) {
		done = 1;
		for (ch = 0; ch < dev->nr_chnls; ch++) {
			for (lun = 0; lun < dev->luns_per_chnl; lun++) {
				idx = factory_blk_offset(dev, ch, lun);
				offset = &f->blks[idx];

				blkid = find_first_zero_bit(offset,
							dev->blks_per_lun);
				/* this lun has no candidates left */
				if (blkid >= dev->blks_per_lun)
					continue;
				/* claim the block so it is returned once */
				set_bit(blkid, offset);

				ppa.ppa = 0;
				ppa.g.ch = ch;
				ppa.g.lun = lun;
				ppa.g.blk = blkid;
				pr_debug("nvm: erase ppa (%u %u %u)\n",
								ppa.g.ch,
								ppa.g.lun,
								ppa.g.blk);

				erase_list[ppa_cnt] = ppa;
				ppa_cnt++;
				done = 0;

				if (ppa_cnt == max_ppas)
					return ppa_cnt;
			}
		}
	}

	return ppa_cnt;
}
658
659static int nvm_fact_get_bb_tbl(struct nvm_dev *dev, struct ppa_addr ppa,
660 nvm_bb_update_fn *fn, void *priv)
661{
662 struct ppa_addr dev_ppa;
663 int ret;
664
665 dev_ppa = generic_to_dev_addr(dev, ppa);
666
667 ret = dev->ops->get_bb_tbl(dev, dev_ppa, dev->blks_per_lun, fn, priv);
668 if (ret)
669 pr_err("nvm: failed bb tbl for ch%u lun%u\n",
670 ppa.g.ch, ppa.g.blk);
671 return ret;
672}
673
674static int nvm_fact_select_blks(struct nvm_dev *dev, struct factory_blks *f)
675{
676 int ch, lun, ret;
677 struct ppa_addr ppa;
678
679 ppa.ppa = 0;
680 for (ch = 0; ch < dev->nr_chnls; ch++) {
681 for (lun = 0; lun < dev->luns_per_chnl; lun++) {
682 ppa.g.ch = ch;
683 ppa.g.lun = lun;
684
685 ret = nvm_fact_get_bb_tbl(dev, ppa, nvm_factory_blks,
686 f);
687 if (ret)
688 return ret;
689 }
690 }
691
692 return 0;
693}
694
/*
 * Reset the device towards factory state: erase every block selected by
 * @flags (NVM_FACTORY_*) and, if requested, release host-reserved system
 * blocks back to the free pool.
 *
 * Returns 0 on success or a negative error code.
 */
int nvm_dev_factory(struct nvm_dev *dev, int flags)
{
	struct factory_blks f;
	struct ppa_addr *ppas;
	int ppa_cnt, ret = -ENOMEM;
	/* max blocks erasable per batch; presumably divided by planes
	 * because erase ppas expand per plane - TODO confirm
	 */
	int max_ppas = dev->ops->max_phys_sect / dev->nr_planes;
	struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
	struct sysblk_scan s;

	/* NOTE(review): factory_nblks() returns a bit count but it is used
	 * here as a byte count - looks 8x oversized; confirm intended size.
	 */
	f.blks = kzalloc(factory_nblks(dev->blks_per_lun) * dev->nr_luns,
								GFP_KERNEL);
	if (!f.blks)
		return ret;

	ppas = kcalloc(max_ppas, sizeof(struct ppa_addr), GFP_KERNEL);
	if (!ppas)
		goto err_blks;

	f.dev = dev;
	f.flags = flags;

	/* create list of blks to be erased */
	ret = nvm_fact_select_blks(dev, &f);
	if (ret)
		goto err_ppas;

	/* keep erasing batches until no candidate blks remain */
	while ((ppa_cnt = nvm_fact_get_blks(dev, ppas, max_ppas, &f)) > 0)
		nvm_erase_ppa(dev, ppas, ppa_cnt);

	/* mark host reserved blocks free */
	if (flags & NVM_FACTORY_RESET_HOST_BLKS) {
		nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);
		mutex_lock(&dev->mlock);
		ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas,
							sysblk_get_host_blks);
		if (!ret)
			ret = nvm_set_bb_tbl(dev, &s, NVM_BLK_T_FREE);
		mutex_unlock(&dev->mlock);
	}
err_ppas:
	kfree(ppas);
err_blks:
	kfree(f.blks);
	return ret;
}
EXPORT_SYMBOL(nvm_dev_factory);
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index 1af54ea20e7b..71f2bbc865cf 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -146,6 +146,16 @@ struct nvme_nvm_command {
146 }; 146 };
147}; 147};
148 148
149struct nvme_nvm_lp_mlc {
150 __u16 num_pairs;
151 __u8 pairs[886];
152};
153
154struct nvme_nvm_lp_tbl {
155 __u8 id[8];
156 struct nvme_nvm_lp_mlc mlc;
157};
158
149struct nvme_nvm_id_group { 159struct nvme_nvm_id_group {
150 __u8 mtype; 160 __u8 mtype;
151 __u8 fmtype; 161 __u8 fmtype;
@@ -169,7 +179,8 @@ struct nvme_nvm_id_group {
169 __le32 mpos; 179 __le32 mpos;
170 __le32 mccap; 180 __le32 mccap;
171 __le16 cpar; 181 __le16 cpar;
172 __u8 reserved[906]; 182 __u8 reserved[10];
183 struct nvme_nvm_lp_tbl lptbl;
173} __packed; 184} __packed;
174 185
175struct nvme_nvm_addr_format { 186struct nvme_nvm_addr_format {
@@ -266,6 +277,15 @@ static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id)
266 dst->mccap = le32_to_cpu(src->mccap); 277 dst->mccap = le32_to_cpu(src->mccap);
267 278
268 dst->cpar = le16_to_cpu(src->cpar); 279 dst->cpar = le16_to_cpu(src->cpar);
280
281 if (dst->fmtype == NVM_ID_FMTYPE_MLC) {
282 memcpy(dst->lptbl.id, src->lptbl.id, 8);
283 dst->lptbl.mlc.num_pairs =
284 le16_to_cpu(src->lptbl.mlc.num_pairs);
285 /* 4 bits per pair */
286 memcpy(dst->lptbl.mlc.pairs, src->lptbl.mlc.pairs,
287 dst->lptbl.mlc.num_pairs >> 1);
288 }
269 } 289 }
270 290
271 return 0; 291 return 0;
@@ -405,11 +425,6 @@ static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
405 425
406 ppa = dev_to_generic_addr(nvmdev, ppa); 426 ppa = dev_to_generic_addr(nvmdev, ppa);
407 ret = update_bbtbl(ppa, nr_blocks, bb_tbl->blk, priv); 427 ret = update_bbtbl(ppa, nr_blocks, bb_tbl->blk, priv);
408 if (ret) {
409 ret = -EINTR;
410 goto out;
411 }
412
413out: 428out:
414 kfree(bb_tbl); 429 kfree(bb_tbl);
415 return ret; 430 return ret;
@@ -453,11 +468,8 @@ static inline void nvme_nvm_rqtocmd(struct request *rq, struct nvm_rq *rqd,
453static void nvme_nvm_end_io(struct request *rq, int error) 468static void nvme_nvm_end_io(struct request *rq, int error)
454{ 469{
455 struct nvm_rq *rqd = rq->end_io_data; 470 struct nvm_rq *rqd = rq->end_io_data;
456 struct nvm_dev *dev = rqd->dev;
457 471
458 if (dev->mt && dev->mt->end_io(rqd, error)) 472 nvm_end_io(rqd, error);
459 pr_err("nvme: err status: %x result: %lx\n",
460 rq->errors, (unsigned long)rq->special);
461 473
462 kfree(rq->cmd); 474 kfree(rq->cmd);
463 blk_mq_free_request(rq); 475 blk_mq_free_request(rq);
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index 034117b3be5f..d6750111e48e 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -1,6 +1,8 @@
1#ifndef NVM_H 1#ifndef NVM_H
2#define NVM_H 2#define NVM_H
3 3
4#include <linux/types.h>
5
4enum { 6enum {
5 NVM_IO_OK = 0, 7 NVM_IO_OK = 0,
6 NVM_IO_REQUEUE = 1, 8 NVM_IO_REQUEUE = 1,
@@ -11,12 +13,74 @@ enum {
11 NVM_IOTYPE_GC = 1, 13 NVM_IOTYPE_GC = 1,
12}; 14};
13 15
16#define NVM_BLK_BITS (16)
17#define NVM_PG_BITS (16)
18#define NVM_SEC_BITS (8)
19#define NVM_PL_BITS (8)
20#define NVM_LUN_BITS (8)
21#define NVM_CH_BITS (8)
22
23struct ppa_addr {
24 /* Generic structure for all addresses */
25 union {
26 struct {
27 u64 blk : NVM_BLK_BITS;
28 u64 pg : NVM_PG_BITS;
29 u64 sec : NVM_SEC_BITS;
30 u64 pl : NVM_PL_BITS;
31 u64 lun : NVM_LUN_BITS;
32 u64 ch : NVM_CH_BITS;
33 } g;
34
35 u64 ppa;
36 };
37};
38
39struct nvm_rq;
40struct nvm_id;
41struct nvm_dev;
42
43typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *);
44typedef int (nvm_bb_update_fn)(struct ppa_addr, int, u8 *, void *);
45typedef int (nvm_id_fn)(struct nvm_dev *, struct nvm_id *);
46typedef int (nvm_get_l2p_tbl_fn)(struct nvm_dev *, u64, u32,
47 nvm_l2p_update_fn *, void *);
48typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, int,
49 nvm_bb_update_fn *, void *);
50typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct nvm_rq *, int);
51typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
52typedef int (nvm_erase_blk_fn)(struct nvm_dev *, struct nvm_rq *);
53typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *);
54typedef void (nvm_destroy_dma_pool_fn)(void *);
55typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t,
56 dma_addr_t *);
57typedef void (nvm_dev_dma_free_fn)(void *, void*, dma_addr_t);
58
59struct nvm_dev_ops {
60 nvm_id_fn *identity;
61 nvm_get_l2p_tbl_fn *get_l2p_tbl;
62 nvm_op_bb_tbl_fn *get_bb_tbl;
63 nvm_op_set_bb_fn *set_bb_tbl;
64
65 nvm_submit_io_fn *submit_io;
66 nvm_erase_blk_fn *erase_block;
67
68 nvm_create_dma_pool_fn *create_dma_pool;
69 nvm_destroy_dma_pool_fn *destroy_dma_pool;
70 nvm_dev_dma_alloc_fn *dev_dma_alloc;
71 nvm_dev_dma_free_fn *dev_dma_free;
72
73 unsigned int max_phys_sect;
74};
75
76
77
14#ifdef CONFIG_NVM 78#ifdef CONFIG_NVM
15 79
16#include <linux/blkdev.h> 80#include <linux/blkdev.h>
17#include <linux/types.h>
18#include <linux/file.h> 81#include <linux/file.h>
19#include <linux/dmapool.h> 82#include <linux/dmapool.h>
83#include <uapi/linux/lightnvm.h>
20 84
21enum { 85enum {
22 /* HW Responsibilities */ 86 /* HW Responsibilities */
@@ -58,8 +122,29 @@ enum {
58 /* Block Types */ 122 /* Block Types */
59 NVM_BLK_T_FREE = 0x0, 123 NVM_BLK_T_FREE = 0x0,
60 NVM_BLK_T_BAD = 0x1, 124 NVM_BLK_T_BAD = 0x1,
61 NVM_BLK_T_DEV = 0x2, 125 NVM_BLK_T_GRWN_BAD = 0x2,
62 NVM_BLK_T_HOST = 0x4, 126 NVM_BLK_T_DEV = 0x4,
127 NVM_BLK_T_HOST = 0x8,
128
129 /* Memory capabilities */
130 NVM_ID_CAP_SLC = 0x1,
131 NVM_ID_CAP_CMD_SUSPEND = 0x2,
132 NVM_ID_CAP_SCRAMBLE = 0x4,
133 NVM_ID_CAP_ENCRYPT = 0x8,
134
135 /* Memory types */
136 NVM_ID_FMTYPE_SLC = 0,
137 NVM_ID_FMTYPE_MLC = 1,
138};
139
140struct nvm_id_lp_mlc {
141 u16 num_pairs;
142 u8 pairs[886];
143};
144
145struct nvm_id_lp_tbl {
146 __u8 id[8];
147 struct nvm_id_lp_mlc mlc;
63}; 148};
64 149
65struct nvm_id_group { 150struct nvm_id_group {
@@ -82,6 +167,8 @@ struct nvm_id_group {
82 u32 mpos; 167 u32 mpos;
83 u32 mccap; 168 u32 mccap;
84 u16 cpar; 169 u16 cpar;
170
171 struct nvm_id_lp_tbl lptbl;
85}; 172};
86 173
87struct nvm_addr_format { 174struct nvm_addr_format {
@@ -125,28 +212,8 @@ struct nvm_tgt_instance {
125#define NVM_VERSION_MINOR 0 212#define NVM_VERSION_MINOR 0
126#define NVM_VERSION_PATCH 0 213#define NVM_VERSION_PATCH 0
127 214
128#define NVM_BLK_BITS (16) 215struct nvm_rq;
129#define NVM_PG_BITS (16) 216typedef void (nvm_end_io_fn)(struct nvm_rq *);
130#define NVM_SEC_BITS (8)
131#define NVM_PL_BITS (8)
132#define NVM_LUN_BITS (8)
133#define NVM_CH_BITS (8)
134
135struct ppa_addr {
136 /* Generic structure for all addresses */
137 union {
138 struct {
139 u64 blk : NVM_BLK_BITS;
140 u64 pg : NVM_PG_BITS;
141 u64 sec : NVM_SEC_BITS;
142 u64 pl : NVM_PL_BITS;
143 u64 lun : NVM_LUN_BITS;
144 u64 ch : NVM_CH_BITS;
145 } g;
146
147 u64 ppa;
148 };
149};
150 217
151struct nvm_rq { 218struct nvm_rq {
152 struct nvm_tgt_instance *ins; 219 struct nvm_tgt_instance *ins;
@@ -164,9 +231,14 @@ struct nvm_rq {
164 void *metadata; 231 void *metadata;
165 dma_addr_t dma_metadata; 232 dma_addr_t dma_metadata;
166 233
234 struct completion *wait;
235 nvm_end_io_fn *end_io;
236
167 uint8_t opcode; 237 uint8_t opcode;
168 uint16_t nr_pages; 238 uint16_t nr_pages;
169 uint16_t flags; 239 uint16_t flags;
240
241 int error;
170}; 242};
171 243
172static inline struct nvm_rq *nvm_rq_from_pdu(void *pdu) 244static inline struct nvm_rq *nvm_rq_from_pdu(void *pdu)
@@ -181,51 +253,31 @@ static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata)
181 253
182struct nvm_block; 254struct nvm_block;
183 255
184typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *);
185typedef int (nvm_bb_update_fn)(struct ppa_addr, int, u8 *, void *);
186typedef int (nvm_id_fn)(struct nvm_dev *, struct nvm_id *);
187typedef int (nvm_get_l2p_tbl_fn)(struct nvm_dev *, u64, u32,
188 nvm_l2p_update_fn *, void *);
189typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, int,
190 nvm_bb_update_fn *, void *);
191typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct nvm_rq *, int);
192typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
193typedef int (nvm_erase_blk_fn)(struct nvm_dev *, struct nvm_rq *);
194typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *);
195typedef void (nvm_destroy_dma_pool_fn)(void *);
196typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t,
197 dma_addr_t *);
198typedef void (nvm_dev_dma_free_fn)(void *, void*, dma_addr_t);
199
200struct nvm_dev_ops {
201 nvm_id_fn *identity;
202 nvm_get_l2p_tbl_fn *get_l2p_tbl;
203 nvm_op_bb_tbl_fn *get_bb_tbl;
204 nvm_op_set_bb_fn *set_bb_tbl;
205
206 nvm_submit_io_fn *submit_io;
207 nvm_erase_blk_fn *erase_block;
208
209 nvm_create_dma_pool_fn *create_dma_pool;
210 nvm_destroy_dma_pool_fn *destroy_dma_pool;
211 nvm_dev_dma_alloc_fn *dev_dma_alloc;
212 nvm_dev_dma_free_fn *dev_dma_free;
213
214 unsigned int max_phys_sect;
215};
216
217struct nvm_lun { 256struct nvm_lun {
218 int id; 257 int id;
219 258
220 int lun_id; 259 int lun_id;
221 int chnl_id; 260 int chnl_id;
222 261
223 unsigned int nr_inuse_blocks; /* Number of used blocks */ 262 /* It is up to the target to mark blocks as closed. If the target does
263 * not do it, all blocks are marked as open, and nr_open_blocks
264 * represents the number of blocks in use
265 */
266 unsigned int nr_open_blocks; /* Number of used, writable blocks */
267 unsigned int nr_closed_blocks; /* Number of used, read-only blocks */
224 unsigned int nr_free_blocks; /* Number of unused blocks */ 268 unsigned int nr_free_blocks; /* Number of unused blocks */
225 unsigned int nr_bad_blocks; /* Number of bad blocks */ 269 unsigned int nr_bad_blocks; /* Number of bad blocks */
226 struct nvm_block *blocks;
227 270
228 spinlock_t lock; 271 spinlock_t lock;
272
273 struct nvm_block *blocks;
274};
275
276enum {
277 NVM_BLK_ST_FREE = 0x1, /* Free block */
278 NVM_BLK_ST_OPEN = 0x2, /* Open block - read-write */
279 NVM_BLK_ST_CLOSED = 0x4, /* Closed block - read-only */
280 NVM_BLK_ST_BAD = 0x8, /* Bad block */
229}; 281};
230 282
231struct nvm_block { 283struct nvm_block {
@@ -234,7 +286,16 @@ struct nvm_block {
234 unsigned long id; 286 unsigned long id;
235 287
236 void *priv; 288 void *priv;
237 int type; 289 int state;
290};
291
292/* system block cpu representation */
293struct nvm_sb_info {
294 unsigned long seqnr;
295 unsigned long erase_cnt;
296 unsigned int version;
297 char mmtype[NVM_MMTYPE_LEN];
298 struct ppa_addr fs_ppa;
238}; 299};
239 300
240struct nvm_dev { 301struct nvm_dev {
@@ -247,6 +308,9 @@ struct nvm_dev {
247 struct nvmm_type *mt; 308 struct nvmm_type *mt;
248 void *mp; 309 void *mp;
249 310
311 /* System blocks */
312 struct nvm_sb_info sb;
313
250 /* Device information */ 314 /* Device information */
251 int nr_chnls; 315 int nr_chnls;
252 int nr_planes; 316 int nr_planes;
@@ -256,6 +320,7 @@ struct nvm_dev {
256 int blks_per_lun; 320 int blks_per_lun;
257 int sec_size; 321 int sec_size;
258 int oob_size; 322 int oob_size;
323 int mccap;
259 struct nvm_addr_format ppaf; 324 struct nvm_addr_format ppaf;
260 325
261 /* Calculated/Cached values. These do not reflect the actual usable 326 /* Calculated/Cached values. These do not reflect the actual usable
@@ -268,6 +333,10 @@ struct nvm_dev {
268 int sec_per_blk; 333 int sec_per_blk;
269 int sec_per_lun; 334 int sec_per_lun;
270 335
336 /* lower page table */
337 int lps_per_blk;
338 int *lptbl;
339
271 unsigned long total_pages; 340 unsigned long total_pages;
272 unsigned long total_blocks; 341 unsigned long total_blocks;
273 int nr_luns; 342 int nr_luns;
@@ -280,6 +349,8 @@ struct nvm_dev {
280 /* Backend device */ 349 /* Backend device */
281 struct request_queue *q; 350 struct request_queue *q;
282 char name[DISK_NAME_LEN]; 351 char name[DISK_NAME_LEN];
352
353 struct mutex mlock;
283}; 354};
284 355
285static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev, 356static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev,
@@ -345,9 +416,13 @@ static inline struct ppa_addr block_to_ppa(struct nvm_dev *dev,
345 return ppa; 416 return ppa;
346} 417}
347 418
419static inline int ppa_to_slc(struct nvm_dev *dev, int slc_pg)
420{
421 return dev->lptbl[slc_pg];
422}
423
348typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *); 424typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *);
349typedef sector_t (nvm_tgt_capacity_fn)(void *); 425typedef sector_t (nvm_tgt_capacity_fn)(void *);
350typedef int (nvm_tgt_end_io_fn)(struct nvm_rq *, int);
351typedef void *(nvm_tgt_init_fn)(struct nvm_dev *, struct gendisk *, int, int); 426typedef void *(nvm_tgt_init_fn)(struct nvm_dev *, struct gendisk *, int, int);
352typedef void (nvm_tgt_exit_fn)(void *); 427typedef void (nvm_tgt_exit_fn)(void *);
353 428
@@ -358,7 +433,7 @@ struct nvm_tgt_type {
358 /* target entry points */ 433 /* target entry points */
359 nvm_tgt_make_rq_fn *make_rq; 434 nvm_tgt_make_rq_fn *make_rq;
360 nvm_tgt_capacity_fn *capacity; 435 nvm_tgt_capacity_fn *capacity;
361 nvm_tgt_end_io_fn *end_io; 436 nvm_end_io_fn *end_io;
362 437
363 /* module-specific init/teardown */ 438 /* module-specific init/teardown */
364 nvm_tgt_init_fn *init; 439 nvm_tgt_init_fn *init;
@@ -383,7 +458,6 @@ typedef int (nvmm_open_blk_fn)(struct nvm_dev *, struct nvm_block *);
383typedef int (nvmm_close_blk_fn)(struct nvm_dev *, struct nvm_block *); 458typedef int (nvmm_close_blk_fn)(struct nvm_dev *, struct nvm_block *);
384typedef void (nvmm_flush_blk_fn)(struct nvm_dev *, struct nvm_block *); 459typedef void (nvmm_flush_blk_fn)(struct nvm_dev *, struct nvm_block *);
385typedef int (nvmm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *); 460typedef int (nvmm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
386typedef int (nvmm_end_io_fn)(struct nvm_rq *, int);
387typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *, 461typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *,
388 unsigned long); 462 unsigned long);
389typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int); 463typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int);
@@ -397,6 +471,8 @@ struct nvmm_type {
397 nvmm_unregister_fn *unregister_mgr; 471 nvmm_unregister_fn *unregister_mgr;
398 472
399 /* Block administration callbacks */ 473 /* Block administration callbacks */
474 nvmm_get_blk_fn *get_blk_unlocked;
475 nvmm_put_blk_fn *put_blk_unlocked;
400 nvmm_get_blk_fn *get_blk; 476 nvmm_get_blk_fn *get_blk;
401 nvmm_put_blk_fn *put_blk; 477 nvmm_put_blk_fn *put_blk;
402 nvmm_open_blk_fn *open_blk; 478 nvmm_open_blk_fn *open_blk;
@@ -404,7 +480,6 @@ struct nvmm_type {
404 nvmm_flush_blk_fn *flush_blk; 480 nvmm_flush_blk_fn *flush_blk;
405 481
406 nvmm_submit_io_fn *submit_io; 482 nvmm_submit_io_fn *submit_io;
407 nvmm_end_io_fn *end_io;
408 nvmm_erase_blk_fn *erase_blk; 483 nvmm_erase_blk_fn *erase_blk;
409 484
410 /* Configuration management */ 485 /* Configuration management */
@@ -418,6 +493,10 @@ struct nvmm_type {
418extern int nvm_register_mgr(struct nvmm_type *); 493extern int nvm_register_mgr(struct nvmm_type *);
419extern void nvm_unregister_mgr(struct nvmm_type *); 494extern void nvm_unregister_mgr(struct nvmm_type *);
420 495
496extern struct nvm_block *nvm_get_blk_unlocked(struct nvm_dev *,
497 struct nvm_lun *, unsigned long);
498extern void nvm_put_blk_unlocked(struct nvm_dev *, struct nvm_block *);
499
421extern struct nvm_block *nvm_get_blk(struct nvm_dev *, struct nvm_lun *, 500extern struct nvm_block *nvm_get_blk(struct nvm_dev *, struct nvm_lun *,
422 unsigned long); 501 unsigned long);
423extern void nvm_put_blk(struct nvm_dev *, struct nvm_block *); 502extern void nvm_put_blk(struct nvm_dev *, struct nvm_block *);
@@ -427,7 +506,36 @@ extern int nvm_register(struct request_queue *, char *,
427extern void nvm_unregister(char *); 506extern void nvm_unregister(char *);
428 507
429extern int nvm_submit_io(struct nvm_dev *, struct nvm_rq *); 508extern int nvm_submit_io(struct nvm_dev *, struct nvm_rq *);
509extern void nvm_generic_to_addr_mode(struct nvm_dev *, struct nvm_rq *);
510extern void nvm_addr_to_generic_mode(struct nvm_dev *, struct nvm_rq *);
511extern int nvm_set_rqd_ppalist(struct nvm_dev *, struct nvm_rq *,
512 struct ppa_addr *, int);
513extern void nvm_free_rqd_ppalist(struct nvm_dev *, struct nvm_rq *);
514extern int nvm_erase_ppa(struct nvm_dev *, struct ppa_addr *, int);
430extern int nvm_erase_blk(struct nvm_dev *, struct nvm_block *); 515extern int nvm_erase_blk(struct nvm_dev *, struct nvm_block *);
516extern void nvm_end_io(struct nvm_rq *, int);
517extern int nvm_submit_ppa(struct nvm_dev *, struct ppa_addr *, int, int, int,
518 void *, int);
519
520/* sysblk.c */
521#define NVM_SYSBLK_MAGIC 0x4E564D53 /* "NVMS" */
522
523/* system block on disk representation */
524struct nvm_system_block {
525 __be32 magic; /* magic signature */
526 __be32 seqnr; /* sequence number */
527 __be32 erase_cnt; /* erase count */
528 __be16 version; /* version number */
529 u8 mmtype[NVM_MMTYPE_LEN]; /* media manager name */
530 __be64 fs_ppa; /* PPA for media manager
531 * superblock */
532};
533
534extern int nvm_get_sysblock(struct nvm_dev *, struct nvm_sb_info *);
535extern int nvm_update_sysblock(struct nvm_dev *, struct nvm_sb_info *);
536extern int nvm_init_sysblock(struct nvm_dev *, struct nvm_sb_info *);
537
538extern int nvm_dev_factory(struct nvm_dev *, int flags);
431#else /* CONFIG_NVM */ 539#else /* CONFIG_NVM */
432struct nvm_dev_ops; 540struct nvm_dev_ops;
433 541
diff --git a/include/uapi/linux/lightnvm.h b/include/uapi/linux/lightnvm.h
index 928f98997d8a..774a43128a7a 100644
--- a/include/uapi/linux/lightnvm.h
+++ b/include/uapi/linux/lightnvm.h
@@ -33,6 +33,7 @@
33 33
34#define NVM_TTYPE_NAME_MAX 48 34#define NVM_TTYPE_NAME_MAX 48
35#define NVM_TTYPE_MAX 63 35#define NVM_TTYPE_MAX 63
36#define NVM_MMTYPE_LEN 8
36 37
37#define NVM_CTRL_FILE "/dev/lightnvm/control" 38#define NVM_CTRL_FILE "/dev/lightnvm/control"
38 39
@@ -100,6 +101,26 @@ struct nvm_ioctl_remove {
100 __u32 flags; 101 __u32 flags;
101}; 102};
102 103
104struct nvm_ioctl_dev_init {
105 char dev[DISK_NAME_LEN]; /* open-channel SSD device */
106 char mmtype[NVM_MMTYPE_LEN]; /* register to media manager */
107
108 __u32 flags;
109};
110
111enum {
112 NVM_FACTORY_ERASE_ONLY_USER = 1 << 0, /* erase only blocks used as
113 * host blks or grown blks */
114 NVM_FACTORY_RESET_HOST_BLKS = 1 << 1, /* remove host blk marks */
115 NVM_FACTORY_RESET_GRWN_BBLKS = 1 << 2, /* remove grown blk marks */
116 NVM_FACTORY_NR_BITS = 1 << 3, /* stops here */
117};
118
119struct nvm_ioctl_dev_factory {
120 char dev[DISK_NAME_LEN];
121
122 __u32 flags;
123};
103 124
104/* The ioctl type, 'L', 0x20 - 0x2F documented in ioctl-number.txt */ 125/* The ioctl type, 'L', 0x20 - 0x2F documented in ioctl-number.txt */
105enum { 126enum {
@@ -110,6 +131,12 @@ enum {
110 /* device level cmds */ 131 /* device level cmds */
111 NVM_DEV_CREATE_CMD, 132 NVM_DEV_CREATE_CMD,
112 NVM_DEV_REMOVE_CMD, 133 NVM_DEV_REMOVE_CMD,
134
135 /* Init a device to support LightNVM media managers */
136 NVM_DEV_INIT_CMD,
137
138 /* Factory reset device */
139 NVM_DEV_FACTORY_CMD,
113}; 140};
114 141
115#define NVM_IOCTL 'L' /* 0x4c */ 142#define NVM_IOCTL 'L' /* 0x4c */
@@ -122,6 +149,10 @@ enum {
122 struct nvm_ioctl_create) 149 struct nvm_ioctl_create)
123#define NVM_DEV_REMOVE _IOW(NVM_IOCTL, NVM_DEV_REMOVE_CMD, \ 150#define NVM_DEV_REMOVE _IOW(NVM_IOCTL, NVM_DEV_REMOVE_CMD, \
124 struct nvm_ioctl_remove) 151 struct nvm_ioctl_remove)
152#define NVM_DEV_INIT _IOW(NVM_IOCTL, NVM_DEV_INIT_CMD, \
153 struct nvm_ioctl_dev_init)
154#define NVM_DEV_FACTORY _IOW(NVM_IOCTL, NVM_DEV_FACTORY_CMD, \
155 struct nvm_ioctl_dev_factory)
125 156
126#define NVM_VERSION_MAJOR 1 157#define NVM_VERSION_MAJOR 1
127#define NVM_VERSION_MINOR 0 158#define NVM_VERSION_MINOR 0