author     Matias Bjørling <m@bjorling.me>    2015-11-16 09:34:37 -0500
committer  Jens Axboe <axboe@fb.com>          2015-11-16 17:20:25 -0500
commit     11450469830f2481a9e7cb181609288d40f41323 (patch)
tree       1aef6904c2c988b7924aa53093836b2979f78f1f /drivers/nvme
parent     aedf17f4515b12ba1cd73298e66baa69cf93010e (diff)
lightnvm: update bad block table format
The specification was changed to reflect a multi-value bad block table.
Instead of a bit-based bad block table, the bad block table now allows
eight bad block categories. Currently, four are defined:
* Factory bad blocks
* Grown bad blocks
* Device-side reserved blocks
* Host-side reserved blocks
The factory and grown bad blocks are the regular bad blocks. The
reserved blocks are for either internal (device-side) or external
(host-side) use. In particular, the device-side reserved blocks allow
the host to bootstrap from a limited number of flash blocks, reducing
the number of flash blocks to scan upon super block initialization.
Support for both get bad block table and set bad block table is added.
Signed-off-by: Matias Bjørling <m@bjorling.me>
Signed-off-by: Jens Axboe <axboe@fb.com>
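
For context on the multi-value format described above: the device now returns
one byte per block in the bad block table, and that byte carries the block's
category rather than a single good/bad bit. The sketch below shows one
plausible encoding of the four defined categories as lightnvm-style block-type
flags; the identifiers and values are assumptions modelled on
include/linux/lightnvm.h, not text from this patch.

/* Illustrative sketch only: each entry of the returned table is one byte
 * per block, carrying the block's category. The identifiers and values
 * below are assumptions modelled on the lightnvm block-type flags.
 */
enum {
	NVM_BLK_T_FREE		= 0x0,	/* usable block */
	NVM_BLK_T_BAD		= 0x1,	/* factory bad block */
	NVM_BLK_T_GRWN_BAD	= 0x2,	/* grown bad block */
	NVM_BLK_T_DEV		= 0x4,	/* device-side reserved block */
	NVM_BLK_T_HOST		= 0x8,	/* host-side reserved block */
};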
Diffstat (limited to 'drivers/nvme')
-rw-r--r--    drivers/nvme/host/lightnvm.c    113
1 file changed, 90 insertions(+), 23 deletions(-)
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index e0b7b95813bc..2c3546516300 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -93,7 +93,7 @@ struct nvme_nvm_l2ptbl {
 	__le16 cdw14[6];
 };
 
-struct nvme_nvm_bbtbl {
+struct nvme_nvm_getbbtbl {
 	__u8 opcode;
 	__u8 flags;
 	__u16 command_id;
@@ -101,10 +101,23 @@ struct nvme_nvm_bbtbl {
 	__u64 rsvd[2];
 	__le64 prp1;
 	__le64 prp2;
-	__le32 prp1_len;
-	__le32 prp2_len;
-	__le32 lbb;
-	__u32 rsvd11[3];
+	__le64 spba;
+	__u32 rsvd4[4];
+};
+
+struct nvme_nvm_setbbtbl {
+	__u8 opcode;
+	__u8 flags;
+	__u16 command_id;
+	__le32 nsid;
+	__le64 rsvd[2];
+	__le64 prp1;
+	__le64 prp2;
+	__le64 spba;
+	__le16 nlb;
+	__u8 value;
+	__u8 rsvd3;
+	__u32 rsvd4[3];
 };
 
 struct nvme_nvm_erase_blk {
@@ -129,8 +142,8 @@ struct nvme_nvm_command {
 		struct nvme_nvm_hb_rw hb_rw;
 		struct nvme_nvm_ph_rw ph_rw;
 		struct nvme_nvm_l2ptbl l2p;
-		struct nvme_nvm_bbtbl get_bb;
-		struct nvme_nvm_bbtbl set_bb;
+		struct nvme_nvm_getbbtbl get_bb;
+		struct nvme_nvm_setbbtbl set_bb;
 		struct nvme_nvm_erase_blk erase;
 	};
 };
@@ -187,6 +200,20 @@ struct nvme_nvm_id {
 	struct nvme_nvm_id_group groups[4];
 } __packed;
 
+struct nvme_nvm_bb_tbl {
+	__u8 tblid[4];
+	__le16 verid;
+	__le16 revid;
+	__le32 rvsd1;
+	__le32 tblks;
+	__le32 tfact;
+	__le32 tgrown;
+	__le32 tdresv;
+	__le32 thresv;
+	__le32 rsvd2[8];
+	__u8 blk[0];
+};
+
 /*
  * Check we didn't inadvertently grow the command struct
  */
@@ -195,12 +222,14 @@ static inline void _nvme_nvm_check_size(void)
 	BUILD_BUG_ON(sizeof(struct nvme_nvm_identity) != 64);
 	BUILD_BUG_ON(sizeof(struct nvme_nvm_hb_rw) != 64);
 	BUILD_BUG_ON(sizeof(struct nvme_nvm_ph_rw) != 64);
-	BUILD_BUG_ON(sizeof(struct nvme_nvm_bbtbl) != 64);
+	BUILD_BUG_ON(sizeof(struct nvme_nvm_getbbtbl) != 64);
+	BUILD_BUG_ON(sizeof(struct nvme_nvm_setbbtbl) != 64);
 	BUILD_BUG_ON(sizeof(struct nvme_nvm_l2ptbl) != 64);
 	BUILD_BUG_ON(sizeof(struct nvme_nvm_erase_blk) != 64);
 	BUILD_BUG_ON(sizeof(struct nvme_nvm_id_group) != 960);
 	BUILD_BUG_ON(sizeof(struct nvme_nvm_addr_format) != 128);
 	BUILD_BUG_ON(sizeof(struct nvme_nvm_id) != 4096);
+	BUILD_BUG_ON(sizeof(struct nvme_nvm_bb_tbl) != 512);
 }
 
 static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id)
@@ -322,43 +351,80 @@ out:
 	return ret;
 }
 
-static int nvme_nvm_get_bb_tbl(struct request_queue *q, int lunid,
-				unsigned int nr_blocks,
-				nvm_bb_update_fn *update_bbtbl, void *priv)
+static int nvme_nvm_get_bb_tbl(struct request_queue *q, struct ppa_addr ppa,
+				int nr_blocks, nvm_bb_update_fn *update_bbtbl,
+				void *priv)
 {
 	struct nvme_ns *ns = q->queuedata;
 	struct nvme_dev *dev = ns->dev;
 	struct nvme_nvm_command c = {};
-	void *bb_bitmap;
-	u16 bb_bitmap_size;
+	struct nvme_nvm_bb_tbl *bb_tbl;
+	int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blocks;
 	int ret = 0;
 
 	c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl;
 	c.get_bb.nsid = cpu_to_le32(ns->ns_id);
-	c.get_bb.lbb = cpu_to_le32(lunid);
-	bb_bitmap_size = ((nr_blocks >> 15) + 1) * PAGE_SIZE;
-	bb_bitmap = kmalloc(bb_bitmap_size, GFP_KERNEL);
-	if (!bb_bitmap)
-		return -ENOMEM;
+	c.get_bb.spba = cpu_to_le64(ppa.ppa);
 
-	bitmap_zero(bb_bitmap, nr_blocks);
+	bb_tbl = kzalloc(tblsz, GFP_KERNEL);
+	if (!bb_tbl)
+		return -ENOMEM;
 
-	ret = nvme_submit_sync_cmd(q, (struct nvme_command *)&c, bb_bitmap,
-								bb_bitmap_size);
+	ret = nvme_submit_sync_cmd(q, (struct nvme_command *)&c, bb_tbl, tblsz);
 	if (ret) {
 		dev_err(dev->dev, "get bad block table failed (%d)\n", ret);
 		ret = -EIO;
 		goto out;
 	}
 
-	ret = update_bbtbl(lunid, bb_bitmap, nr_blocks, priv);
+	if (bb_tbl->tblid[0] != 'B' || bb_tbl->tblid[1] != 'B' ||
+		bb_tbl->tblid[2] != 'L' || bb_tbl->tblid[3] != 'T') {
+		dev_err(dev->dev, "bbt format mismatch\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (le16_to_cpu(bb_tbl->verid) != 1) {
+		ret = -EINVAL;
+		dev_err(dev->dev, "bbt version not supported\n");
+		goto out;
+	}
+
+	if (le32_to_cpu(bb_tbl->tblks) != nr_blocks) {
+		ret = -EINVAL;
+		dev_err(dev->dev, "bbt unsuspected blocks returned (%u!=%u)",
+					le32_to_cpu(bb_tbl->tblks), nr_blocks);
+		goto out;
+	}
+
+	ret = update_bbtbl(ppa, nr_blocks, bb_tbl->blk, priv);
 	if (ret) {
 		ret = -EINTR;
 		goto out;
 	}
 
 out:
-	kfree(bb_bitmap);
+	kfree(bb_tbl);
+	return ret;
+}
+
+static int nvme_nvm_set_bb_tbl(struct request_queue *q, struct nvm_rq *rqd,
+								int type)
+{
+	struct nvme_ns *ns = q->queuedata;
+	struct nvme_dev *dev = ns->dev;
+	struct nvme_nvm_command c = {};
+	int ret = 0;
+
+	c.set_bb.opcode = nvme_nvm_admin_set_bb_tbl;
+	c.set_bb.nsid = cpu_to_le32(ns->ns_id);
+	c.set_bb.spba = cpu_to_le64(rqd->ppa_addr.ppa);
+	c.set_bb.nlb = cpu_to_le16(rqd->nr_pages - 1);
+	c.set_bb.value = type;
+
+	ret = nvme_submit_sync_cmd(q, (struct nvme_command *)&c, NULL, 0);
+	if (ret)
+		dev_err(dev->dev, "set bad block table failed (%d)\n", ret);
 	return ret;
 }
 
@@ -474,6 +540,7 @@ static struct nvm_dev_ops nvme_nvm_dev_ops = {
 	.get_l2p_tbl		= nvme_nvm_get_l2p_tbl,
 
 	.get_bb_tbl		= nvme_nvm_get_bb_tbl,
+	.set_bb_tbl		= nvme_nvm_set_bb_tbl,
 
 	.submit_io		= nvme_nvm_submit_io,
 	.erase_block		= nvme_nvm_erase_block,
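
As a rough usage illustration of the new set_bb_tbl hook exposed above, a
lightnvm consumer could mark a block as grown bad roughly as follows. This is
a hedged sketch: the helper name is made up, the 0x2 category value follows
the assumption in the earlier sketch, and the nvm_dev fields (dev->q,
dev->ops) should be checked against include/linux/lightnvm.h of the same
kernel version.

/* Hypothetical helper: mark the block addressed by rqd->ppa_addr as grown
 * bad through the driver's set_bb_tbl hook. Names outside the patch above
 * (mark_block_grown_bad, the 0x2 category value) are assumptions.
 */
static int mark_block_grown_bad(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	if (!dev->ops->set_bb_tbl)
		return -EOPNOTSUPP;

	/* value 0x2 assumed to mean "grown bad" (see earlier sketch) */
	return dev->ops->set_bb_tbl(dev->q, rqd, 0x2);
}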