-rw-r--r--   drivers/lightnvm/core.c    | 67
-rw-r--r--   drivers/lightnvm/gennvm.c  | 68
-rw-r--r--   include/linux/lightnvm.h   |  3
3 files changed, 74 insertions(+), 64 deletions(-)
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 8f41b245cd55..6134339aa6cf 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -192,6 +192,73 @@ int nvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk)
 }
 EXPORT_SYMBOL(nvm_erase_blk);
 
+void nvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
+{
+	int i;
+
+	if (rqd->nr_pages > 1) {
+		for (i = 0; i < rqd->nr_pages; i++)
+			rqd->ppa_list[i] = dev_to_generic_addr(dev,
+							rqd->ppa_list[i]);
+	} else {
+		rqd->ppa_addr = dev_to_generic_addr(dev, rqd->ppa_addr);
+	}
+}
+EXPORT_SYMBOL(nvm_addr_to_generic_mode);
+
+void nvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
+{
+	int i;
+
+	if (rqd->nr_pages > 1) {
+		for (i = 0; i < rqd->nr_pages; i++)
+			rqd->ppa_list[i] = generic_to_dev_addr(dev,
+							rqd->ppa_list[i]);
+	} else {
+		rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr);
+	}
+}
+EXPORT_SYMBOL(nvm_generic_to_addr_mode);
+
+int nvm_erase_ppa(struct nvm_dev *dev, struct ppa_addr ppa)
+{
+	int plane_cnt = 0, pl_idx, ret;
+	struct nvm_rq rqd;
+
+	if (!dev->ops->erase_block)
+		return 0;
+
+	if (dev->plane_mode == NVM_PLANE_SINGLE) {
+		rqd.nr_pages = 1;
+		rqd.ppa_addr = ppa;
+	} else {
+		plane_cnt = (1 << dev->plane_mode);
+		rqd.nr_pages = plane_cnt;
+
+		rqd.ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL,
+							&rqd.dma_ppa_list);
+		if (!rqd.ppa_list) {
+			pr_err("nvm: failed to allocate dma memory\n");
+			return -ENOMEM;
+		}
+
+		for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
+			ppa.g.pl = pl_idx;
+			rqd.ppa_list[pl_idx] = ppa;
+		}
+	}
+
+	nvm_generic_to_addr_mode(dev, &rqd);
+
+	ret = dev->ops->erase_block(dev, &rqd);
+
+	if (plane_cnt)
+		nvm_dev_dma_free(dev, rqd.ppa_list, rqd.dma_ppa_list);
+
+	return ret;
+}
+EXPORT_SYMBOL(nvm_erase_ppa);
+
 static int nvm_core_init(struct nvm_dev *dev)
 {
 	struct nvm_id *id = &dev->identity;
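With nvm_addr_to_generic_mode(), nvm_generic_to_addr_mode() and nvm_erase_ppa() exported from core.c, a media manager no longer has to duplicate the plane fan-out and DMA list handling when erasing a block. The sketch below is illustrative only and not part of the patch; the caller name example_mm_erase() is made up, while block_to_ppa() and the helpers it calls are the ones shown above.

/* Illustrative only -- not part of this patch. Assumes a media manager
 * holding a struct nvm_block it wants to erase. */
static int example_mm_erase(struct nvm_dev *dev, struct nvm_block *blk)
{
	/* build the block's address in generic (media manager) format */
	struct ppa_addr ppa = block_to_ppa(dev, blk);

	/*
	 * nvm_erase_ppa() expands the address across planes when
	 * dev->plane_mode != NVM_PLANE_SINGLE, converts it to device
	 * format via nvm_generic_to_addr_mode() and then calls
	 * dev->ops->erase_block().
	 */
	return nvm_erase_ppa(dev, ppa);
}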
diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
index 2a96ff6923f0..373be72816bd 100644
--- a/drivers/lightnvm/gennvm.c
+++ b/drivers/lightnvm/gennvm.c
@@ -317,39 +317,13 @@ static void gennvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
 	spin_unlock(&vlun->lock);
 }
 
-static void gennvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
-{
-	int i;
-
-	if (rqd->nr_pages > 1) {
-		for (i = 0; i < rqd->nr_pages; i++)
-			rqd->ppa_list[i] = dev_to_generic_addr(dev,
-							rqd->ppa_list[i]);
-	} else {
-		rqd->ppa_addr = dev_to_generic_addr(dev, rqd->ppa_addr);
-	}
-}
-
-static void gennvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
-{
-	int i;
-
-	if (rqd->nr_pages > 1) {
-		for (i = 0; i < rqd->nr_pages; i++)
-			rqd->ppa_list[i] = generic_to_dev_addr(dev,
-							rqd->ppa_list[i]);
-	} else {
-		rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr);
-	}
-}
-
 static int gennvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
 {
 	if (!dev->ops->submit_io)
 		return -ENODEV;
 
 	/* Convert address space */
-	gennvm_generic_to_addr_mode(dev, rqd);
+	nvm_generic_to_addr_mode(dev, rqd);
 
 	rqd->dev = dev;
 	return dev->ops->submit_io(dev, rqd);
@@ -391,7 +365,7 @@ static void gennvm_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd)
 	if (dev->ops->set_bb_tbl(dev, rqd, 1))
 		return;
 
-	gennvm_addr_to_generic_mode(dev, rqd);
+	nvm_addr_to_generic_mode(dev, rqd);
 
 	/* look up blocks and mark them as bad */
 	if (rqd->nr_pages > 1)
@@ -425,43 +399,9 @@ static int gennvm_end_io(struct nvm_rq *rqd, int error)
 static int gennvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk,
 							unsigned long flags)
 {
-	int plane_cnt = 0, pl_idx, ret;
-	struct ppa_addr addr;
-	struct nvm_rq rqd;
-
-	if (!dev->ops->erase_block)
-		return 0;
-
-	addr = block_to_ppa(dev, blk);
-
-	if (dev->plane_mode == NVM_PLANE_SINGLE) {
-		rqd.nr_pages = 1;
-		rqd.ppa_addr = addr;
-	} else {
-		plane_cnt = (1 << dev->plane_mode);
-		rqd.nr_pages = plane_cnt;
-
-		rqd.ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL,
-							&rqd.dma_ppa_list);
-		if (!rqd.ppa_list) {
-			pr_err("gennvm: failed to allocate dma memory\n");
-			return -ENOMEM;
-		}
-
-		for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
-			addr.g.pl = pl_idx;
-			rqd.ppa_list[pl_idx] = addr;
-		}
-	}
-
-	gennvm_generic_to_addr_mode(dev, &rqd);
-
-	ret = dev->ops->erase_block(dev, &rqd);
-
-	if (plane_cnt)
-		nvm_dev_dma_free(dev, rqd.ppa_list, rqd.dma_ppa_list);
-
-	return ret;
+	struct ppa_addr addr = block_to_ppa(dev, blk);
+
+	return nvm_erase_ppa(dev, addr);
 }
 
 static struct nvm_lun *gennvm_get_lun(struct nvm_dev *dev, int lunid)
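After this change gennvm relies on the shared core helpers for all address-space conversions: requests are built with generic addresses, switched to device format right before an ops call (gennvm_submit_io()), and switched back when the result has to be interpreted generically (gennvm_mark_blk_bad()). Below is a minimal sketch of that round trip, assuming an rqd already populated with generic ppas; the wrapper name is hypothetical and not part of the patch.

/*
 * Illustrative only -- not part of this patch. The two helpers are
 * inverses: converting a request to device format and back leaves the
 * generic ppas it started with.
 */
static void example_round_trip(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	/* generic (media manager) layout -> device-specific layout */
	nvm_generic_to_addr_mode(dev, rqd);

	/* ... hand rqd to dev->ops->submit_io()/erase_block() here ... */

	/* device-specific layout -> generic layout */
	nvm_addr_to_generic_mode(dev, rqd);
}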
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index 034117b3be5f..c228dbc803bf 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -427,6 +427,9 @@ extern int nvm_register(struct request_queue *, char *,
 extern void nvm_unregister(char *);
 
 extern int nvm_submit_io(struct nvm_dev *, struct nvm_rq *);
+extern void nvm_generic_to_addr_mode(struct nvm_dev *, struct nvm_rq *);
+extern void nvm_addr_to_generic_mode(struct nvm_dev *, struct nvm_rq *);
+extern int nvm_erase_ppa(struct nvm_dev *, struct ppa_addr);
 extern int nvm_erase_blk(struct nvm_dev *, struct nvm_block *);
 #else /* CONFIG_NVM */
 struct nvm_dev_ops;