author     Javier González <javier@javigon.com>    2018-10-09 07:12:02 -0400
committer  Jens Axboe <axboe@kernel.dk>            2018-10-09 10:25:07 -0400
commit     45dcf29b98377bbdc40aa4a23a79ade60295dbaf (patch)
tree       2c21e73e66a009ba9ebfb22e425064e952b91571
parent     090ee26fd51270cc3bd54a0efbc716ede320ad27 (diff)
lightnvm: pblk: encapsulate rqd dma allocations
dma allocations for ppa_list and meta_list in rqd are replicated in
several places across the pblk codebase. Make helpers to encapsulate
creation and deletion to simplify the code.

Signed-off-by: Javier González <javier@cnexlabs.com>
Signed-off-by: Matias Bjørling <mb@lightnvm.io>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
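For context, a minimal sketch of the caller pattern these helpers enable, based only on the diff below; pblk_example_io() is a hypothetical caller used purely for illustration and is not part of the patch:

/*
 * Hypothetical caller, for illustration only. The helpers and the
 * nvm_rq fields are the ones introduced/used by this patch.
 */
static int pblk_example_io(struct pblk *pblk, struct nvm_rq *rqd)
{
	int ret;

	/* Replaces the open-coded nvm_dev_dma_alloc() plus the manual
	 * ppa_list/dma_ppa_list offset setup at each call site.
	 */
	ret = pblk_alloc_rqd_meta(pblk, rqd);
	if (ret)
		return ret;

	/* ... set up and submit the request ... */

	/* Safe to call even if allocation never happened: the helper
	 * checks rqd->meta_list before freeing.
	 */
	pblk_free_rqd_meta(pblk, rqd);
	return 0;
}

Note that pblk_alloc_rqd_meta() only sets up ppa_list/dma_ppa_list when rqd->nr_ppas is greater than one; single-sector requests use the meta_list allocation alone.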
-rw-r--r--  drivers/lightnvm/pblk-core.c      | 51
-rw-r--r--  drivers/lightnvm/pblk-read.c      | 31
-rw-r--r--  drivers/lightnvm/pblk-recovery.c  | 30
-rw-r--r--  drivers/lightnvm/pblk-write.c     | 15
-rw-r--r--  drivers/lightnvm/pblk.h           |  2
5 files changed, 59 insertions(+), 70 deletions(-)
diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c
index 875f3cf615ac..8ae40855d4c9 100644
--- a/drivers/lightnvm/pblk-core.c
+++ b/drivers/lightnvm/pblk-core.c
@@ -237,6 +237,33 @@ static void pblk_invalidate_range(struct pblk *pblk, sector_t slba,
 	spin_unlock(&pblk->trans_lock);
 }
 
+int pblk_alloc_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd)
+{
+	struct nvm_tgt_dev *dev = pblk->dev;
+
+	rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
+							&rqd->dma_meta_list);
+	if (!rqd->meta_list)
+		return -ENOMEM;
+
+	if (rqd->nr_ppas == 1)
+		return 0;
+
+	rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size;
+	rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size;
+
+	return 0;
+}
+
+void pblk_free_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd)
+{
+	struct nvm_tgt_dev *dev = pblk->dev;
+
+	if (rqd->meta_list)
+		nvm_dev_dma_free(dev->parent, rqd->meta_list,
+				rqd->dma_meta_list);
+}
+
 /* Caller must guarantee that the request is a valid type */
 struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int type)
 {
@@ -268,7 +295,6 @@ struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int type)
 /* Typically used on completion path. Cannot guarantee request consistency */
 void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type)
 {
-	struct nvm_tgt_dev *dev = pblk->dev;
 	mempool_t *pool;
 
 	switch (type) {
@@ -289,9 +315,7 @@ void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type)
 		return;
 	}
 
-	if (rqd->meta_list)
-		nvm_dev_dma_free(dev->parent, rqd->meta_list,
-				rqd->dma_meta_list);
+	pblk_free_rqd_meta(pblk, rqd);
 	mempool_free(rqd, pool);
 }
 
@@ -838,18 +862,14 @@ static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
 
 	memset(&rqd, 0, sizeof(struct nvm_rq));
 
-	rqd.meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
-							&rqd.dma_meta_list);
-	if (!rqd.meta_list)
-		return -ENOMEM;
-
-	rqd.ppa_list = rqd.meta_list + pblk_dma_meta_size;
-	rqd.dma_ppa_list = rqd.dma_meta_list + pblk_dma_meta_size;
+	ret = pblk_alloc_rqd_meta(pblk, &rqd);
+	if (ret)
+		return ret;
 
 	bio = bio_map_kern(dev->q, line->smeta, lm->smeta_len, GFP_KERNEL);
 	if (IS_ERR(bio)) {
 		ret = PTR_ERR(bio);
-		goto free_ppa_list;
+		goto clear_rqd;
 	}
 
 	bio->bi_iter.bi_sector = 0; /* internal bio */
@@ -881,7 +901,7 @@ static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
 	if (ret) {
 		pblk_err(pblk, "smeta I/O submission failed: %d\n", ret);
 		bio_put(bio);
-		goto free_ppa_list;
+		goto clear_rqd;
 	}
 
 	atomic_dec(&pblk->inflight_io);
@@ -894,9 +914,8 @@ static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
 		pblk_log_read_err(pblk, &rqd);
 	}
 
-free_ppa_list:
-	nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
-
+clear_rqd:
+	pblk_free_rqd_meta(pblk, &rqd);
 	return ret;
 }
 
diff --git a/drivers/lightnvm/pblk-read.c b/drivers/lightnvm/pblk-read.c
index 829e92857289..f5fe01d3a07f 100644
--- a/drivers/lightnvm/pblk-read.c
+++ b/drivers/lightnvm/pblk-read.c
@@ -453,21 +453,13 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
 	 */
 	bio_init_idx = pblk_get_bi_idx(bio);
 
-	rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
-							&rqd->dma_meta_list);
-	if (!rqd->meta_list) {
-		pblk_err(pblk, "not able to allocate ppa list\n");
+	if (pblk_alloc_rqd_meta(pblk, rqd))
 		goto fail_rqd_free;
-	}
-
-	if (nr_secs > 1) {
-		rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size;
-		rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size;
 
+	if (nr_secs > 1)
 		pblk_read_ppalist_rq(pblk, rqd, bio, blba, read_bitmap);
-	} else {
+	else
 		pblk_read_rq(pblk, rqd, bio, blba, read_bitmap);
-	}
 
 	if (bitmap_full(read_bitmap, nr_secs)) {
 		atomic_inc(&pblk->inflight_io);
@@ -594,15 +586,11 @@ int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
 
 	memset(&rqd, 0, sizeof(struct nvm_rq));
 
-	rqd.meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
-							&rqd.dma_meta_list);
-	if (!rqd.meta_list)
-		return -ENOMEM;
+	ret = pblk_alloc_rqd_meta(pblk, &rqd);
+	if (ret)
+		return ret;
 
 	if (gc_rq->nr_secs > 1) {
-		rqd.ppa_list = rqd.meta_list + pblk_dma_meta_size;
-		rqd.dma_ppa_list = rqd.dma_meta_list + pblk_dma_meta_size;
-
 		gc_rq->secs_to_gc = read_ppalist_rq_gc(pblk, &rqd, gc_rq->line,
 							gc_rq->lba_list,
 							gc_rq->paddr_list,
@@ -623,7 +611,8 @@ int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
 						PBLK_VMALLOC_META, GFP_KERNEL);
 	if (IS_ERR(bio)) {
 		pblk_err(pblk, "could not allocate GC bio (%lu)\n",
 							PTR_ERR(bio));
+		ret = PTR_ERR(bio);
 		goto err_free_dma;
 	}
 
@@ -658,12 +647,12 @@ int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
 #endif
 
 out:
-	nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
+	pblk_free_rqd_meta(pblk, &rqd);
 	return ret;
 
 err_free_bio:
 	bio_put(bio);
 err_free_dma:
-	nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
+	pblk_free_rqd_meta(pblk, &rqd);
 	return ret;
 }
diff --git a/drivers/lightnvm/pblk-recovery.c b/drivers/lightnvm/pblk-recovery.c
index 2526722304bb..218292979953 100644
--- a/drivers/lightnvm/pblk-recovery.c
+++ b/drivers/lightnvm/pblk-recovery.c
@@ -241,13 +241,11 @@ static int pblk_recov_pad_oob(struct pblk *pblk, struct pblk_line *line,
 {
 	struct nvm_tgt_dev *dev = pblk->dev;
 	struct nvm_geo *geo = &dev->geo;
-	struct ppa_addr *ppa_list;
 	struct pblk_sec_meta *meta_list;
 	struct pblk_pad_rq *pad_rq;
 	struct nvm_rq *rqd;
 	struct bio *bio;
 	void *data;
-	dma_addr_t dma_ppa_list, dma_meta_list;
 	__le64 *lba_list = emeta_to_lbas(pblk, line->emeta->buf);
 	u64 w_ptr = line->cur_sec;
 	int left_line_ppas, rq_ppas, rq_len;
@@ -281,20 +279,11 @@ next_pad_rq:
 
 	rq_len = rq_ppas * geo->csecs;
 
-	meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL, &dma_meta_list);
-	if (!meta_list) {
-		ret = -ENOMEM;
-		goto fail_free_pad;
-	}
-
-	ppa_list = (void *)(meta_list) + pblk_dma_meta_size;
-	dma_ppa_list = dma_meta_list + pblk_dma_meta_size;
-
 	bio = pblk_bio_map_addr(pblk, data, rq_ppas, rq_len,
 						PBLK_VMALLOC_META, GFP_KERNEL);
 	if (IS_ERR(bio)) {
 		ret = PTR_ERR(bio);
-		goto fail_free_meta;
+		goto fail_free_pad;
 	}
 
 	bio->bi_iter.bi_sector = 0; /* internal bio */
@@ -302,17 +291,19 @@ next_pad_rq:
 
 	rqd = pblk_alloc_rqd(pblk, PBLK_WRITE_INT);
 
+	ret = pblk_alloc_rqd_meta(pblk, rqd);
+	if (ret)
+		goto fail_free_rqd;
+
 	rqd->bio = bio;
 	rqd->opcode = NVM_OP_PWRITE;
 	rqd->is_seq = 1;
-	rqd->meta_list = meta_list;
 	rqd->nr_ppas = rq_ppas;
-	rqd->ppa_list = ppa_list;
-	rqd->dma_ppa_list = dma_ppa_list;
-	rqd->dma_meta_list = dma_meta_list;
 	rqd->end_io = pblk_end_io_recov;
 	rqd->private = pad_rq;
 
+	meta_list = rqd->meta_list;
+
 	for (i = 0; i < rqd->nr_ppas; ) {
 		struct ppa_addr ppa;
 		int pos;
@@ -346,7 +337,7 @@ next_pad_rq:
 	if (ret) {
 		pblk_err(pblk, "I/O submission failed: %d\n", ret);
 		pblk_up_chunk(pblk, rqd->ppa_list[0]);
-		goto fail_free_bio;
+		goto fail_free_rqd;
 	}
 
 	left_line_ppas -= rq_ppas;
@@ -370,10 +361,9 @@ free_rq:
 	kfree(pad_rq);
 	return ret;
 
-fail_free_bio:
+fail_free_rqd:
+	pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
 	bio_put(bio);
-fail_free_meta:
-	nvm_dev_dma_free(dev->parent, meta_list, dma_meta_list);
 fail_free_pad:
 	kfree(pad_rq);
 	vfree(data);
diff --git a/drivers/lightnvm/pblk-write.c b/drivers/lightnvm/pblk-write.c
index 9554febee480..a276f25d3931 100644
--- a/drivers/lightnvm/pblk-write.c
+++ b/drivers/lightnvm/pblk-write.c
@@ -285,11 +285,8 @@ static void pblk_end_io_write_meta(struct nvm_rq *rqd)
 }
 
 static int pblk_alloc_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
-			   unsigned int nr_secs,
-			   nvm_end_io_fn(*end_io))
+			   unsigned int nr_secs, nvm_end_io_fn(*end_io))
 {
-	struct nvm_tgt_dev *dev = pblk->dev;
-
 	/* Setup write request */
 	rqd->opcode = NVM_OP_PWRITE;
 	rqd->nr_ppas = nr_secs;
@@ -297,15 +294,7 @@ static int pblk_alloc_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
 	rqd->private = pblk;
 	rqd->end_io = end_io;
 
-	rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
-							&rqd->dma_meta_list);
-	if (!rqd->meta_list)
-		return -ENOMEM;
-
-	rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size;
-	rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size;
-
-	return 0;
+	return pblk_alloc_rqd_meta(pblk, rqd);
 }
 
 static int pblk_setup_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
diff --git a/drivers/lightnvm/pblk.h b/drivers/lightnvm/pblk.h
index d123cff82589..b06ab0edab69 100644
--- a/drivers/lightnvm/pblk.h
+++ b/drivers/lightnvm/pblk.h
@@ -778,6 +778,8 @@ ssize_t pblk_rb_sysfs(struct pblk_rb *rb, char *buf);
  */
 struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int type);
 void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type);
+int pblk_alloc_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd);
+void pblk_free_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd);
 void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write);
 int pblk_setup_w_rec_rq(struct pblk *pblk, struct nvm_rq *rqd,
 			struct pblk_c_ctx *c_ctx);